diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..95788c22 --- /dev/null +++ b/.flake8 @@ -0,0 +1,5 @@ +[flake8] +exclude = + fatiando/_version.py, + fatiando/_our_duecredit.py +ignore = F401,E226 diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 00000000..805284a9 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,407 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore= + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=4 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Allow optimization of some AST trees. This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. This option is deprecated +# and it will be removed in Pylint 2.0. 
+optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable= + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=colorized + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". This option is deprecated +# and it will be removed in Pylint 2.0. +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). 
You have access to the variables error, warning, and statement, which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 
+expected-line-ending-format= + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. 
+spelling-store-unknown-words=no + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,x,y,z,x1,x2,y1,y2,z1,z2,xp,yp,zp,xc,yc,zc,nx,ny,nc,dx,dy,dz,dt,p1,p2,r1,r2,w,e,s,n,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=yes + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline 
iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[ELIF] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,future.builtins + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. 
internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=10 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=15 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=1 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. 
+valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/.travis.yml b/.travis.yml index 27a042fe..8da20750 100644 --- a/.travis.yml +++ b/.travis.yml @@ -73,6 +73,8 @@ install: script: - bash ci/run-tests.sh - make pep8 + # Check for Python 3 compatibility regressions + - make check3 - if [ "$BUILD_DOCS" == "true" ]; then make -C doc; fi after_success: diff --git a/Makefile b/Makefile index 345d2eca..db7ac5ae 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,9 @@ help: @echo " cython generate C code from Cython files" @echo " test run the test suite (including doctests)" @echo " pep8 check for PEP8 style compliance" - @echo " pep8-stats print a summary of the PEP8 check" + @echo " lint run static analysis using pylint" + @echo " check3 check for compatibility with Python 3" + @echo " check run all code quality checks (pep8, lint, check3)" @echo " coverage calculate test coverage using Coverage" @echo " clean clean up build and generated files" @echo "" @@ -36,10 +38,15 @@ coverage: rm -r $(TESTDIR) pep8: - pep8 $(PEP8ARGS) fatiando cookbook gallery setup.py + flake8 fatiando gallery setup.py -pep8-stats: - pep8 $(PEP8ARGS) --statistics -qq fatiando cookbook gallery setup.py +lint: + pylint fatiando gallery setup.py + +check3: + pylint fatiando gallery setup.py --py3k + +check: pep8 check3 lint clean: find . 
-name "*.so" -exec rm -v {} \; diff --git a/ci/requirements.txt b/ci/requirements.txt index 41985d8e..d9ae43eb 100644 --- a/ci/requirements.txt +++ b/ci/requirements.txt @@ -10,7 +10,8 @@ sphinx pytest pytest-cov coverage -pep8=1.6.2 +flake8 +pylint cython sphinx-gallery sphinx_bootstrap_theme diff --git a/cookbook/seismic_wavefd_elastic_psv.py b/cookbook/seismic_wavefd_elastic_psv.py index 2c4e1df5..a8cf2184 100644 --- a/cookbook/seismic_wavefd_elastic_psv.py +++ b/cookbook/seismic_wavefd_elastic_psv.py @@ -3,9 +3,9 @@ """ import numpy as np from matplotlib import animation +import matplotlib.pyplot as plt from fatiando import gridder from fatiando.seismic import wavefd -from fatiando.vis import mpl # Set the parameters of the finite difference grid shape = (200, 200) @@ -33,26 +33,25 @@ xz2ps=True) # This part makes an animation using matplotlibs animation API -fig = mpl.figure(figsize=(12, 5)) -mpl.subplot(2, 2, 2) -mpl.title('x component') -xseismogram, = mpl.plot([], [], '-k') -mpl.xlim(0, duration) -mpl.ylim(-10 ** (-3), 10 ** (-3)) -mpl.subplot(2, 2, 4) -mpl.title('z component') -zseismogram, = mpl.plot([], [], '-k') -mpl.xlim(0, duration) -mpl.ylim(-10 ** (-3), 10 ** (-3)) -mpl.subplot(1, 2, 1) +fig = plt.figure(figsize=(12, 5)) +plt.subplot(2, 2, 2) +plt.title('x component') +xseismogram, = plt.plot([], [], '-k') +plt.xlim(0, duration) +plt.ylim(-10 ** (-3), 10 ** (-3)) +plt.subplot(2, 2, 4) +plt.title('z component') +zseismogram, = plt.plot([], [], '-k') +plt.xlim(0, duration) +plt.ylim(-10 ** (-3), 10 ** (-3)) +plt.subplot(1, 2, 1) # Start with everything zero and grab the plot so that it can be updated later -wavefield = mpl.imshow(np.zeros(shape), extent=area, vmin=-10 ** -6, - vmax=10 ** -6, cmap=mpl.cm.gray_r) -mpl.points(stations, '^k') -mpl.ylim(area[2:][::-1]) -mpl.xlabel('x (km)') -mpl.ylabel('z (km)') -mpl.m2km() +wavefield = plt.imshow(np.zeros(shape), extent=area, vmin=-10 ** -6, + vmax=10 ** -6, cmap=plt.cm.gray_r) +plt.plot(stations[0][0], 
stations[0][1], '^k') +plt.ylim(area[2:][::-1]) +plt.xlabel('x (km)') +plt.ylabel('z (km)') times = np.linspace(0, maxit * dt, maxit) # This function updates the plot every few timesteps @@ -63,7 +62,7 @@ def animate(i): xz2ps=True """ t, p, s, xcomp, zcomp = simulation.next() - mpl.title('time: %0.1f s' % (times[t])) + plt.title('time: %0.1f s' % (times[t])) wavefield.set_array((p + s)[::-1]) xseismogram.set_data(times[:t + 1], xcomp[0][:t + 1]) zseismogram.set_data(times[:t + 1], zcomp[0][:t + 1]) @@ -73,4 +72,4 @@ def animate(i): anim = animation.FuncAnimation( fig, animate, frames=maxit / snapshot, interval=1) # anim.save('psv_wave.mp4', fps=20, dpi=200, bitrate=4000) -mpl.show() +plt.show() diff --git a/environment.yml b/environment.yml index 15d82bfc..51598cbb 100644 --- a/environment.yml +++ b/environment.yml @@ -14,7 +14,8 @@ dependencies: - pytest - pytest-cov - coverage - - pep8=1.6.2 + - flake8 + - pylint - cython - sphinx-gallery - sphinx_bootstrap_theme diff --git a/fatiando/__init__.py b/fatiando/__init__.py index ec05006c..6968d64b 100644 --- a/fatiando/__init__.py +++ b/fatiando/__init__.py @@ -5,7 +5,10 @@ See the API reference for each subpackage for a list of all functions and classes defined by it. 
""" +from __future__ import absolute_import from ._version import get_versions + + __version__ = get_versions()['version'] __commit__ = get_versions()['full'] del get_versions diff --git a/fatiando/_our_duecredit.py b/fatiando/_our_duecredit.py index 35d35d7e..2152f1c4 100644 --- a/fatiando/_our_duecredit.py +++ b/fatiando/_our_duecredit.py @@ -1,3 +1,4 @@ +# pylint: skip-file # emacs: at the end of the file # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### # diff --git a/fatiando/_version.py b/fatiando/_version.py index a29c8591..c28c63e5 100644 --- a/fatiando/_version.py +++ b/fatiando/_version.py @@ -1,4 +1,4 @@ - +# pylint: skip-file # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build diff --git a/fatiando/datasets/__init__.py b/fatiando/datasets/__init__.py index 9b7016b9..49965cde 100644 --- a/fatiando/datasets/__init__.py +++ b/fatiando/datasets/__init__.py @@ -1,6 +1,7 @@ """ Functions to read data from files and fetch datasets from the internet. """ +from __future__ import absolute_import from .surfer import load_surfer from .utils import check_hash from .hawaii_gravity import fetch_hawaii_gravity diff --git a/fatiando/datasets/hawaii_gravity.py b/fatiando/datasets/hawaii_gravity.py index 22fdeb18..8ebcec54 100644 --- a/fatiando/datasets/hawaii_gravity.py +++ b/fatiando/datasets/hawaii_gravity.py @@ -2,7 +2,7 @@ """ Load gravity data from the eigen-6c4 model for Hawaii. """ -from __future__ import unicode_literals +from __future__ import unicode_literals, absolute_import import os import numpy as np diff --git a/fatiando/datasets/image.py b/fatiando/datasets/image.py index 50afd963..9e45662b 100644 --- a/fatiando/datasets/image.py +++ b/fatiando/datasets/image.py @@ -1,6 +1,7 @@ """ Load data/models from images. 
""" +from __future__ import absolute_import import os from PIL import Image import scipy.misc diff --git a/fatiando/datasets/surfer.py b/fatiando/datasets/surfer.py index c5597a86..1701a472 100644 --- a/fatiando/datasets/surfer.py +++ b/fatiando/datasets/surfer.py @@ -1,7 +1,7 @@ """ Functions for dealing with Surfer data grids. """ -from __future__ import division +from __future__ import division, absolute_import import numpy as np @@ -54,8 +54,8 @@ def load_surfer(fname, dtype=np.float64): # zMin zMax Z min max # z11 z21 z31 ... List of Z values with open(fname) as f: - # DSAA is a Surfer ASCII GRD ID - id = f.readline() + # DSAA is a Surfer ASCII GRD ID (discard it for now) + f.readline() # Read the number of columns (ny) and rows (nx) ny, nx = [int(s) for s in f.readline().split()] shape = (nx, ny) diff --git a/fatiando/datasets/tests/test_hawaii_gravity.py b/fatiando/datasets/tests/test_hawaii_gravity.py index 0d5933b3..05198100 100644 --- a/fatiando/datasets/tests/test_hawaii_gravity.py +++ b/fatiando/datasets/tests/test_hawaii_gravity.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import numpy as np import numpy.testing as npt diff --git a/fatiando/datasets/tests/test_image.py b/fatiando/datasets/tests/test_image.py index 0a054ad2..7f710939 100644 --- a/fatiando/datasets/tests/test_image.py +++ b/fatiando/datasets/tests/test_image.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import os import numpy as np import numpy.testing as npt diff --git a/fatiando/datasets/tests/test_surfer.py b/fatiando/datasets/tests/test_surfer.py index a2cee859..a21fe5c6 100644 --- a/fatiando/datasets/tests/test_surfer.py +++ b/fatiando/datasets/tests/test_surfer.py @@ -1,4 +1,4 @@ -from __future__ import division +from __future__ import division, absolute_import import os import numpy as np import numpy.testing as npt diff --git a/fatiando/datasets/tests/test_utils.py b/fatiando/datasets/tests/test_utils.py index 48936b2c..9b7b04c8 100644 --- 
a/fatiando/datasets/tests/test_utils.py +++ b/fatiando/datasets/tests/test_utils.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import os from pytest import raises diff --git a/fatiando/datasets/utils.py b/fatiando/datasets/utils.py index 3d2859ad..92bd6cd0 100644 --- a/fatiando/datasets/utils.py +++ b/fatiando/datasets/utils.py @@ -1,6 +1,7 @@ """ Utilities for loading the data sets. """ +from __future__ import absolute_import import hashlib diff --git a/fatiando/geothermal/climsig.py b/fatiando/geothermal/climsig.py index fb1c6b14..5ef39579 100644 --- a/fatiando/geothermal/climsig.py +++ b/fatiando/geothermal/climsig.py @@ -44,7 +44,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super import numpy as np import scipy.special @@ -79,8 +79,8 @@ def linear(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR): """ tmp = zp / np.sqrt(4. * diffus * age) - res = amp * ((1. + 2 * tmp ** 2) * scipy.special.erfc(tmp) - - 2. 
/ np.sqrt(np.pi) * tmp * np.exp(-tmp ** 2)) + res = amp*((1 + 2*tmp**2)*scipy.special.erfc(tmp) - + 2/np.sqrt(np.pi)*tmp*np.exp(-tmp**2)) return res @@ -226,9 +226,8 @@ def jacobian(self, p): jac[:, 1] = amp*tmp*np.exp(-(tmp**2))/(np.sqrt(np.pi)*age) if self.mode == 'linear': delta = 0.5 - at_p = linear(amp, age, self.zp, self.diffus) jac[:, 0] = linear(1., age, self.zp, self.diffus) - jac[:, 1] = ( - linear(amp, age + delta, self.zp, self.diffus) - - linear(amp, age - delta, self.zp, self.diffus))/(2*delta) + jac[:, 1] = (linear(amp, age + delta, self.zp, self.diffus) - + linear(amp, age - delta, self.zp, self.diffus) + )/(2*delta) return jac diff --git a/fatiando/gravmag/__init__.py b/fatiando/gravmag/__init__.py index e68e30dd..e8c33b6f 100644 --- a/fatiando/gravmag/__init__.py +++ b/fatiando/gravmag/__init__.py @@ -43,4 +43,5 @@ ---- """ +from __future__ import absolute_import from .euler import EulerDeconv, EulerDeconvMW, EulerDeconvEW diff --git a/fatiando/gravmag/_polyprism_numpy.py b/fatiando/gravmag/_polyprism_numpy.py index 4e2cd0d7..ebec9509 100644 --- a/fatiando/gravmag/_polyprism_numpy.py +++ b/fatiando/gravmag/_polyprism_numpy.py @@ -1,5 +1,5 @@ # Numpy implementation for the potential fields of a polygonal prism -from __future__ import division +from __future__ import division, absolute_import import numpy from numpy import arctan2, log, sqrt @@ -21,8 +21,8 @@ def tf(xp, yp, zp, prisms, inc, dec, pmag=None): pmx, pmy, pmz = pmag res = numpy.zeros(len(xp), dtype=numpy.float) for prism in prisms: - if prism is None or ('magnetization' not in prism.props - and pmag is None): + if prism is None or ('magnetization' not in prism.props and + pmag is None): continue if pmag is None: mag = prism.props['magnetization'] diff --git a/fatiando/gravmag/_prism_numpy.py b/fatiando/gravmag/_prism_numpy.py index a2c1a968..a956b87d 100644 --- a/fatiando/gravmag/_prism_numpy.py +++ b/fatiando/gravmag/_prism_numpy.py @@ -3,6 +3,7 @@ right rectangular prisms. 
This is used to test the more efficient Cython version in fatiando.gravmag._prism. Not meant for actual use. """ +from __future__ import absolute_import, division import numpy from numpy import sqrt, log, arctan2, pi @@ -51,14 +52,14 @@ def potential(xp, yp, zp, prisms, dens=None): for j in range(2): for i in range(2): r = sqrt(x[i]**2 + y[j]**2 + z[k]**2) - kernel = (x[i]*y[j]*safe_log(z[k] + r) - + y[j]*z[k]*safe_log(x[i] + r) - + x[i]*z[k]*safe_log(y[j] + r) - - 0.5*x[i]**2 * - safe_atan2(z[k]*y[j], x[i]*r) - - 0.5*y[j]**2 * - safe_atan2(z[k]*x[i], y[j]*r) - - 0.5*z[k]**2*safe_atan2(x[i]*y[j], z[k]*r)) + kernel = (x[i]*y[j]*safe_log(z[k] + r) + + y[j]*z[k]*safe_log(x[i] + r) + + x[i]*z[k]*safe_log(y[j] + r) - + 0.5*x[i]**2 * + safe_atan2(z[k]*y[j], x[i]*r) - + 0.5*y[j]**2 * + safe_atan2(z[k]*x[i], y[j]*r) - + 0.5*z[k]**2*safe_atan2(x[i]*y[j], z[k]*r)) res += ((-1.)**(i + j + k))*kernel*density # Now all that is left is to multiply res by the gravitational constant res *= G @@ -86,9 +87,9 @@ def gx(xp, yp, zp, prisms, dens=None): r = sqrt(x[i]**2 + y[j]**2 + z[k]**2) # Minus because Nagy et al (2000) give the formula for the # gradient of the potential. Gravity is -grad(V) - kernel = -(y[j]*safe_log(z[k] + r) - + z[k]*safe_log(y[j] + r) - - x[i]*safe_atan2(z[k]*y[j], x[i]*r)) + kernel = -(y[j]*safe_log(z[k] + r) + + z[k]*safe_log(y[j] + r) - + x[i]*safe_atan2(z[k]*y[j], x[i]*r)) res += ((-1.)**(i + j + k))*kernel*density # Now all that is left is to multiply res by the gravitational constant and # convert it to mGal units @@ -117,9 +118,9 @@ def gy(xp, yp, zp, prisms, dens=None): r = sqrt(x[i]**2 + y[j]**2 + z[k]**2) # Minus because Nagy et al (2000) give the formula for the # gradient of the potential. 
Gravity is -grad(V) - kernel = -(z[k]*safe_log(x[i] + r) - + x[i]*safe_log(z[k] + r) - - y[j]*safe_atan2(x[i]*z[k], y[j]*r)) + kernel = -(z[k]*safe_log(x[i] + r) + + x[i]*safe_log(z[k] + r) - + y[j]*safe_atan2(x[i]*z[k], y[j]*r)) res += ((-1.)**(i + j + k))*kernel*density # Now all that is left is to multiply res by the gravitational constant and # convert it to mGal units @@ -148,9 +149,9 @@ def gz(xp, yp, zp, prisms, dens=None): r = sqrt(x[i]**2 + y[j]**2 + z[k]**2) # Minus because Nagy et al (2000) give the formula for the # gradient of the potential. Gravity is -grad(V) - kernel = -(x[i]*safe_log(y[j] + r) - + y[j]*safe_log(x[i] + r) - - z[k]*safe_atan2(x[i]*y[j], z[k]*r)) + kernel = -(x[i]*safe_log(y[j] + r) + + y[j]*safe_log(x[i] + r) - + z[k]*safe_atan2(x[i]*y[j], z[k]*r)) res += ((-1.)**(i + j + k))*kernel*density # Now all that is left is to multiply res by the gravitational constant and # convert it to mGal units @@ -267,8 +268,8 @@ def tf(xp, yp, zp, prisms, inc, dec, pmag=None): pintensity = numpy.linalg.norm(pmag) pmx, pmy, pmz = numpy.array(pmag) / pintensity for prism in prisms: - if prism is None or ('magnetization' not in prism.props - and pmag is None): + if prism is None or ('magnetization' not in prism.props and + pmag is None): continue if pmag is None: mag = prism.props['magnetization'] @@ -300,13 +301,13 @@ def tf(xp, yp, zp, prisms, inc, dec, pmag=None): zr = z[k]*r res += ((-1.)**(i + j))*intensity*( 0.5*(my*fz + mz*fy) * - safe_log((r - x[i]) / (r + x[i])) - + 0.5*(mx*fz + mz*fx) * - safe_log((r - y[j]) / (r + y[j])) - - (mx*fy + my*fx)*safe_log(r + z[k]) - - mx*fx*safe_atan2(xy, x_sqr + zr + z_sqr) - - my*fy*safe_atan2(xy, r_sqr + zr - x_sqr) - + mz*fz*safe_atan2(xy, zr)) + safe_log((r - x[i]) / (r + x[i])) + + 0.5*(mx*fz + mz*fx) * + safe_log((r - y[j]) / (r + y[j])) - + (mx*fy + my*fx)*safe_log(r + z[k]) - + mx*fx*safe_atan2(xy, x_sqr + zr + z_sqr) - + my*fy*safe_atan2(xy, r_sqr + zr - x_sqr) + + mz*fz*safe_atan2(xy, zr)) res *= CM*T2NT 
return res diff --git a/fatiando/gravmag/_tesseroid_numba.py b/fatiando/gravmag/_tesseroid_numba.py index eb19f28e..4293a45c 100644 --- a/fatiando/gravmag/_tesseroid_numba.py +++ b/fatiando/gravmag/_tesseroid_numba.py @@ -21,7 +21,7 @@ [ 2., 4., 3., 6., 11., 9.]]) """ -from __future__ import division +from __future__ import division, absolute_import import numba import numpy as np @@ -116,9 +116,9 @@ def split(w, e, s, n, top, bottom, nlon, nlat, nr, stack, stktop): dlon = (e - w)/nlon dlat = (n - s)/nlat dr = (top - bottom)/nr - for i in xrange(nlon): - for j in xrange(nlat): - for k in xrange(nr): + for i in range(nlon): + for j in range(nlat): + for k in range(nr): stktop += 1 stack[stktop, 0] = w + i*dlon stack[stktop, 1] = w + (i + 1)*dlon diff --git a/fatiando/gravmag/basin2d.py b/fatiando/gravmag/basin2d.py index 2ec4abfb..7681c696 100644 --- a/fatiando/gravmag/basin2d.py +++ b/fatiando/gravmag/basin2d.py @@ -18,8 +18,8 @@ ---- """ -from __future__ import division -from future.builtins import super +from __future__ import division, absolute_import +from future.builtins import super, range import numpy as np from ..inversion.misfit import Misfit @@ -194,7 +194,7 @@ def jacobian(self, p): verts = self.p2vertices(p) delta = np.array([0, 1]) jac = np.empty((self.ndata, self.nparams)) - for i in xrange(self.nparams): + for i in range(self.nparams): diff = Polygon([verts[i + 2], verts[i + 1] - delta, verts[i], verts[i + 1] + delta], self.props) jac[:, i] = talwani.gz(self.x, self.z, [diff])/(2*delta[1]) @@ -339,11 +339,11 @@ def jacobian(self, p): verts = self.verts x, z = p jac = np.transpose([ - (talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)]) - - talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)]) + (talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)]) - + talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)]) ) / (2. 
* delta), - (talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)]) - - talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)]) + (talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)]) - + talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)]) ) / (2. * delta)]) return jac diff --git a/fatiando/gravmag/eqlayer.py b/fatiando/gravmag/eqlayer.py index 12ae351c..057c1edd 100644 --- a/fatiando/gravmag/eqlayer.py +++ b/fatiando/gravmag/eqlayer.py @@ -30,7 +30,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super, range import numpy import scipy.sparse diff --git a/fatiando/gravmag/euler.py b/fatiando/gravmag/euler.py index 11b8bc04..51c80949 100644 --- a/fatiando/gravmag/euler.py +++ b/fatiando/gravmag/euler.py @@ -20,7 +20,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super import numpy as np diff --git a/fatiando/gravmag/harvester.py b/fatiando/gravmag/harvester.py index 96e72fe0..955b9d69 100644 --- a/fatiando/gravmag/harvester.py +++ b/fatiando/gravmag/harvester.py @@ -64,6 +64,8 @@ ---- """ +from __future__ import absolute_import, division +from future.builtins import range import json import bisect from math import sqrt @@ -322,7 +324,7 @@ def harvest(data, seeds, mesh, compactness, threshold, report=False, output = [fmt_estimate(estimate, mesh.size), predicted] if report: goal, misfit, regul = update[4:] - soa = goal - compactness * 1. / (sum(mesh.shape) / 3.) * regul + soa = goal - compactness*1/(sum(mesh.shape)/3)*regul output.append({'goal': goal, 'misfit': misfit, 'regularizer': regul, 'accretions': accretions, 'shape-of-anomaly': soa}) return output @@ -357,13 +359,13 @@ def iharvest(data, seeds, mesh, compactness, threshold, restrict): totalmisfit = _misfitfunc(data, predicted) regularizer = 0. 
# Weight the regularizing function by the mean extent of the mesh - mu = compactness * 1. / (sum(mesh.shape) / 3.) + mu = compactness*1/(sum(mesh.shape)/3) yield [estimate, predicted, None, neighbors, totalgoal, totalmisfit, regularizer] accretions = 0 - for iteration in xrange(mesh.size - nseeds): + for iteration in range(mesh.size - nseeds): grew = False # To check if at least one seed grew (stopping criterion) - for s in xrange(nseeds): + for s in range(nseeds): best, bestgoal, bestmisfit, bestregularizer = _grow( neighbors[s], data, predicted, totalmisfit, mu, regularizer, threshold) @@ -429,7 +431,7 @@ def _grow(neighbors, data, predicted, totalmisfit, mu, regularizer, threshold): pred = [p + e for p, e in zip(predicted, neighbors[n].effect)] misfit = _misfitfunc(data, pred) if (misfit < totalmisfit and - float(abs(misfit - totalmisfit)) / totalmisfit >= threshold): + abs(misfit - totalmisfit)/totalmisfit >= threshold): reg = regularizer + neighbors[n].distance goal = _shapefunc(data, pred) + mu * reg if bestgoal is None or goal < bestgoal: @@ -447,7 +449,7 @@ def _shapefunc(data, predicted): """ result = 0. for d, p in zip(data, predicted): - alpha = numpy.sum(d.observed * p) / d.norm ** 2 + alpha = numpy.sum(d.observed * p)/d.norm**2 result += numpy.linalg.norm(alpha * d.observed - p) return result @@ -460,7 +462,7 @@ def _misfitfunc(data, predicted): result = 0. for d, p, in zip(data, predicted): residuals = d.observed - p - result += sqrt(numpy.dot(d.weights * residuals, residuals)) / d.norm + result += sqrt(numpy.dot(d.weights*residuals, residuals))/d.norm return result @@ -471,8 +473,8 @@ def _get_neighbors(cell, neighborhood, estimate, mesh, data, restrict): objects. 
""" indexes = [n for n in _neighbor_indexes(cell.i, mesh, restrict) - if not _is_neighbor(n, cell.props, neighborhood) - and not _in_estimate(n, cell.props, estimate)] + if not _is_neighbor(n, cell.props, neighborhood) and + not _in_estimate(n, cell.props, estimate)] neighbors = dict( (i, Neighbor( i, cell.props, cell.seed, _distance(i, cell.seed, mesh), @@ -504,9 +506,9 @@ def _index2ijk(index, mesh): Transform the index of a cell in mesh to a 3-dimensional (i,j,k) index. """ nz, ny, nx = mesh.shape - k = index / (nx * ny) - j = (index - k * (nx * ny)) / nx - i = (index - k * (nx * ny) - j * nx) + k = index//(nx*ny) + j = (index - k*(nx*ny))//nx + i = (index - k*(nx*ny) - j*nx) return i, j, k @@ -642,7 +644,7 @@ def weights(x, y, seeds, influences, decay=2): The calculated weights """ - distances = numpy.array([((x - s.x) ** 2 + (y - s.y) ** 2) / influence ** 2 + distances = numpy.array([((x - s.x) ** 2 + (y - s.y) ** 2)/influence**2 for s, influence in zip(seeds, influences)]) # min along axis=0 gets the smallest value from each column weights = numpy.exp(-(distances.min(axis=0) ** decay)) diff --git a/fatiando/gravmag/imaging.py b/fatiando/gravmag/imaging.py index 854df19d..48b0b3d9 100644 --- a/fatiando/gravmag/imaging.py +++ b/fatiando/gravmag/imaging.py @@ -49,6 +49,9 @@ ---- """ +from __future__ import absolute_import, division +from future.builtins import range +from functools import reduce import numpy from fatiando.mesher import PrismMesh @@ -109,7 +112,7 @@ def migrate(x, y, z, gz, zmin, zmax, meshshape, power=0.5, scale=1): depths = mesh.get_zs()[:-1] + 0.5 * dz weights = numpy.abs(depths) ** power / (2 * G * numpy.sqrt(numpy.pi)) density = [] - for l in xrange(nlayers): + for l in range(nlayers): sensibility_T = numpy.array( [pot_prism.gz(x, y, z, [p], dens=1) for p in mesh.get_layer(l)]) density.extend(scale * weights[l] * numpy.dot(sensibility_T, gz)) @@ -168,17 +171,16 @@ def sandwich(x, y, z, data, shape, zmin, zmax, nlayers, power=0.5): density = 
[] # Offset by the data z because in the paper the data is at z=0 for depth, weight in zip(depths - z[0], weights): + # The 1e-10 is to avoid zero division when freq[i]==0 density.extend( numpy.real(numpy.fft.ifft2( weight * - (numpy.exp(-freq * depth) - numpy.exp(-freq * (depth + dz))) - * freq * dataft / + (numpy.exp(-freq * depth) - numpy.exp(-freq * (depth + dz))) * + freq * dataft / (numpy.pi * G * reduce(numpy.add, - [w * (numpy.exp(-freq * h) - - numpy.exp(-freq * (h + dz))) ** 2 - # To avoid zero division when freq[i]==0 - + 10. ** (-10) + [w * (numpy.exp(-freq * h) - + numpy.exp(-freq * (h + dz))) ** 2 + 1e-10 for h, w in zip(depths, weights)]) ) ).ravel())) diff --git a/fatiando/gravmag/interactive.py b/fatiando/gravmag/interactive.py index a61e2d17..4a3e835a 100644 --- a/fatiando/gravmag/interactive.py +++ b/fatiando/gravmag/interactive.py @@ -10,8 +10,12 @@ ---- """ -from __future__ import division -import cPickle as pickle +from __future__ import division, absolute_import +from future.builtins import zip +try: + import cPickle as pickle +except ImportError: + import pickle import numpy from matplotlib import pyplot, widgets, patches @@ -389,7 +393,7 @@ def _make_polygon(self, vertices, density): """ poly = patches.Polygon(vertices, animated=False, alpha=0.9, color=self._density2color(density)) - x, y = zip(*poly.xy) + x, y = list(zip(*poly.xy)) line = Line2D(x, y, **self.line_args) return poly, line @@ -511,7 +515,7 @@ def _button_press_callback(self, event): elif self._drawing: if event.button == 1: self._xy.append([event.xdata, event.ydata]) - self._drawing_plot.set_data(zip(*self._xy)) + self._drawing_plot.set_data(list(zip(*self._xy))) self.canvas.restore_region(self.background) self.modelax.draw_artist(self._drawing_plot) self.canvas.blit(self.modelax.bbox) @@ -567,7 +571,7 @@ def _key_press_callback(self, event): if self._drawing and self._xy: self._xy.pop() if self._xy: - self._drawing_plot.set_data(zip(*self._xy)) + 
self._drawing_plot.set_data(list(zip(*self._xy))) else: self._drawing_plot.set_data([], []) self.canvas.restore_region(self.background) @@ -580,7 +584,7 @@ def _key_press_callback(self, event): verts = numpy.atleast_1d(self._ivert) poly.xy = numpy.array([xy for i, xy in enumerate(poly.xy) if i not in verts]) - line.set_data(zip(*poly.xy)) + line.set_data(list(zip(*poly.xy))) self._update_data() self._update_data_plot() self.canvas.restore_region(self.background) @@ -648,7 +652,7 @@ def _mouse_move_callback(self, event): dy = y - self._lastevent.ydata self.polygons[p].xy[:, 0] += dx self.polygons[p].xy[:, 1] += dy - self.lines[p].set_data(zip(*self.polygons[p].xy)) + self.lines[p].set_data(list(zip(*self.polygons[p].xy))) self._lastevent = event self.canvas.restore_region(self.background) self.modelax.draw_artist(self.polygons[p]) diff --git a/fatiando/gravmag/magdir.py b/fatiando/gravmag/magdir.py index 838b5a66..a9064059 100644 --- a/fatiando/gravmag/magdir.py +++ b/fatiando/gravmag/magdir.py @@ -16,7 +16,7 @@ """ -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super import numpy as np from ..inversion import Misfit diff --git a/fatiando/gravmag/normal_gravity.py b/fatiando/gravmag/normal_gravity.py index 33177b44..294ca04e 100644 --- a/fatiando/gravmag/normal_gravity.py +++ b/fatiando/gravmag/normal_gravity.py @@ -72,7 +72,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import import math import numpy @@ -197,8 +197,8 @@ def gamma_somigliana(latitude, ellipsoid=WGS84): lat = numpy.deg2rad(latitude) sin2 = numpy.sin(lat)**2 cos2 = numpy.cos(lat)**2 - top = ((ellipsoid.a*ellipsoid.gamma_a)*cos2 - + (ellipsoid.b*ellipsoid.gamma_b)*sin2) + top = ((ellipsoid.a*ellipsoid.gamma_a)*cos2 + + (ellipsoid.b*ellipsoid.gamma_b)*sin2) bottom = numpy.sqrt(ellipsoid.a**2*cos2 + ellipsoid.b**2*sin2) gamma = top/bottom return utils.si2mgal(gamma) diff --git 
a/fatiando/gravmag/polyprism.py b/fatiando/gravmag/polyprism.py index 0e688029..1e24d685 100644 --- a/fatiando/gravmag/polyprism.py +++ b/fatiando/gravmag/polyprism.py @@ -61,7 +61,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import import numpy from numpy import arctan2, log, sqrt @@ -118,8 +118,8 @@ def tf(xp, yp, zp, prisms, inc, dec, pmag=None): pmx, pmy, pmz = pmag res = numpy.zeros(len(xp), dtype=numpy.float) for prism in prisms: - if prism is None or ('magnetization' not in prism.props - and pmag is None): + if prism is None or ('magnetization' not in prism.props and + pmag is None): continue if pmag is None: mag = prism.props['magnetization'] @@ -273,7 +273,6 @@ def gz(xp, yp, zp, prisms): """ if xp.shape != yp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") - dummy = 10 ** (-10) size = len(xp) res = numpy.zeros(size, dtype=numpy.float) for prism in prisms: diff --git a/fatiando/gravmag/prism.py b/fatiando/gravmag/prism.py index 0fae795c..ff6ee304 100644 --- a/fatiando/gravmag/prism.py +++ b/fatiando/gravmag/prism.py @@ -83,7 +83,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import import numpy diff --git a/fatiando/gravmag/sphere.py b/fatiando/gravmag/sphere.py index 74f01b04..626ab047 100644 --- a/fatiando/gravmag/sphere.py +++ b/fatiando/gravmag/sphere.py @@ -1,7 +1,7 @@ r""" The potential fields of a homogeneous sphere. 
""" -from __future__ import division +from __future__ import division, absolute_import import numpy import numpy as np diff --git a/fatiando/gravmag/talwani.py b/fatiando/gravmag/talwani.py index 9bfc42b3..6a56cd86 100644 --- a/fatiando/gravmag/talwani.py +++ b/fatiando/gravmag/talwani.py @@ -20,6 +20,8 @@ ---- """ +from __future__ import absolute_import, division +from future.builtins import range import numpy from numpy import arctan2, pi, sin, cos, log, tan @@ -59,8 +61,8 @@ def gz(xp, zp, polygons, dens=None): raise ValueError("Input arrays xp and zp must have same shape!") res = numpy.zeros_like(xp) for polygon in polygons: - if polygon is None or ('density' not in polygon.props - and dens is None): + if polygon is None or ('density' not in polygon.props and + dens is None): continue if dens is None: density = polygon.props['density'] @@ -69,7 +71,7 @@ def gz(xp, zp, polygons, dens=None): x = polygon.x z = polygon.y nverts = polygon.nverts - for v in xrange(nverts): + for v in range(nverts): # Change the coordinates of this vertice xv = x[v] - xp zv = z[v] - zp diff --git a/fatiando/gravmag/tensor.py b/fatiando/gravmag/tensor.py index f76f0d54..d9cafad1 100644 --- a/fatiando/gravmag/tensor.py +++ b/fatiando/gravmag/tensor.py @@ -42,7 +42,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import import numpy import numpy.linalg @@ -74,8 +74,8 @@ def invariants(tensor): gyyzz = gyy * gzz gyz_sqr = gyz ** 2 inv1 = gxx * gyy + gyyzz + gxx * gzz - gxy ** 2 - gyz_sqr - gxz ** 2 - inv2 = (gxx * (gyyzz - gyz_sqr) + gxy * (gyz * gxz - gxy * gzz) - + gxz * (gxy * gyz - gxz * gyy)) + inv2 = (gxx * (gyyzz - gyz_sqr) + gxy * (gyz * gxz - gxy * gzz) + + gxz * (gxy * gyz - gxz * gyy)) inv = -((0.5 * inv2) ** 2) / ((inv1 / 3.) 
** 3) return [inv1, inv2, inv] diff --git a/fatiando/gravmag/tesseroid.py b/fatiando/gravmag/tesseroid.py index 5b8e0c17..8aa59cd9 100644 --- a/fatiando/gravmag/tesseroid.py +++ b/fatiando/gravmag/tesseroid.py @@ -87,7 +87,8 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import +from future.builtins import range import multiprocessing import warnings @@ -159,9 +160,9 @@ def _check_tesseroid(tesseroid, dens): "Invalid tesseroid dimensions {}".format(tesseroid.get_bounds()) # Check if the tesseroid has volume > 0 if (e - w <= 1e-6) or (n - s <= 1e-6) or (top - bottom <= 1e-3): - msg = ("Encountered tesseroid with dimensions smaller than the " - + "numerical threshold (1e-6 degrees or 1e-3 m). " - + "Ignoring this tesseroid.") + msg = ("Encountered tesseroid with dimensions smaller than the " + + "numerical threshold (1e-6 degrees or 1e-3 m). " + + "Ignoring this tesseroid.") warnings.warn(msg, RuntimeWarning) return None if dens is not None: @@ -218,10 +219,10 @@ def _forward_model(args): lon, sinlat, coslat, radius = _convert_coords(lon, lat, height) func = getattr(_tesseroid_numba, field) warning_msg = ( - "Stopped dividing a tesseroid because it's dimensions would be below " - + "the minimum numerical threshold (1e-6 degrees or 1e-3 m). " - + "Will compute without division. Cannot guarantee the accuracy of " - + "the solution.") + "Stopped dividing a tesseroid because it's dimensions would be " + + "below the minimum numerical threshold (1e-6 degrees or 1e-3 m). " + + "Will compute without division. Cannot guarantee the accuracy of " + + "the solution.") # Arrays needed by the kernel. Can't allocate them inside the kernel # because numba doesn't like that. 
stack = np.empty((STACK_SIZE, 6), dtype='float') @@ -258,7 +259,7 @@ def _split_arrays(arrays, extra_args, nparts): """ size = len(arrays[0]) n = size//nparts - strides = [(i*n, (i + 1)*n) for i in xrange(nparts - 1)] + strides = [(i*n, (i + 1)*n) for i in range(nparts - 1)] strides.append((strides[-1][-1], size)) chunks = [[x[low:high] for x in arrays] + extra_args for low, high in strides] diff --git a/fatiando/gravmag/tests/test_eqlayer.py b/fatiando/gravmag/tests/test_eqlayer.py index f9e0316e..8747dd3e 100644 --- a/fatiando/gravmag/tests/test_eqlayer.py +++ b/fatiando/gravmag/tests/test_eqlayer.py @@ -1,13 +1,13 @@ -from __future__ import division +from __future__ import division, absolute_import import pytest import numpy as np from numpy.testing import assert_allclose, assert_array_almost_equal -from fatiando.gravmag.eqlayer import (EQLGravity, EQLTotalField, - PELGravity, PELTotalField, PELSmoothness) -from fatiando.inversion import Damping -from fatiando.gravmag import sphere, prism -from fatiando.mesher import PointGrid, Prism -from fatiando import utils, gridder +from ..eqlayer import EQLGravity, EQLTotalField, PELGravity, PELTotalField, \ + PELSmoothness +from ...inversion import Damping +from .. import sphere, prism +from ...mesher import PointGrid, Prism +from ... 
import utils, gridder @pytest.mark.xfail @@ -47,14 +47,14 @@ def test_pelgrav_prism_interp(): shape = (40, 40) n = shape[0]*shape[1] area = [-2000, 2000, -2000, 2000] - x, y, z = gridder.scatter(area, n, z=-100, seed=42) + x, y, z = gridder.scatter(area, n, z=-100, seed=42) data = prism.gz(x, y, z, model) layer = PointGrid(area, 100, shape) windows = (20, 20) degree = 1 - eql = (PELGravity(x, y, z, data, layer, windows, degree) - + 5e-22*PELSmoothness(layer, windows, degree)) + eql = (PELGravity(x, y, z, data, layer, windows, degree) + + 5e-22*PELSmoothness(layer, windows, degree)) eql.fit() layer.addprop('density', eql.estimate_) @@ -104,7 +104,7 @@ def test_eqlgrav_prism_interp(): shape = (30, 30) n = shape[0]*shape[1] area = [-2000, 2000, -2000, 2000] - x, y, z = gridder.scatter(area, n, z=-100, seed=42) + x, y, z = gridder.scatter(area, n, z=-100, seed=42) data = prism.gz(x, y, z, model) layer = PointGrid(area, 200, shape) eql = EQLGravity(x, y, z, data, layer) + 1e-23*Damping(layer.size) @@ -134,8 +134,8 @@ def test_eqlayer_polereduce(): true = prism.tf(x, y, z, model, -90, 0, pmag=utils.ang2vec(5, -90, 0)) layer = PointGrid(area, 200, shape) - eql = (EQLTotalField(x, y, z, data, inc, dec, layer, sinc, sdec) - + 1e-24*Damping(layer.size)) + eql = (EQLTotalField(x, y, z, data, inc, dec, layer, sinc, sdec) + + 1e-24*Damping(layer.size)) eql.fit() assert_allclose(eql[0].predicted(), data, rtol=0.01) diff --git a/fatiando/gravmag/tests/test_euler.py b/fatiando/gravmag/tests/test_euler.py index 7c63edc3..97d1eced 100644 --- a/fatiando/gravmag/tests/test_euler.py +++ b/fatiando/gravmag/tests/test_euler.py @@ -1,9 +1,8 @@ -from __future__ import division +from __future__ import division, absolute_import import numpy as np -from fatiando.gravmag import EulerDeconv, EulerDeconvEW, EulerDeconvMW -from fatiando.gravmag import sphere -from fatiando.mesher import Sphere -from fatiando import utils, gridder +from .. 
import EulerDeconv, EulerDeconvEW, EulerDeconvMW, sphere +from ...mesher import Sphere +from ... import utils, gridder model = None xp, yp, zp = None, None, None @@ -28,12 +27,12 @@ def setup(): field = sphere.tf(x, y, z, [model], inc, dec) + base # Use finite difference derivatives so that these tests don't depend on the # performance of the FFT derivatives. - dx = (sphere.tf(x + 1, y, z, [model], inc, dec) - - sphere.tf(x - 1, y, z, [model], inc, dec))/2 - dy = (sphere.tf(x, y + 1, z, [model], inc, dec) - - sphere.tf(x, y - 1, z, [model], inc, dec))/2 - dz = (sphere.tf(x, y, z + 1, [model], inc, dec) - - sphere.tf(x, y, z - 1, [model], inc, dec))/2 + dx = (sphere.tf(x + 1, y, z, [model], inc, dec) - + sphere.tf(x - 1, y, z, [model], inc, dec))/2 + dy = (sphere.tf(x, y + 1, z, [model], inc, dec) - + sphere.tf(x, y - 1, z, [model], inc, dec))/2 + dz = (sphere.tf(x, y, z + 1, [model], inc, dec) - + sphere.tf(x, y, z - 1, [model], inc, dec))/2 def test_euler_sphere_mag(): diff --git a/fatiando/gravmag/tests/test_harvester.py b/fatiando/gravmag/tests/test_harvester.py index 5a7f6886..d328e7a5 100644 --- a/fatiando/gravmag/tests/test_harvester.py +++ b/fatiando/gravmag/tests/test_harvester.py @@ -1,7 +1,9 @@ +from __future__ import absolute_import +from future.builtins import range import numpy as np -from fatiando.gravmag import harvester, prism -from fatiando.mesher import PrismMesh -from fatiando import gridder +from .. import harvester, prism +from ...mesher import PrismMesh +from ... 
import gridder def test_harvest_restrict(): @@ -24,7 +26,7 @@ def fill(i, case): for testcase in cases: mref = PrismMesh(bounds, shape) mesh = mref.copy() - mref.addprop('density', [fill(i, testcase) for i in xrange(mref.size)]) + mref.addprop('density', [fill(i, testcase) for i in range(mref.size)]) # Calculate reference gravity field xp, yp, zp = gridder.regular(bounds[:4], shapegz, z=-1) gzref = prism.gz(xp, yp, zp, mref) diff --git a/fatiando/gravmag/tests/test_normal_gravity.py b/fatiando/gravmag/tests/test_normal_gravity.py index 590d113c..cbdc3edb 100644 --- a/fatiando/gravmag/tests/test_normal_gravity.py +++ b/fatiando/gravmag/tests/test_normal_gravity.py @@ -1,15 +1,13 @@ -from __future__ import division -from fatiando.gravmag.normal_gravity import (WGS84, - gamma_somigliana, - gamma_somigliana_free_air, - gamma_closed_form, - bouguer_plate) - -from fatiando import utils +from __future__ import division, absolute_import +from future.builtins import range import numpy from numpy.testing import assert_almost_equal +from ..normal_gravity import WGS84, gamma_somigliana, \ + gamma_somigliana_free_air, gamma_closed_form, bouguer_plate +from ... 
import utils + def test_bouguer(): "gravmag.normal_gravity.bouguer_plate returns correct results" @@ -27,7 +25,7 @@ def test_bouguer(): bg_water = bg[:25][::-1] bg_rock = bg[26:] assert len(bg_rock) == len(bg_water), "Diff size in rock and water" - for i in xrange(len(bg_water)): + for i in range(len(bg_water)): assert_almost_equal(bg_water[i], -0.5*bg_rock[i], decimal=5, err_msg="water = -0.5*rock with array") @@ -58,12 +56,12 @@ def test_closed_form(): lat = numpy.linspace(-90, 90, 200) som = gamma_somigliana(lat, ellipsoid=WGS84) closed = gamma_closed_form(lat, 0, ellipsoid=WGS84) - for i in xrange(len(lat)): + for i in range(len(lat)): assert_almost_equal(closed[i], som[i], decimal=3, err_msg='lat = {}'.format(lat[i])) - gradient = (gamma_closed_form(lat, 1, ellipsoid=WGS84) - - gamma_closed_form(lat, 0, ellipsoid=WGS84)) + gradient = (gamma_closed_form(lat, 1, ellipsoid=WGS84) - + gamma_closed_form(lat, 0, ellipsoid=WGS84)) mean = numpy.mean(gradient) assert_almost_equal(mean, -0.3086, decimal=4, err_msg='mean vs free-air') diff --git a/fatiando/gravmag/tests/test_polyprism.py b/fatiando/gravmag/tests/test_polyprism.py index 8f6dbeee..af1e8df3 100644 --- a/fatiando/gravmag/tests/test_polyprism.py +++ b/fatiando/gravmag/tests/test_polyprism.py @@ -1,8 +1,9 @@ +from __future__ import absolute_import, division import numpy as np -from fatiando.mesher import PolygonalPrism, Prism -from fatiando.gravmag import polyprism, prism, _polyprism_numpy -from fatiando import utils +from ...mesher import PolygonalPrism, Prism +from .. import polyprism, prism, _polyprism_numpy +from ... 
import utils model = None prismmodel = None diff --git a/fatiando/gravmag/tests/test_prism.py b/fatiando/gravmag/tests/test_prism.py index 558379c6..247a0bd1 100644 --- a/fatiando/gravmag/tests/test_prism.py +++ b/fatiando/gravmag/tests/test_prism.py @@ -1,10 +1,11 @@ +from __future__ import absolute_import import numpy as np from numpy.testing import assert_array_almost_equal as assert_almost from pytest import raises -from fatiando.mesher import Prism -from fatiando.gravmag import _prism_numpy, prism -from fatiando import utils, gridder +from ...mesher import Prism +from .. import _prism_numpy, prism +from ... import utils, gridder def test_fails_if_shape_mismatch(): @@ -235,8 +236,6 @@ def test_cython_agains_numpy(): def test_around(): "gravmag.prism gravitational results are consistent around the prism" - funcs = ['potential', 'gx', 'gy', 'gz', - 'gxx', 'gxy', 'gxz', 'gyy', 'gyz', 'gzz'] model = [Prism(-300, 300, -300, 300, -300, 300, {'density': 1000})] # Make the computation points surround the prism shape = (101, 101) diff --git a/fatiando/gravmag/tests/test_sphere.py b/fatiando/gravmag/tests/test_sphere.py index 2b795dfc..4f305eb3 100644 --- a/fatiando/gravmag/tests/test_sphere.py +++ b/fatiando/gravmag/tests/test_sphere.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division import os import numpy as np import numpy.testing as npt diff --git a/fatiando/gravmag/tests/test_tesseroid.py b/fatiando/gravmag/tests/test_tesseroid.py index 15376fad..53a491bc 100644 --- a/fatiando/gravmag/tests/test_tesseroid.py +++ b/fatiando/gravmag/tests/test_tesseroid.py @@ -1,14 +1,14 @@ -from __future__ import division +from __future__ import division, absolute_import import numpy as np from numpy.testing import assert_array_almost_equal, assert_allclose from pytest import raises import multiprocessing import warnings -from fatiando.gravmag import tesseroid -from fatiando.mesher import Tesseroid, TesseroidMesh -from fatiando import gridder -from 
fatiando.constants import SI2MGAL, SI2EOTVOS, G, MEAN_EARTH_RADIUS +from .. import tesseroid +from ...mesher import Tesseroid, TesseroidMesh +from ... import gridder +from ...constants import SI2MGAL, SI2EOTVOS, G, MEAN_EARTH_RADIUS def test_warn_if_division_makes_too_small(): @@ -23,17 +23,17 @@ def test_warn_if_division_makes_too_small(): lat, lon = np.zeros((2, 1)) h = np.array([0.1]) warning_msg = ( - "Stopped dividing a tesseroid because it's dimensions would be below " - + "the minimum numerical threshold (1e-6 degrees or 1e-3 m). " - + "Will compute without division. Cannot guarantee the accuracy of " - + "the solution.") + "Stopped dividing a tesseroid because it's dimensions would be " + + "below the minimum numerical threshold (1e-6 degrees or 1e-3 m). " + + "Will compute without division. Cannot guarantee the accuracy of " + + "the solution.") for i, model in enumerate(models): with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") tesseroid.gz(lon, lat, h, model) - msg = ("Failed model {}. Got {} warnings.\n\n".format(i, len(w)) - + "\n\n".join([str(j.message) for j in w])) + msg = ("Failed model {}. Got {} warnings.\n\n".format(i, len(w)) + + "\n\n".join([str(j.message) for j in w])) assert len(w) >= 1, msg assert any(issubclass(j.category, RuntimeWarning) for j in w), \ "No RuntimeWarning found. " + msg @@ -50,16 +50,16 @@ def test_warn_if_too_small(): lat, lon = np.zeros((2, 1)) h = np.array([10]) warning_msg = ( - "Encountered tesseroid with dimensions smaller than the " - + "numerical threshold (1e-6 degrees or 1e-3 m). " - + "Ignoring this tesseroid.") + "Encountered tesseroid with dimensions smaller than the " + + "numerical threshold (1e-6 degrees or 1e-3 m). " + + "Ignoring this tesseroid.") for i, model in enumerate(models): with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. 
warnings.simplefilter("always") tesseroid.gz(lon, lat, h, model) - msg = ("Failed model {}. Got {} warnings.\n\n".format(i, len(w)) - + "\n\n".join([str(j.message) for j in w])) + msg = ("Failed model {}. Got {} warnings.\n\n".format(i, len(w)) + + "\n\n".join([str(j.message) for j in w])) assert len(w) >= 1, msg assert any(issubclass(j.category, RuntimeWarning) for j in w), \ "No RuntimeWarning found. " + msg @@ -103,7 +103,7 @@ def test_ignore_zero_volume(): Tesseroid(5, 10, -10, -5, 2000.5, 0, props)] lon, lat, height = gridder.regular((-20, 20, -20, 20), (50, 50), z=250e3) for f in 'potential gx gy gz gxx gxy gxz gyy gyz gzz'.split(): - with warnings.catch_warnings(record=True) as w: + with warnings.catch_warnings(record=True): func = getattr(tesseroid, f) f1 = func(lon, lat, height, model) f2 = func(lon, lat, height, [model[-1]]) diff --git a/fatiando/gravmag/tests/test_transform.py b/fatiando/gravmag/tests/test_transform.py index f8e59047..53a4a1a4 100644 --- a/fatiando/gravmag/tests/test_transform.py +++ b/fatiando/gravmag/tests/test_transform.py @@ -1,10 +1,10 @@ -from __future__ import division +from __future__ import division, absolute_import import pytest import numpy as np import numpy.testing as npt -from fatiando.gravmag import transform, prism -from fatiando import gridder, utils -from fatiando.mesher import Prism +from .. import transform, prism +from ... 
import gridder, utils +from ...mesher import Prism def _trim(array, shape, d=20): @@ -68,9 +68,9 @@ def test_upcontinue_warning(): x, y, z = gridder.regular([-5000, 5000, -5000, 5000], shape, z=-500) data = prism.gz(x, y, z, model) with pytest.warns(UserWarning): - up = transform.upcontinue(x, y, data, shape, height=0) + transform.upcontinue(x, y, data, shape, height=0) with pytest.warns(UserWarning): - up = transform.upcontinue(x, y, data, shape, height=-100) + transform.upcontinue(x, y, data, shape, height=-100) def test_second_horizontal_derivatives_fd(): diff --git a/fatiando/gravmag/transform.py b/fatiando/gravmag/transform.py index 452262e5..c4ac4997 100644 --- a/fatiando/gravmag/transform.py +++ b/fatiando/gravmag/transform.py @@ -29,7 +29,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import import warnings import numpy diff --git a/fatiando/gridder/__init__.py b/fatiando/gridder/__init__.py index 22abcf19..b217021c 100644 --- a/fatiando/gridder/__init__.py +++ b/fatiando/gridder/__init__.py @@ -1,6 +1,7 @@ """ Create and operate on data grids, scatters, and profiles. """ +from __future__ import absolute_import from .slicing import inside, cut from .interpolation import interp, interp_at, profile from .padding import pad_array, unpad_array, pad_coords diff --git a/fatiando/gridder/padding.py b/fatiando/gridder/padding.py index f57ac526..3e3fd130 100644 --- a/fatiando/gridder/padding.py +++ b/fatiando/gridder/padding.py @@ -2,6 +2,7 @@ Apply padding to data grids using different algorithms for the filling. 
""" from __future__ import division, absolute_import, print_function +from future.builtins import range import numpy as np @@ -283,7 +284,7 @@ def _padcvec(x, n, dx): # of points on either side and the point spacing xp = np.zeros(len(x) + n[0] + n[1]) xp[n[0]:n[0]+len(x)] = x[:] - for ii, jj in enumerate(range(0, n[0])[::-1]): + for ii, jj in enumerate(list(range(0, n[0]))[::-1]): xp[ii] = x[0] - ((jj + 1) * dx) for ii, jj in enumerate(range(len(x) + n[0], len(xp))): xp[jj] = x[-1] + (dx * (ii + 1)) diff --git a/fatiando/gridder/tests/test_point_generation.py b/fatiando/gridder/tests/test_point_generation.py index 20fe78e8..11edfd0d 100644 --- a/fatiando/gridder/tests/test_point_generation.py +++ b/fatiando/gridder/tests/test_point_generation.py @@ -1,4 +1,5 @@ from __future__ import division, absolute_import, print_function +from future.builtins import range import numpy.testing as npt import numpy as np from pytest import raises @@ -132,7 +133,7 @@ def test_circular_scatter_random(): "gridder.circular_scatter return different sequences if random=True" area = [-1000, 1200, -40, 200] size = 1300 - for i in xrange(20): + for i in range(20): x1, y1 = gridder.circular_scatter(area, size, random=True) x2, y2 = gridder.circular_scatter(area, size, random=True) with raises(AssertionError): diff --git a/fatiando/inversion/__init__.py b/fatiando/inversion/__init__.py index 0c787ec7..2a19bddb 100644 --- a/fatiando/inversion/__init__.py +++ b/fatiando/inversion/__init__.py @@ -344,6 +344,8 @@ ---- """ -from .misfit import * -from .regularization import * -from .hyper_param import * +from __future__ import absolute_import +from .misfit import Misfit +from .regularization import Damping, Smoothness, Smoothness1D, Smoothness2D, \ + TotalVariation, TotalVariation1D, TotalVariation2D +from .hyper_param import LCurve diff --git a/fatiando/inversion/base.py b/fatiando/inversion/base.py index 34cc46e4..f1538c5b 100644 --- a/fatiando/inversion/base.py +++ b/fatiando/inversion/base.py 
@@ -30,7 +30,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import from future.utils import with_metaclass from future.builtins import super, object, range, isinstance, zip, map import hashlib @@ -213,8 +213,8 @@ def fit(self): ``estimate_``. """ - not_configured = (getattr(self, 'fit_method', None) is None - or getattr(self, 'fit_args', None) is None) + not_configured = (getattr(self, 'fit_method', None) is None or + getattr(self, 'fit_args', None) is None) if not_configured: if self.islinear: self.config('linear') diff --git a/fatiando/inversion/hyper_param.py b/fatiando/inversion/hyper_param.py index 3fc32acd..fca24e9a 100644 --- a/fatiando/inversion/hyper_param.py +++ b/fatiando/inversion/hyper_param.py @@ -16,15 +16,14 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import +from future.builtins import range import multiprocessing import numpy from ..vis import mpl from .base import OptimizerMixin -__all__ = ['LCurve'] - class LCurve(OptimizerMixin): """ @@ -364,9 +363,9 @@ def dist(p1, p2): cte = 7. * numpy.pi / 8. 
angmin = None c = [x[-1], y[-1]] - for k in xrange(0, n - 2): + for k in range(0, n - 2): b = [x[k], y[k]] - for j in xrange(k + 1, n - 1): + for j in range(k + 1, n - 1): a = [x[j], y[j]] ab = dist(a, b) ac = dist(a, c) diff --git a/fatiando/inversion/misfit.py b/fatiando/inversion/misfit.py index c4ee69bd..4b16406c 100644 --- a/fatiando/inversion/misfit.py +++ b/fatiando/inversion/misfit.py @@ -15,7 +15,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import import copy from abc import abstractmethod import numpy as np @@ -106,8 +106,8 @@ def copy(self, deep=False): obj = copy.copy(self) for name in ['predicted', 'jacobian', 'hessian']: meth = getattr(obj, name) - is_cached = (isinstance(meth, CachedMethod) - or isinstance(meth, CachedMethodPermanent)) + is_cached = (isinstance(meth, CachedMethod) or + isinstance(meth, CachedMethodPermanent)) if is_cached: setattr(obj, name, copy.copy(meth)) getattr(obj, name).instance = obj diff --git a/fatiando/inversion/optimization.py b/fatiando/inversion/optimization.py index 91d4b316..08a391f0 100644 --- a/fatiando/inversion/optimization.py +++ b/fatiando/inversion/optimization.py @@ -38,7 +38,8 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import +from future.builtins import range import copy import warnings import numpy @@ -160,7 +161,7 @@ def newton(hessian, gradient, value, initial, maxit=30, tol=10 ** -5, p = numpy.array(initial, dtype=numpy.float) misfit = value(p) stats['objective'].append(misfit) - for iteration in xrange(maxit): + for iteration in range(maxit): hess = hessian(p) grad = gradient(p) if precondition: @@ -179,9 +180,9 @@ def newton(hessian, gradient, value, initial, maxit=30, tol=10 ** -5, misfit = newmisfit if iteration == maxit - 1: warnings.warn( - 'Exited because maximum iterations reached. ' - + 'Might not have achieved convergence. 
' - + 'Try inscreasing the maximum number of iterations allowed.', + 'Exited because maximum iterations reached. ' + + 'Might not have achieved convergence. ' + + 'Try inscreasing the maximum number of iterations allowed.', RuntimeWarning) @@ -250,7 +251,7 @@ def levmarq(hessian, gradient, value, initial, maxit=30, maxsteps=20, lamb=10, stats['objective'].append(misfit) stats['step_attempts'].append(0) stats['step_size'].append(lamb) - for iteration in xrange(maxit): + for iteration in range(maxit): hess = hessian(p) minus_gradient = -gradient(p) if precondition: @@ -261,7 +262,7 @@ def levmarq(hessian, gradient, value, initial, maxit=30, maxsteps=20, lamb=10, minus_gradient = safe_dot(precond, minus_gradient) stagnation = True diag = scipy.sparse.diags(safe_diagonal(hess), 0).tocsr() - for step in xrange(maxsteps): + for step in range(maxsteps): newp = p + safe_solve(hess + lamb * diag, minus_gradient) newmisfit = value(newp) if newmisfit >= misfit: @@ -275,10 +276,10 @@ def levmarq(hessian, gradient, value, initial, maxit=30, maxsteps=20, lamb=10, if stagnation: stop = True warnings.warn( - "Exited because couldn't take a step without increasing " - + 'the objective function. ' - + 'Might not have achieved convergence. ' - + 'Try inscreasing the max number of step attempts allowed.', + "Exited because couldn't take a step without increasing " + + 'the objective function. ' + + 'Might not have achieved convergence. ' + + 'Try inscreasing the max number of step attempts allowed.', RuntimeWarning) else: stop = newmisfit > misfit or abs( @@ -296,9 +297,9 @@ def levmarq(hessian, gradient, value, initial, maxit=30, maxsteps=20, lamb=10, break if iteration == maxit - 1: warnings.warn( - 'Exited because maximum iterations reached. ' - + 'Might not have achieved convergence. ' - + 'Try inscreasing the maximum number of iterations allowed.', + 'Exited because maximum iterations reached. ' + + 'Might not have achieved convergence. 
' + + 'Try inscreasing the maximum number of iterations allowed.', RuntimeWarning) @@ -401,14 +402,14 @@ def steepest(gradient, value, initial, maxit=1000, linesearch=True, # This is a mystic parameter of the Armijo rule alpha = 10 ** (-4) stagnation = False - for iteration in xrange(maxit): + for iteration in range(maxit): grad = gradient(p) if linesearch: # Calculate now to avoid computing inside the loop gradnorm = numpy.linalg.norm(grad) ** 2 stagnation = True # Determine the best step size - for i in xrange(maxsteps): + for i in range(maxsteps): stepsize = beta**i newp = p - stepsize*grad newmisfit = value(newp) @@ -421,10 +422,10 @@ def steepest(gradient, value, initial, maxit=1000, linesearch=True, if stagnation: stop = True warnings.warn( - "Exited because couldn't take a step without increasing " - + 'the objective function. ' - + 'Might not have achieved convergence. ' - + 'Try inscreasing the max number of step attempts allowed.', + "Exited because couldn't take a step without increasing " + + 'the objective function. ' + + 'Might not have achieved convergence. ' + + 'Try inscreasing the max number of step attempts allowed.', RuntimeWarning) else: stop = abs((newmisfit - misfit) / misfit) < tol @@ -441,9 +442,9 @@ def steepest(gradient, value, initial, maxit=1000, linesearch=True, break if iteration == maxit - 1: warnings.warn( - 'Exited because maximum iterations reached. ' - + 'Might not have achieved convergence. ' - + 'Try inscreasing the maximum number of iterations allowed.', + 'Exited because maximum iterations reached. ' + + 'Might not have achieved convergence. 
' + + 'Try inscreasing the maximum number of iterations allowed.', RuntimeWarning) @@ -535,21 +536,21 @@ def acor(value, bounds, nparams, nants=None, archive_size=None, maxit=1000, variance = 2 * diverse ** 2 * archive_size ** 2 weights = amp * numpy.exp(-numpy.arange(archive_size) ** 2 / variance) weights /= numpy.sum(weights) - for iteration in xrange(maxit): - for k in xrange(nants): + for iteration in range(maxit): + for k in range(nants): # Sample the propabilities to produce new estimates ant = numpy.empty(nparams, dtype=numpy.float) # 1. Choose a pdf from the archive pdf = numpy.searchsorted( numpy.cumsum(weights), numpy.random.uniform()) - for i in xrange(nparams): + for i in range(nparams): # 2. Get the mean and stddev of the chosen pdf mean = archive[pdf][i] std = (evap / (archive_size - 1)) * numpy.sum( abs(p[i] - archive[pdf][i]) for p in archive) # 3. Sample the pdf until the samples are in bounds - for atempt in xrange(100): + for atempt in range(100): ant[i] = numpy.random.normal(mean, std) if bounds.size == 2: low, high = bounds diff --git a/fatiando/inversion/regularization.py b/fatiando/inversion/regularization.py index eb04dc12..c675b62e 100644 --- a/fatiando/inversion/regularization.py +++ b/fatiando/inversion/regularization.py @@ -34,8 +34,8 @@ ---- """ -from __future__ import division -from future.builtins import super +from __future__ import division, absolute_import +from future.builtins import super, range import copy import numpy @@ -44,9 +44,6 @@ from .base import OperatorMixin, CachedMethod, CachedMethodPermanent from ..utils import safe_dot -__all__ = ["Damping", "Smoothness", "Smoothness1D", "Smoothness2D", - "TotalVariation", "TotalVariation1D", "TotalVariation2D"] - class Regularization(OperatorMixin): """ @@ -627,8 +624,8 @@ def fd1d(size): [ 0, 0, 1, -1]]) """ - i = range(size - 1) + range(size - 1) - j = range(size - 1) + range(1, size) + i = list(range(size - 1)) + list(range(size - 1)) + j = list(range(size - 1)) + list(range(1, 
size)) v = [1] * (size - 1) + [-1] * (size - 1) return scipy.sparse.coo_matrix((v, (i, j)), (size - 1, size)).tocsr() @@ -670,8 +667,8 @@ def fd2d(shape): I, J, V = [], [], [] deriv = 0 param = 0 - for i in xrange(ny): - for j in xrange(nx - 1): + for i in range(ny): + for j in range(nx - 1): I.extend([deriv, deriv]) J.extend([param, param + 1]) V.extend([1, -1]) @@ -679,8 +676,8 @@ def fd2d(shape): param += 1 param += 1 param = 0 - for i in xrange(ny - 1): - for j in xrange(nx): + for i in range(ny - 1): + for j in range(nx): I.extend([deriv, deriv]) J.extend([param, param + nx]) V.extend([1, -1]) diff --git a/fatiando/mesher.py b/fatiando/mesher.py index 8de6ce25..9d8f1625 100644 --- a/fatiando/mesher.py +++ b/fatiando/mesher.py @@ -1,7 +1,8 @@ """ Generate and operate on various kinds of meshes and geometric elements """ -from __future__ import division +from __future__ import division, absolute_import +from future.builtins import range import numpy import scipy.special import scipy.interpolate @@ -1154,8 +1155,8 @@ class PrismMesh(object): def __init__(self, bounds, shape, props=None): object.__init__(self) nz, ny, nx = shape - if (not isinstance(nx, int) or not isinstance(ny, int) - or not isinstance(nz, int)): + if not isinstance(nx, int) or not isinstance(ny, int) or \ + not isinstance(nz, int): raise AttributeError( 'Invalid mesh shape {}. shape must be integers'.format( str(shape))) @@ -1279,7 +1280,7 @@ def carvetopo(self, x, y, height, below=False): if numpy.ma.isMA(topo): topo_mask = topo.mask else: - topo_mask = [False for i in xrange(len(topo))] + topo_mask = [False for i in range(len(topo))] c = 0 for cellz in zc: for h, masked in zip(topo, topo_mask): @@ -1370,7 +1371,7 @@ def get_layer(self, i): raise IndexError('Layer index %d is out of range.' 
% (i)) start = i * nx * ny end = (i + 1) * nx * ny - layer = [self.__getitem__(p) for p in xrange(start, end)] + layer = [self.__getitem__(p) for p in range(start, end)] return layer def layers(self): @@ -1394,7 +1395,7 @@ def layers(self): """ nz, ny, nx = self.shape - for i in xrange(nz): + for i in range(nz): yield self.get_layer(i) def dump(self, meshfile, propfile, prop): diff --git a/fatiando/seismic/__init__.py b/fatiando/seismic/__init__.py index 2199099f..21f63e34 100644 --- a/fatiando/seismic/__init__.py +++ b/fatiando/seismic/__init__.py @@ -20,5 +20,6 @@ ---- """ +from __future__ import absolute_import from .elastic_moduli import lame_lambda, lame_mu from .wavelets import RickerWavelet diff --git a/fatiando/seismic/conv.py b/fatiando/seismic/conv.py index f2b55159..462abaf3 100644 --- a/fatiando/seismic/conv.py +++ b/fatiando/seismic/conv.py @@ -23,9 +23,9 @@ """ -from __future__ import division +from __future__ import division, absolute_import import numpy as np -from scipy import interpolate # linear interpolation of velocity/density +from scipy import interpolate def convolutional_model(rc, f, wavelet, dt): @@ -135,11 +135,9 @@ def depth_2_time(vel, model, dt, dz): for j in range(1, n_samples): TWT[j, :] = TWT[j-1]+2*dz/vel[j, :] TMAX = max(TWT[-1, :]) - TMIN = min(TWT[0, :]) TWT_rs = np.zeros(int(np.ceil(TMAX/dt_dwn))) for j in range(1, len(TWT_rs)): TWT_rs[j] = TWT_rs[j-1]+dt_dwn - resmpl = int(dt/dt_dwn) model_t = _resampling(model, TMAX, TWT, TWT_rs, dt, dt_dwn, n_traces) return model_t diff --git a/fatiando/seismic/epic2d.py b/fatiando/seismic/epic2d.py index ffb78ce0..7993deac 100644 --- a/fatiando/seismic/epic2d.py +++ b/fatiando/seismic/epic2d.py @@ -14,7 +14,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super import numpy as np diff --git a/fatiando/seismic/profile.py b/fatiando/seismic/profile.py index d1b40ef1..c6522b04 100644 --- a/fatiando/seismic/profile.py 
+++ b/fatiando/seismic/profile.py @@ -19,8 +19,8 @@ ---- """ -from __future__ import division -from future.builtins import super +from __future__ import division, absolute_import +from future.builtins import super, range import numpy as np from . import ttime2d @@ -69,11 +69,11 @@ def layered_straight_ray(thickness, velocity, zp): raise ValueError("thickness and velocity must have same length") nlayers = len(thickness) zmax = sum(thickness) - z = [sum(thickness[:i]) for i in xrange(nlayers + 1)] + z = [sum(thickness[:i]) for i in range(nlayers + 1)] layers = [Square((0, zmax, z[i], z[i + 1]), props={'vp': velocity[i]}) - for i in xrange(nlayers)] + for i in range(nlayers)] srcs = [(0, 0)] * len(zp) - recs = [(0, z) for z in zp] + recs = [(0, k) for k in zp] return ttime2d.straight(layers, 'vp', srcs, recs) @@ -185,9 +185,9 @@ def jacobian(self, p): thicks = self.thickness nlayers = len(thicks) zmax = np.sum(thicks) - z = [np.sum(thicks[:i]) for i in xrange(nlayers + 1)] + z = [np.sum(thicks[:i]) for i in range(nlayers + 1)] layers = [Square((0, zmax, z[i], z[i + 1]), props={'vp': 1.}) - for i in xrange(nlayers)] + for i in range(nlayers)] srcs = [(0, 0)]*self.ndata recs = np.transpose([np.zeros(self.ndata), self.zp]) jac = np.empty((self.ndata, self.nparams)) diff --git a/fatiando/seismic/srtomo.py b/fatiando/seismic/srtomo.py index 09848e01..916cf923 100644 --- a/fatiando/seismic/srtomo.py +++ b/fatiando/seismic/srtomo.py @@ -14,7 +14,7 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super import numpy as np import scipy.sparse diff --git a/fatiando/seismic/tests/test_seismic_conv.py b/fatiando/seismic/tests/test_seismic_conv.py index 5bc16ced..2026d538 100644 --- a/fatiando/seismic/tests/test_seismic_conv.py +++ b/fatiando/seismic/tests/test_seismic_conv.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division import numpy as np from numpy.testing import 
assert_array_almost_equal, assert_allclose from pytest import raises @@ -13,7 +14,7 @@ def test_impulse_response(): """ w = conv.rickerwave(30., 2.e-3) rc_test = np.zeros((w.shape[0], 20)) - rc_test[w.shape[0]/2, :] = 1. + rc_test[w.shape[0]//2, :] = 1. spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3) for j in range(0, rc_test.shape[1]): assert_array_almost_equal(spike[:, j], w, 9) @@ -27,12 +28,12 @@ def test_rc_shorter_than_wavelet(): """ w = conv.rickerwave(30., 2.e-3) rc_test = np.zeros((21, 20)) - rc_test[rc_test.shape[0]/2, :] = 1 + rc_test[rc_test.shape[0]//2, :] = 1 spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3) for j in range(0, rc_test.shape[1]): - assert_array_almost_equal(spike[:, j], - w[(w.shape[0]-rc_test.shape[0])/2: - -(w.shape[0]-rc_test.shape[0])/2], 9) + wmin = (w.shape[0] - rc_test.shape[0])//2 + wmax = -(w.shape[0] - rc_test.shape[0])//2 + assert_array_almost_equal(spike[:, j], w[wmin:wmax], 9) def test_reflectivity_wrong_dimensions(): diff --git a/fatiando/seismic/tests/test_seismic_srtomo.py b/fatiando/seismic/tests/test_seismic_srtomo.py index 19e6ad9a..43f215a6 100644 --- a/fatiando/seismic/tests/test_seismic_srtomo.py +++ b/fatiando/seismic/tests/test_seismic_srtomo.py @@ -1,4 +1,4 @@ -from __future__ import division, print_function +from __future__ import division, print_function, absolute_import from future.builtins import range import numpy as np from numpy.testing import assert_array_almost_equal, assert_allclose diff --git a/fatiando/seismic/tests/test_wavelets.py b/fatiando/seismic/tests/test_wavelets.py index 6c1b805e..4cce54ec 100644 --- a/fatiando/seismic/tests/test_wavelets.py +++ b/fatiando/seismic/tests/test_wavelets.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from pytest import raises from .. 
import RickerWavelet @@ -18,6 +19,6 @@ def test_ricker_copy(): def test_ricker_fail_zero_frequency(): "Wavelet creation should fail if f=0" with raises(AssertionError): - w = RickerWavelet(f=-1) + RickerWavelet(f=-1) with raises(AssertionError): - w = RickerWavelet(f=0) + RickerWavelet(f=0) diff --git a/fatiando/seismic/ttime2d.py b/fatiando/seismic/ttime2d.py index c76010cb..58610d5f 100644 --- a/fatiando/seismic/ttime2d.py +++ b/fatiando/seismic/ttime2d.py @@ -7,6 +7,8 @@ ---- """ +from __future__ import absolute_import, division +from future.builtins import range import multiprocessing import math import numpy @@ -90,7 +92,7 @@ def straight(cells, prop, srcs, recs, velocity=None, par=False): perjob = size / jobs processes = [] pipes = [] - for i in xrange(jobs): + for i in range(jobs): if i == jobs - 1: end = size else: @@ -127,7 +129,7 @@ def _straight(cells, prop, srcs, recs, velocity): Calculate the travel time of a straight ray. """ times = numpy.zeros(len(srcs), dtype=numpy.float) - for l in xrange(len(times)): + for l in range(len(times)): x_src, y_src = srcs[l] x_rec, y_rec = recs[l] maxx = max(x_src, x_rec) @@ -161,7 +163,7 @@ def _straight(cells, prop, srcs, recs, velocity): b_ray = y_src - a_ray * (x_src) # Add the src and rec locations so that the travel time of a # src or rec inside a cell is accounted for - xps = [x1, x2, (y1 - b_ray) / a_ray, (y2 - b_ray) / a_ray, + xps = [x1, x2, (y1 - b_ray) / a_ray, (y2 - b_ray) / a_ray, x_src, x_rec] yps = [a_ray * x1 + b_ray, a_ray * x2 + b_ray, y1, y2, y_src, y_rec] diff --git a/fatiando/seismic/wavefd.py b/fatiando/seismic/wavefd.py index e9c90628..5986bdce 100644 --- a/fatiando/seismic/wavefd.py +++ b/fatiando/seismic/wavefd.py @@ -133,27 +133,19 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import +from future.builtins import range import warnings +import math import numpy import scipy.sparse import scipy.sparse.linalg -try: - from fatiando.seismic._wavefd import * 
-except: - def not_implemented(*args, **kwargs): - raise NotImplementedError( - "Couldn't load C coded extension module.") - _apply_damping = not_implemented - _step_elastic_sh = not_implemented - _step_elastic_psv = not_implemented - _xz2ps = not_implemented - _nonreflexive_sh_boundary_conditions = not_implemented - _nonreflexive_psv_boundary_conditions = not_implemented - _step_scalar = not_implemented - _reflexive_scalar_boundary_conditions = not_implemented +from ._wavefd import _apply_damping, _step_elastic_sh, \ + _step_elastic_psv, _xz2ps, _nonreflexive_sh_boundary_conditions, \ + _nonreflexive_psv_boundary_conditions, _step_scalar, \ + _reflexive_scalar_boundary_conditions # Tell users at import time that this code is not very trustworthy @@ -194,8 +186,8 @@ class MexHatSource(object): def __init__(self, x, z, area, shape, amp, frequency, delay=0): nz, nx = shape dz, dx = sum(area[2:]) / (nz - 1), sum(area[:2]) / (nx - 1) - self.i = int(round((z - area[2]) / dz)) - self.j = int(round((x - area[0]) / dx)) + self.i = int(math.floor((z - area[2]) / dz)) + self.j = int(math.floor((x - area[0]) / dx)) self.x, self.z = x, z self.amp = amp self.frequency = frequency @@ -312,16 +304,16 @@ def blast_source(x, z, area, shape, amp, frequency, delay=0, nz, nx = shape xsources, zsources = [], [] center = sourcetype(x, z, area, shape, amp, frequency, delay) - i, j = center.indexes() + ic, jc = center.indexes() tmp = numpy.sqrt(2) - locations = [[i - 1, j - 1, -amp, -amp], - [i - 1, j, 0, -tmp * amp], - [i - 1, j + 1, amp, -amp], - [i, j - 1, -tmp * amp, 0], - [i, j + 1, tmp * amp, 0], - [i + 1, j - 1, -amp, amp], - [i + 1, j, 0, tmp * amp], - [i + 1, j + 1, amp, amp]] + locations = [[ic - 1, jc - 1, -amp, -amp], + [ic - 1, jc, 0, -tmp * amp], + [ic - 1, jc + 1, amp, -amp], + [ic, jc - 1, -tmp * amp, 0], + [ic, jc + 1, tmp * amp, 0], + [ic + 1, jc - 1, -amp, amp], + [ic + 1, jc, 0, tmp * amp], + [ic + 1, jc + 1, amp, amp]] locations = [[i, j, xamp, zamp] for i, j, xamp, 
zamp in locations if i >= 0 and i < nz and j >= 0 and j < nx] for i, j, xamp, zamp in locations: @@ -335,7 +327,6 @@ def blast_source(x, z, area, shape, amp, frequency, delay=0, class GaussSource(MexHatSource): - r""" A wave source that vibrates as a Gaussian derivative wavelet. @@ -370,9 +361,8 @@ def __init__(self, x, z, area, shape, amp, frequency, delay=None): def __call__(self, time): t = time - self.delay - psi = self.amp * ((2 * numpy.sqrt(numpy.e) * self.frequency) - * t * numpy.exp(-2 * (t ** 2) * self.f2) - ) + psi = self.amp*((2*numpy.sqrt(numpy.e)*self.frequency)*t * + numpy.exp(-2*(t**2)*self.f2)) return psi @@ -459,10 +449,10 @@ def _add_pad(array, pad, shape): """ array_pad = numpy.zeros(shape, dtype=numpy.float) array_pad[:-pad, pad:-pad] = array - for k in xrange(pad): + for k in range(pad): array_pad[:-pad, k] = array[:, 0] array_pad[:-pad, -(k + 1)] = array[:, -1] - for k in xrange(pad): + for k in range(pad): array_pad[-(pad - k), :] = array_pad[-(pad + 1), :] return array_pad @@ -523,9 +513,10 @@ def scalar(vel, area, dt, iterations, sources, stations=None, # Get the index of the closest point to the stations and start the # seismograms if stations is not None: - stations = [[int(round((z - z1) / ds)), int(round((x - x1) / ds))] + stations = [[int(math.floor((z - z1) / ds)), + int(math.floor((x - x1) / ds))] for x, z in stations] - seismograms = [numpy.zeros(iterations) for i in xrange(len(stations))] + seismograms = [numpy.zeros(iterations) for i in range(len(stations))] else: stations, seismograms = [], [] # Add some padding to x and z. 
The padding region is where the wave is @@ -551,7 +542,7 @@ def scalar(vel, area, dt, iterations, sources, stations=None, if snapshot is not None: yield 0, u[1, :-pad, pad:-pad], seismograms - for iteration in xrange(1, iterations): + for iteration in range(1, iterations): t, tm1 = iteration % 2, (iteration + 1) % 2 tp1 = tm1 _step_scalar(u[tp1], u[t], u[tm1], 2, nx - 2, 2, nz - 2, @@ -637,9 +628,10 @@ def elastic_sh(mu, density, area, dt, iterations, sources, stations=None, # Get the index of the closest point to the stations and start the # seismograms if stations is not None: - stations = [[int(round((z - z1) / dz)), int(round((x - x1) / dx))] + stations = [[int(math.floor((z - z1) / dz)), + int(math.floor((x - x1) / dx))] for x, z in stations] - seismograms = [numpy.zeros(iterations) for i in xrange(len(stations))] + seismograms = [numpy.zeros(iterations) for i in range(len(stations))] else: stations, seismograms = [], [] # Add some padding to x and z. The padding region is where the wave is @@ -664,7 +656,7 @@ def elastic_sh(mu, density, area, dt, iterations, sources, stations=None, seismogram[0] = u[1, i, j + pad] if snapshot is not None: yield 0, u[1, :-pad, pad:-pad], seismograms - for iteration in xrange(1, iterations): + for iteration in range(1, iterations): t, tm1 = iteration % 2, (iteration + 1) % 2 tp1 = tm1 _step_elastic_sh(u[tp1], u[t], u[tm1], 3, nx - 3, 3, nz - 3, dt, dx, @@ -762,10 +754,11 @@ def elastic_psv(mu, lamb, density, area, dt, iterations, sources, # Get the index of the closest point to the stations and start the # seismograms if stations is not None: - stations = [[int(round((z - z1) / dz)), int(round((x - x1) / dx))] + stations = [[int(math.floor((z - z1) / dz)), + int(math.floor((x - x1) / dx))] for x, z in stations] - xseismograms = [numpy.zeros(iterations) for i in xrange(len(stations))] - zseismograms = [numpy.zeros(iterations) for i in xrange(len(stations))] + xseismograms = [numpy.zeros(iterations) for i in range(len(stations))] 
+ zseismograms = [numpy.zeros(iterations) for i in range(len(stations))] else: stations, xseismograms, zseismograms = [], [], [] # Add padding to have an absorbing region to simulate an infinite medium @@ -811,10 +804,10 @@ def elastic_psv(mu, lamb, density, area, dt, iterations, sources, else: yield [0, ux[1, :-pad, pad:-pad], uz[1, :-pad, pad:-pad], xseismograms, zseismograms] - for iteration in xrange(1, iterations): + for iteration in range(1, iterations): t, tm1 = iteration % 2, (iteration + 1) % 2 tp1 = tm1 - _step_elastic_psv(ux, uz, tp1, t, tm1, 1, nx - 1, 1, nz - 1, dt, dx, + _step_elastic_psv(ux, uz, tp1, t, tm1, 1, nx - 1, 1, nz - 1, dt, dx, dz, mu_pad, lamb_pad, dens_pad) _apply_damping(ux[t], nx, nz, pad, taper) _apply_damping(uz[t], nx, nz, pad, taper) diff --git a/fatiando/seismic/wavelets.py b/fatiando/seismic/wavelets.py index 25d7ca26..878f2d65 100644 --- a/fatiando/seismic/wavelets.py +++ b/fatiando/seismic/wavelets.py @@ -1,7 +1,7 @@ """ Classes to evaluate and sample wavelets. 
""" -from __future__ import division +from __future__ import division, absolute_import from future.builtins import super, object import copy diff --git a/fatiando/tests/mesher/test_geometric_element.py b/fatiando/tests/mesher/test_geometric_element.py index 61c030c8..9d080d1f 100644 --- a/fatiando/tests/mesher/test_geometric_element.py +++ b/fatiando/tests/mesher/test_geometric_element.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import GeometricElement diff --git a/fatiando/tests/mesher/test_pointgrid.py b/fatiando/tests/mesher/test_pointgrid.py index 8d3040a8..37aa222d 100644 --- a/fatiando/tests/mesher/test_pointgrid.py +++ b/fatiando/tests/mesher/test_pointgrid.py @@ -1,4 +1,4 @@ -from __future__ import division +from __future__ import division, absolute_import import numpy as np import numpy.testing as npt from pytest import raises @@ -91,17 +91,17 @@ def test_fails_invalid_index(): "Indexing should fail for an invalid index" area, z, shape = [0, 10, 2, 6], 200, (2, 3) g = PointGrid(area, z, shape) - with raises(IndexError) as e: + with raises(IndexError): g[-7] - with raises(IndexError) as e: + with raises(IndexError): g[-500] - with raises(IndexError) as e: + with raises(IndexError): g[6] - with raises(IndexError) as e: + with raises(IndexError): g[28752] - with raises(IndexError) as e: + with raises(IndexError): g[[1]] - with raises(IndexError) as e: + with raises(IndexError): g['1'] diff --git a/fatiando/tests/mesher/test_polygon.py b/fatiando/tests/mesher/test_polygon.py index ec3595fc..4b5e8fbf 100644 --- a/fatiando/tests/mesher/test_polygon.py +++ b/fatiando/tests/mesher/test_polygon.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import Polygon diff --git a/fatiando/tests/mesher/test_polygonal_prism.py b/fatiando/tests/mesher/test_polygonal_prism.py index 597fa967..c521b6de 100644 --- a/fatiando/tests/mesher/test_polygonal_prism.py +++ b/fatiando/tests/mesher/test_polygonal_prism.py @@ -1,3 +1,4 @@ 
+from __future__ import absolute_import from ...mesher import PolygonalPrism import numpy as np diff --git a/fatiando/tests/mesher/test_prism.py b/fatiando/tests/mesher/test_prism.py index 23fa0431..1ba72267 100644 --- a/fatiando/tests/mesher/test_prism.py +++ b/fatiando/tests/mesher/test_prism.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import Prism diff --git a/fatiando/tests/mesher/test_prism_mesh.py b/fatiando/tests/mesher/test_prism_mesh.py index 8ad7e71e..c840372b 100644 --- a/fatiando/tests/mesher/test_prism_mesh.py +++ b/fatiando/tests/mesher/test_prism_mesh.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from fatiando.mesher import PrismMesh, Prism import numpy as np diff --git a/fatiando/tests/mesher/test_sphere.py b/fatiando/tests/mesher/test_sphere.py index d24bcbb2..086cefdf 100644 --- a/fatiando/tests/mesher/test_sphere.py +++ b/fatiando/tests/mesher/test_sphere.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import Sphere import numpy as np diff --git a/fatiando/tests/mesher/test_square.py b/fatiando/tests/mesher/test_square.py index 41f63680..5b6990cb 100644 --- a/fatiando/tests/mesher/test_square.py +++ b/fatiando/tests/mesher/test_square.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import Square diff --git a/fatiando/tests/mesher/test_square_mesh.py b/fatiando/tests/mesher/test_square_mesh.py index 56e9322d..03d548e3 100644 --- a/fatiando/tests/mesher/test_square_mesh.py +++ b/fatiando/tests/mesher/test_square_mesh.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import SquareMesh import numpy as np diff --git a/fatiando/tests/mesher/test_tesseroid.py b/fatiando/tests/mesher/test_tesseroid.py index bc980d9e..96d0bd9d 100644 --- a/fatiando/tests/mesher/test_tesseroid.py +++ b/fatiando/tests/mesher/test_tesseroid.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import Tesseroid diff --git 
a/fatiando/tests/mesher/test_tesseroid_mesh.py b/fatiando/tests/mesher/test_tesseroid_mesh.py index bb53b6d6..34b5f602 100644 --- a/fatiando/tests/mesher/test_tesseroid_mesh.py +++ b/fatiando/tests/mesher/test_tesseroid_mesh.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import from ...mesher import TesseroidMesh import numpy as np diff --git a/fatiando/tests/test_random.py b/fatiando/tests/test_random.py index 8278790a..5d907012 100644 --- a/fatiando/tests/test_random.py +++ b/fatiando/tests/test_random.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division +from future.builtins import range import numpy from numpy.testing import assert_allclose from fatiando import utils @@ -8,7 +10,7 @@ def test_utils_contaminate(): size = 10 ** 6 data = numpy.zeros(size) std = 4.213 - for i in xrange(20): + for i in range(20): noise = utils.contaminate(data, std) assert abs(noise.mean()) < 10 ** -10, 'mean:%g' % (noise.mean()) assert abs(noise.std() - std) / std < 0.01, 'std:%g' % (noise.std()) @@ -19,7 +21,7 @@ def test_utils_contaminate_seed(): size = 10 ** 6 data = numpy.zeros(size) std = 4400.213 - for i in xrange(20): + for i in range(20): noise = utils.contaminate(data, std, seed=i) assert abs(noise.mean()) < 10 ** - \ 10, 's:%d mean:%g' % (i, noise.mean()) @@ -32,7 +34,7 @@ def test_utils_contaminate_diff(): size = 1235 data = numpy.linspace(-100., 12255., size) noise = 244.4 - for i in xrange(20): + for i in range(20): d1 = utils.contaminate(data, noise) d2 = utils.contaminate(data, noise) assert numpy.all(d1 != d2) diff --git a/fatiando/utils.py b/fatiando/utils.py index 5cf6a9d8..1d0b5d8f 100644 --- a/fatiando/utils.py +++ b/fatiando/utils.py @@ -1,6 +1,8 @@ """ Miscellaneous utility functions. 
""" +from __future__ import absolute_import, division +from future.builtins import range import math import numpy @@ -482,7 +484,7 @@ def contaminate(data, stddev, percent=False, return_stddev=False, seed=None): stddev = [stddev] data = [data] contam = [] - for i in xrange(len(stddev)): + for i in range(len(stddev)): if stddev[i] == 0.: contam.append(data[i]) continue diff --git a/fatiando/vis/mpl.py b/fatiando/vis/mpl.py index 00302bb3..32f2d6fd 100644 --- a/fatiando/vis/mpl.py +++ b/fatiando/vis/mpl.py @@ -53,19 +53,13 @@ ---- """ -from __future__ import division +from __future__ import division, absolute_import +from future.builtins import range import warnings import numpy from matplotlib import pyplot, widgets -# Quick hack so that the docs can build using the mocks for readthedocs -# Ideal would be to log an error message saying that functions from pyplot -# were not imported -try: - from matplotlib.pyplot import * -except: - pass -import fatiando.gridder +from .. import gridder # Dummy variable to laizy import the basemap toolkit (slow) Basemap = None @@ -405,7 +399,7 @@ def erase(event): line.figure.canvas.mpl_connect('key_press_event', erase) line.figure.canvas.mpl_connect('motion_notify_event', move) pyplot.show() - thickness = [depths[i + 1] - depths[i] for i in xrange(len(depths) - 1)] + thickness = [depths[i + 1] - depths[i] for i in range(len(depths) - 1)] return thickness, values @@ -428,10 +422,10 @@ def draw_geolines(area, dlon, dlat, basemap, linewidth=1): """ west, east, south, north = area - meridians = basemap.drawmeridians(numpy.arange(west, east, dlon), - labels=[0, 0, 0, 1], linewidth=linewidth) - parallels = basemap.drawparallels(numpy.arange(south, north, dlat), - labels=[1, 0, 0, 0], linewidth=linewidth) + basemap.drawmeridians(numpy.arange(west, east, dlon), labels=[0, 0, 0, 1], + linewidth=linewidth) + basemap.drawparallels(numpy.arange(south, north, dlat), + labels=[1, 0, 0, 0], linewidth=linewidth) def draw_countries(basemap, 
linewidth=1, style='dashed'): @@ -667,7 +661,7 @@ def layers(thickness, values, style='-k', z0=0., linewidth=1, label=None, if len(thickness) != len(values): raise ValueError("thickness and values must have same length") nlayers = len(thickness) - interfaces = [z0 + sum(thickness[:i]) for i in xrange(nlayers + 1)] + interfaces = [z0 + sum(thickness[:i]) for i in range(nlayers + 1)] ys = [interfaces[0]] for y in interfaces[1:-1]: ys.append(y) @@ -870,8 +864,7 @@ def contour(x, y, v, shape, levels, interp=False, extrapolate=False, color='k', if x.shape != y.shape != v.shape: raise ValueError("Input arrays x, y, and v must have same shape!") if interp: - x, y, v = fatiando.gridder.interp(x, y, v, shape, - extrapolate=extrapolate) + x, y, v = gridder.interp(x, y, v, shape, extrapolate=extrapolate) X = numpy.reshape(x, shape) Y = numpy.reshape(y, shape) V = numpy.reshape(v, shape) @@ -935,8 +928,7 @@ def contourf(x, y, v, shape, levels, interp=False, extrapolate=False, if x.shape != y.shape != v.shape: raise ValueError("Input arrays x, y, and v must have same shape!") if interp: - x, y, v = fatiando.gridder.interp(x, y, v, shape, - extrapolate=extrapolate) + x, y, v = gridder.interp(x, y, v, shape, extrapolate=extrapolate) X = numpy.reshape(x, shape) Y = numpy.reshape(y, shape) V = numpy.reshape(v, shape) @@ -992,8 +984,7 @@ def pcolor(x, y, v, shape, interp=False, extrapolate=False, cmap=pyplot.cm.jet, if vmax is None: vmax = v.max() if interp: - x, y, v = fatiando.gridder.interp(x, y, v, shape, - extrapolate=extrapolate) + x, y, v = gridder.interp(x, y, v, shape, extrapolate=extrapolate) X = numpy.reshape(x, shape) Y = numpy.reshape(y, shape) V = numpy.reshape(v, shape) @@ -1099,7 +1090,7 @@ def seismic_image(section, dt, ranges=None, cmap=pyplot.cm.gray, aspect = numpy.round((x1 - x0)/numpy.max(t)) aspect -= aspect*0.2 if vmin is None and vmax is None: - scale = np.abs([section.max(), section.min()]).max() + scale = numpy.abs([section.max(), section.min()]).max() 
vmin = -scale vmax = scale pyplot.imshow(data, aspect=aspect, cmap=cmap, origin='upper', diff --git a/fatiando/vis/myv.py b/fatiando/vis/myv.py index 16199265..fa5b5796 100644 --- a/fatiando/vis/myv.py +++ b/fatiando/vis/myv.py @@ -40,6 +40,8 @@ ---- """ +from __future__ import absolute_import, division +from future.builtins import range import warnings import numpy @@ -92,7 +94,7 @@ def _lazy_import_tvtk(): from enthought.tvtk.api import tvtk -def title(text, color=(0, 0, 0), size=0.3, height=1): +def title(text, color=(0, 0, 0), size=0.3, height=1): """ Draw a title on a Mayavi figure. @@ -266,22 +268,22 @@ def polyprisms(prisms, prop=None, style='surface', opacity=1, edges=True, # The top surface points.extend( reversed(numpy.transpose([x, y, prism.z1 * numpy.ones_like(x)]))) - polygons.append(range(offset, offset + nverts)) + polygons.append(list(range(offset, offset + nverts))) scalars.extend(scalar * numpy.ones(nverts)) offset += nverts # The bottom surface points.extend( reversed(numpy.transpose([x, y, prism.z2 * numpy.ones_like(x)]))) - polygons.append(range(offset, offset + nverts)) + polygons.append(list(range(offset, offset + nverts))) scalars.extend(scalar * numpy.ones(nverts)) offset += nverts # The sides - for i in xrange(nverts): + for i in range(nverts): x1, y1 = x[i], y[i] x2, y2 = x[(i + 1) % nverts], y[(i + 1) % nverts] points.extend([[x1, y1, prism.z1], [x2, y2, prism.z1], [x2, y2, prism.z2], [x1, y1, prism.z2]]) - polygons.append(range(offset, offset + 4)) + polygons.append(list(range(offset, offset + 4))) scalars.extend(scalar * numpy.ones(4)) offset += 4 mesh = tvtk.PolyData(points=points, polys=polygons) @@ -431,7 +433,7 @@ def tesseroids(tesseroids, prop=None, style='surface', opacity=1, edges=True, utils.sph2cart(e, n, 0.5 * (top + bottom)), utils.sph2cart(w, n, 0.5 * (top + bottom))]) cells.append(20) - cells.extend(range(start, start + 20)) + cells.extend(list(range(start, start + 20))) start += 20 offsets.append(offset) offset += 21 @@ 
-559,7 +561,7 @@ def prisms(prisms, prop=None, style='surface', opacity=1, edges=True, points.extend([[x1, y1, z1], [x2, y1, z1], [x2, y2, z1], [x1, y2, z1], [x1, y1, z2], [x2, y1, z2], [x2, y2, z2], [x1, y2, z2]]) cells.append(8) - cells.extend([i for i in xrange(start, start + 8)]) + cells.extend([i for i in range(start, start + 8)]) start += 8 offsets.append(offset) offset += 9 diff --git a/gallery/gravmag/eqlayer_mag_transform.py b/gallery/gravmag/eqlayer_mag_transform.py index 59fae903..d1864577 100644 --- a/gallery/gravmag/eqlayer_mag_transform.py +++ b/gallery/gravmag/eqlayer_mag_transform.py @@ -49,8 +49,8 @@ # Notice that we only estimate the intensity. We must provide the magnetization # direction of the layer through the sinc and sdec parameters. layer = mesher.PointGrid(area, 700, shape) -eql = (EQLTotalField(x, y, z, data, inc, dec, layer, sinc=inc, sdec=dec) - + 1e-15*Damping(layer.size)) +eql = (EQLTotalField(x, y, z, data, inc, dec, layer, sinc=inc, sdec=dec) + + 1e-15*Damping(layer.size)) eql.fit() # Print some statistics of how well the estimated layer fits the data residuals = eql[0].residuals() diff --git a/setup.py b/setup.py index 1dcc9b37..325987ca 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,7 @@ """ Build extension modules, package and install Fatiando. """ +from __future__ import absolute_import import sys import os from setuptools import setup, Extension, find_packages