15 changes: 8 additions & 7 deletions micasense/capture.py
@@ -44,6 +44,8 @@
import micasense.imageutils as imageutils
import micasense.plotutils as plotutils

logger = logging.getLogger(__name__)


class Capture(object):
"""
@@ -814,7 +816,7 @@ def SIFT_align_capture(self, ref=5, min_matches=10, verbose=0, err_red=10.0, err
keypoints_ref = descriptor_extractor.keypoints
descriptor_ref = descriptor_extractor.descriptors
if verbose > 1:
print('found {:d} keypoints in the reference image'.format(len(keypoints_ref)))
logger.info('found %d keypoints in the reference image', len(keypoints_ref))
match_images = []
ratio = []
filter_tr = []
@@ -844,8 +846,7 @@ def SIFT_align_capture(self, ref=5, min_matches=10, verbose=0, err_red=10.0, err
descriptors.append(descriptor_extractor.descriptors)
if verbose > 1:
for k, ix in zip(keypoints, img_index):
print('found {:d} keypoints for band {:} '.format(len(k), self.images[ix].band_name))
print(' in the remaining stack')
                    logger.info('found %d keypoints for band %s in the remaining stack', len(k), self.images[ix].band_name)

matches = [match_descriptors(d, descriptor_ref, max_ratio=r)
for d, r in zip(descriptors, ratio)]
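For readers unfamiliar with the scikit-image calls used in this method, here is a loose, self-contained sketch of the same detect/match/estimate pipeline for a single band pair (illustrative only, not this method's code; the helper name is made up):

from skimage.feature import SIFT, match_descriptors
from skimage.transform import estimate_transform

def sketch_pair_transform(ref_img, match_img, max_ratio=0.7):
    # detect and describe SIFT keypoints in both bands
    detector = SIFT()
    detector.detect_and_extract(ref_img)
    kp_ref, desc_ref = detector.keypoints, detector.descriptors
    detector.detect_and_extract(match_img)
    kp_img, desc_img = detector.keypoints, detector.descriptors
    # ratio-test matching, as in the match_descriptors call above
    matches = match_descriptors(desc_img, desc_ref, max_ratio=max_ratio)
    # skimage keypoints are (row, col); estimate_transform expects (x, y)
    src = kp_ref[matches[:, 1]][:, ::-1]
    dst = kp_img[matches[:, 0]][:, ::-1]
    return estimate_transform('projective', src, dst)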
@@ -865,7 +866,7 @@ def SIFT_align_capture(self, ref=5, min_matches=10, verbose=0, err_red=10.0, err
keypoints[posBLUE], keypoints_ref, matches[posBLUE])
# we trust this match to work
if len(kpi) < min_matches:
print('we have just {:d} matching keypoints -the match of BLUE camera to RED failed!!'.format(len(kpi)))
                    logger.error('we have just %d matching keypoints - the match of BLUE camera to RED failed!', len(kpi))
# if it worked, scale it and get the transform
scale_i = np.array(self.images[iBlueREF].raw().shape) / np.array(rest_shape)
P = estimate_transform('projective', (scale * kpr)[:, ::-1], (scale_i * kpi)[:, ::-1])
@@ -889,7 +890,7 @@ def SIFT_align_capture(self, ref=5, min_matches=10, verbose=0, err_red=10.0, err
scale_i,
threshold=t)
if verbose > 0:
print('found {:d} matching keypoints for index {:d}'.format(len(filtered_match), ix))
logger.info('found %d matching keypoints for index %d', len(filtered_match), ix)
# if we have enough SIFT matches that actually correspond, compute a model
if len(filtered_match) > min_matches:
kpi, kpr, imatch, model = self.find_inliers(filtered_kpi,
@@ -904,15 +905,15 @@ def SIFT_align_capture(self, ref=5, min_matches=10, verbose=0, err_red=10.0, err
else:
P = ProjectiveTransform(matrix=warp_matrices_calibrated[ix])
if verbose > 0:
print('no match for index {:d}'.format(ix))
logger.info('no match for index %d', ix)
models.append(P)
kp_image.append(kpi)
kp_ref.append(kpr)
img = self.images[ix].undistorted(self.images[ix].raw())

# no need for the upsampled stacks here
if verbose > 0:
print("Finished aligning band", ix)
logger.info("Finished aligning band %d", ix)

self.__sift_aligned_capture = [np.eye(3)] * len(self.images)
for ix, m in zip(img_index, models):
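Because these messages now go through the logging module instead of print, they stay silent unless the calling application configures logging. A minimal sketch of how a script using the package might surface them (handler format is arbitrary):

import logging

logging.basicConfig(level=logging.INFO,
                    format="%(levelname)s %(name)s: %(message)s")
# per-module verbosity can then be tuned, e.g. only warnings from capture.py:
logging.getLogger("micasense.capture").setLevel(logging.WARNING)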
5 changes: 4 additions & 1 deletion micasense/dls.py
@@ -23,8 +23,11 @@
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""

import logging

import numpy as np

logger = logging.getLogger(__name__)
# for DLS correction, we need the sun position at the time the image was taken
# this can be computed using the pysolar package (ver 0.6)
# https://pypi.python.org/pypi/Pysolar/0.6
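For reference, with a current pysolar release the sun position at capture time can be computed roughly as below (function names assume pysolar >= 0.8; the timestamp and coordinates are placeholders):

import datetime

import pytz
from pysolar import solar

when = datetime.datetime(2023, 6, 1, 12, 0, tzinfo=pytz.utc)  # capture time, UTC
lat, lon = 47.6, -122.3                                       # capture location
altitude_deg = solar.get_altitude(lat, lon, when)             # sun elevation above the horizon
azimuth_deg = solar.get_azimuth(lat, lon, when)               # sun azimuth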
@@ -49,7 +52,7 @@
havePysolar = True
finally:
if not havePysolar:
print("Unable to import pysolar")
logger.error("Unable to import pysolar")


def fresnel(phi):
25 changes: 14 additions & 11 deletions micasense/image.py
@@ -28,6 +28,7 @@

import math
import os
import logging

import cv2
import numpy as np
@@ -36,6 +37,8 @@
import micasense.metadata as metadata
import micasense.plotutils as plotutils

logger = logging.getLogger(__name__)


# helper function to convert euler angles to a rotation matrix
def rotations_degrees_to_rotation_matrix(rotation_degrees):
@@ -46,15 +49,15 @@ def rotations_degrees_to_rotation_matrix(rotation_degrees):
sy = np.sin(np.deg2rad(rotation_degrees[1]))
sz = np.sin(np.deg2rad(rotation_degrees[2]))

Rx = np.mat([1, 0, 0,
0, cx, -sx,
0, sx, cx]).reshape(3, 3)
Ry = np.mat([cy, 0, sy,
0, 1, 0,
-sy, 0, cy]).reshape(3, 3)
Rz = np.mat([cz, -sz, 0,
sz, cz, 0,
0, 0, 1]).reshape(3, 3)
Rx = np.asmatrix([1, 0, 0,
0, cx, -sx,
0, sx, cx]).reshape(3, 3)
Ry = np.asmatrix([cy, 0, sy,
0, 1, 0,
-sy, 0, cy]).reshape(3, 3)
Rz = np.asmatrix([cz, -sz, 0,
sz, cz, 0,
0, 0, 1]).reshape(3, 3)
R = Rx * Ry * Rz
return R
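A quick sanity check of the helper above (illustrative, not part of the module): a pure 90 degree rotation about z should map the x axis onto the y axis.

R = rotations_degrees_to_rotation_matrix([0, 0, 90])
print(np.asarray(R) @ np.array([1.0, 0.0, 0.0]))  # approximately [0, 1, 0]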

@@ -68,7 +71,7 @@ class Image(object):
def __init__(self, image_path: str, exiftool_obj=None, allow_uncalibrated=False):
if not os.path.isfile(image_path):
raise IOError("Provided path is not a file: {}".format(image_path))
self.path = image_path
self.path = str(image_path)
self.meta = metadata.Metadata(self.path, exiftool_obj=exiftool_obj)

if self.meta.band_name() is None:
@@ -228,7 +231,7 @@ def raw(self):
except ImportError:
self.__raw_image = cv2.imread(self.path, -1)
except IOError:
print(("Could not open image at path {}".format(self.path)))
logger.error("Could not open image at path %s", self.path)
raise
return self.__raw_image

19 changes: 12 additions & 7 deletions micasense/imageutils.py
@@ -25,6 +25,7 @@

import multiprocessing
import os
import logging

import cv2
import exiftool
@@ -35,6 +36,8 @@
from skimage.transform import warp
from skimage.util import img_as_ubyte

logger = logging.getLogger(__name__)


# start helper functions for finding a "hole"-free rectangle
def get_longest_sequence(b):
@@ -206,7 +209,7 @@ def align(pair):
nol = pair['pyramid_levels']

if pair['debug']:
print(("number of pyramid levels: {}".format(nol)))
logger.info("number of pyramid levels: %s", nol)

warp_matrix[0][2] /= (2 ** nol)
warp_matrix[1][2] /= (2 ** nol)
@@ -244,7 +247,7 @@ def align(pair):
plotutils.plotwithcolorbar(gray2_pyr[level], "match level {}".format(level))
plotutils.plotwithcolorbar(grad1, "ref grad level {}".format(level))
plotutils.plotwithcolorbar(grad2, "match grad level {}".format(level))
print(("Starting warp for level {} is:\n {}".format(level, warp_matrix)))
logger.info(("Starting warp for level %s is:\n %s", level, warp_matrix)

try:
cc, warp_matrix = cv2.findTransformECC(grad1, grad2, warp_matrix, warp_mode, criteria, inputMask=None,
@@ -253,7 +256,7 @@ def align(pair):
cc, warp_matrix = cv2.findTransformECC(grad1, grad2, warp_matrix, warp_mode, criteria)

if show_debug_images:
print(("Warp after alignment level {} is \n{}".format(level, warp_matrix)))
logger.info("Warp after alignment level %s is \n%s", level, warp_matrix)

if level != nol: # scale up only the offset by a factor of 2 for the next (larger image) pyramid level
if warp_mode == cv2.MOTION_HOMOGRAPHY:
@@ -326,15 +329,15 @@ def align_capture(capture, ref_index=None, warp_mode=cv2.MOTION_HOMOGRAPHY, max_
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
for _, mat in enumerate(pool.imap_unordered(align, alignment_pairs)):
warp_matrices[mat['match_index']] = mat['warp_matrix']
print(("Finished aligning band {}".format(mat['match_index'])))
logger.info("Finished aligning band %s", mat['match_index'])
pool.close()
pool.join()
else:
# Single-threaded alternative
for pair in alignment_pairs:
mat = align(pair)
warp_matrices[mat['match_index']] = mat['warp_matrix']
print(("Finished aligning band {}".format(mat['match_index'])))
logger.info(("Finished aligning band %s", mat['match_index'])

if capture.images[-1].band_name == 'LWIR':
img = capture.images[-1]
@@ -509,7 +512,7 @@ def min_max(pts):

def map_points(pts, image_size, warpMatrix, distortion_coeffs, camera_matrix, warp_mode=cv2.MOTION_HOMOGRAPHY):
# extra dimension makes opencv happy
pts = np.array([pts], dtype=float)
    pts = np.array([pts], dtype=np.float64)  # np.float has been removed from NumPy; float64 keeps the intended precision
new_cam_mat, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, distortion_coeffs, image_size, 1)
new_pts = cv2.undistortPoints(pts, camera_matrix, distortion_coeffs, P=new_cam_mat)
if warp_mode == cv2.MOTION_AFFINE:
@@ -588,7 +591,7 @@ def radiometric_pan_sharpen(capture, warp_matrices=None, panchro_band=5, irradia
# for comparison
# use the warp matrices we have for the stack, if not user supplied
if warp_matrices is None:
print("No SIFT warp matrices provided.")
logger.warning("No SIFT warp matrices provided.")
warp_matrices = capture.get_warp_matrices(ref_index=panchro_band)
h, w = capture.images[panchro_band].raw().shape
if irradiance_list is None:
@@ -688,6 +691,8 @@ def write_exif_to_stack(thecapture=None, thefilename=None, existing_exif_list=No
raise Exception(
"Please provide an existing capture object and filename or a list of existing exif data for batch processing")
exif_bytes_list = []
logger.debug("EXIF_DATA %s", exif_data)

for exif in exif_data:
for key, val in exif.items():
if key != 'Capture ID' and key != 'Filename':
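The logger.debug call above appears to be the only DEBUG-level message in this diff, so it stays hidden under a default INFO configuration; it can be enabled selectively, for example:

import logging

logging.getLogger("micasense.imageutils").setLevel(logging.DEBUG)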
11 changes: 7 additions & 4 deletions micasense/metadata.py
@@ -28,10 +28,13 @@
import math
import os
from datetime import datetime, timedelta
import logging

import exiftool
import pytz

logger = logging.getLogger(__name__)


class Metadata(object):
""" Container for Micasense image metadata"""
@@ -74,10 +77,10 @@ def get_item(self, item, index=None):
except KeyError:
pass
except IndexError:
print("Item {0} is length {1}, index {2} is outside this range.".format(
logger.error("Item %s is length %s, index %s is outside this range.",
item,
len(self.exif[item]),
index))
len(self.exif[0][item]),
index)
return val

def size(self, item):
@@ -98,7 +101,7 @@ def size(self, item):

def print_all(self):
for item in self.get_all():
print("{}: {}".format(item, self.get_item(item)))
logger.info("%s: %s", item, self.get_item(item))

def dls_present(self):
return self.get_item("XMP:Irradiance") is not None \
19 changes: 11 additions & 8 deletions micasense/panel.py
@@ -25,19 +25,22 @@

import math
import re
import logging

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pyzbar.pyzbar as pyzbar
from skimage import measure

logger = logging.getLogger(__name__)


class Panel(object):

def __init__(self, img, panel_corners=None, ignore_autocalibration=False):
# if we have panel images with QR metadata, panel detection is not called,
# so this can be forced here
# so this can be forced here
if img is None:
raise IOError("Must provide an image")

@@ -124,7 +127,7 @@ def reflectance_from_panel_serial(self):
return None

def get_panel_type(self):
print(self.__panel_type)
logger.info(self.__panel_type)

def qr_corners(self):
if self.__panel_type == 'auto':
@@ -146,13 +149,13 @@ def panel_detected(self):
return self.qr_bounds is not None

def panel_corners(self):
""" get the corners of a panel region based on the qr code location
""" get the corners of a panel region based on the qr code location
Our algorithm to do this uses a 'reference' qr code location, and
it's associate panel region. We find the affine transform
between the reference qr and our qr, and apply that same transform to the
reference panel region to find our panel region. Because of a limitation
of the pyzbar library, the rotation of the absolute QR code isn't known,
so we then try all 4 rotations and test against a cost function which is the
of the pyzbar library, the rotation of the absolute QR code isn't known,
so we then try all 4 rotations and test against a cost function which is the
minimum of the standard deviation divided by the mean value for the panel region"""
if self.__panel_bounds is not None:
return self.__panel_bounds
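The cost test described in the docstring above can be sketched as a standalone helper (hypothetical, not the class's actual implementation): of the four trial rotations, the candidate panel region with the smallest std/mean ratio wins.

import numpy as np

def pick_best_rotation(candidate_regions):
    # candidate_regions: one 2-D array of panel pixel values per trial QR rotation
    costs = [np.std(region) / np.mean(region) for region in candidate_regions]
    best = int(np.argmin(costs))
    return best, costs[best]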
@@ -164,7 +167,7 @@ def panel_corners(self):
if self.panel_version < 3:
# use the actual panel measures here - we use units of [mm]
# the panel is 154.4 x 152.4 mm , vs. the 84 x 84 mm for the QR code
# it is left 143.20 mm from the QR code
# it is left 143.20 mm from the QR code
# use the inner 50% square of the panel
s = 76.2
p = 42
@@ -177,7 +180,7 @@ def panel_corners(self):
elif self.panel_version >= 6:
# use the actual panel measures here - we use units of [mm]
# the panel is 100 x 100 mm , vs. the 91 x 91 mm for the QR code
# it is down 125.94 mm from the QR code
# it is down 125.94 mm from the QR code
# use the inner 50% square of the panel
p = 41
s = 50
@@ -271,7 +274,7 @@ def radiance(self):
def reflectance_mean(self):
reflectance_image = self.image.reflectance()
if reflectance_image is None:
print(
logger.info(
"First calculate the reflectance image by providing a\n band specific irradiance to the calling "
"image.reflectance(irradiance)")
mean, _, _, _ = self.region_stats(reflectance_image,