Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Allow reconstructions to be extended #178

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
8 changes: 3 additions & 5 deletions bin/clean
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,12 @@ rm -rf trash/*
mv -vf $1/reconstruction*.json $trash
mv -vf $1/exif $trash
mv -vf $1/matches $trash
mv -vf $1/sift $trash
mv -vf $1/surf $trash
mv -vf $1/akaze* $trash
mv -vf $1/root* $trash
mv -vf $1/hahog $trash
mv -vf $1/features $trash
mv -vf $1/camera_models.json $trash
mv -vf $1/reference_lla.json $trash
mv -vf $1/profile.log $trash
mv -vf $1/navigation_graph.json $trash
mv -vf $1/plot_inliers $trash
mv -vf $1/depthmaps $trash
mv -vf $1/tracks.csv $trash
mv -vf $1/track_sets.pkl $trash
14 changes: 12 additions & 2 deletions opensfm/commands/create_tracks.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,16 @@ def run(self, args):
data = dataset.DataSet(args.dataset)
images = data.images()

try:
graph = data.load_tracks_graph()
tracks, processed_images = matching.tracks_and_images(graph)
except IOError:
graph = None
tracks = None
processed_images = []

remaining_images = set(images) - set(processed_images)

# Read local features
logging.info('reading features')
features = {}
Expand All @@ -30,7 +40,7 @@ def run(self, args):

# Read matches
matches = {}
for im1 in images:
for im1 in remaining_images:
try:
im1_matches = data.load_matches(im1)
except IOError:
Expand All @@ -39,7 +49,7 @@ def run(self, args):
matches[im1, im2] = im1_matches[im2]

tracks_graph = matching.create_tracks_graph(features, colors, matches,
data.config)
data.config, data)
data.save_tracks_graph(tracks_graph)

end = time.time()
Expand Down
4 changes: 2 additions & 2 deletions opensfm/commands/detect_features.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,10 @@ def run(self, args):

def detect(args):
image, data = args
logger.info('Extracting {} features for image {}'.format(
data.feature_type().upper(), image))

if not data.feature_index_exists(image):
logger.info('Extracting {} features for image {}'.format(
data.feature_type().upper(), image))
mask = data.mask_as_array(image)
if mask is not None:
logger.info('Found mask to apply for image {}'.format(image))
Expand Down
11 changes: 8 additions & 3 deletions opensfm/commands/extract_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,14 @@ def add_arguments(self, parser):
def run(self, args):
start = time.time()
data = dataset.DataSet(args.dataset)

camera_models = {}
for image in data.images():
# Try not to recreate exif files that already exist
try:
camera_models = data.load_camera_models()
images = data.images_requiring_exif_files()
except IOError:
camera_models = {}
images = data.images()
for image in images:
logging.info('Extracting focal lengths for image {}'.format(image))

# EXIF data in Image
Expand Down
48 changes: 37 additions & 11 deletions opensfm/commands/match_features.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,12 @@ def has_gps_info(exif):
'longitude' in exif['gps'])


def match_candidates_by_distance(images, exifs, reference, max_neighbors, max_distance):
"""Find candidate matching pairs by GPS distance."""
def match_candidates_by_distance(images, exifs, reference, max_neighbors, max_distance,
data):
"""
Find candidate matching pairs by GPS distance.
Only computes pairs where the first image of the pair does not already have a matches file
"""
if max_neighbors <= 0 and max_distance <= 0:
return set()
max_neighbors = max_neighbors or 99999999
Expand All @@ -98,6 +102,8 @@ def match_candidates_by_distance(images, exifs, reference, max_neighbors, max_di

pairs = set()
for i, image in enumerate(images):
if data.matches_exists(image):
continue
distances, neighbors = tree.query(
points[i], k=k, distance_upper_bound=max_distance)
for j in neighbors:
Expand All @@ -106,8 +112,11 @@ def match_candidates_by_distance(images, exifs, reference, max_neighbors, max_di
return pairs


def match_candidates_by_time(images, exifs, max_neighbors):
"""Find candidate matching pairs by time difference."""
def match_candidates_by_time(images, exifs, max_neighbors, data):
"""
Find candidate matching pairs by time difference.
Only computes pairs where the first image of the pair does not already have a matches file
"""
if max_neighbors <= 0:
return set()
k = min(len(images), max_neighbors + 1)
Expand All @@ -120,21 +129,27 @@ def match_candidates_by_time(images, exifs, max_neighbors):

pairs = set()
for i, image in enumerate(images):
if data.matches_exists(image):
continue
distances, neighbors = tree.query(times[i], k=k)
for j in neighbors:
if i != j and j < len(images):
pairs.add(tuple(sorted((images[i], images[j]))))
return pairs


def match_candidates_by_order(images, exifs, max_neighbors):
"""Find candidate matching pairs by sequence order."""
def match_candidates_by_order(images, max_neighbors, data):
"""
Find candidate matching pairs by sequence order.
Only computes pairs where the first image of the pair does not already have a matches file"""
if max_neighbors <= 0:
return set()
n = (max_neighbors + 1) / 2

pairs = set()
for i, image in enumerate(images):
if data.matches_exists(image):
continue
a = max(0, i - n)
b = min(len(images), i + n)
for j in range(a, b):
Expand All @@ -144,7 +159,10 @@ def match_candidates_by_order(images, exifs, max_neighbors):


def match_candidates_from_metadata(images, exifs, data):
"""Compute candidate matching pairs"""
"""
Compute candidate matching pairs based on GPS, capture time and order of images
Only computes pairs where the first image of the pair does not already have a matches file
"""
max_distance = data.config['matching_gps_distance']
gps_neighbors = data.config['matching_gps_neighbors']
time_neighbors = data.config['matching_time_neighbors']
Expand All @@ -163,14 +181,19 @@ def match_candidates_from_metadata(images, exifs, data):
images.sort()

d = match_candidates_by_distance(images, exifs, reference,
gps_neighbors, max_distance)
t = match_candidates_by_time(images, exifs, time_neighbors)
o = match_candidates_by_order(images, exifs, order_neighbors)
gps_neighbors, max_distance, data)
t = match_candidates_by_time(images, exifs, time_neighbors, data)
o = match_candidates_by_order(images, order_neighbors, data)
pairs = d | t | o


res = {im: [] for im in images}
for im1, im2 in pairs:
res[im1].append(im2)
if not data.matches_exists(im1):
res[im1].append(im2)
else:
assert not data.matches_exists(im2)
res[im2].append(im1)
return res


Expand All @@ -182,6 +205,9 @@ def match_arguments(pairs, ctx):
def match(args):
"""Compute all matches for a single image"""
im1, candidates, i, n, ctx = args
if ctx.data.matches_exists(im1):
assert(len(candidates) == 0)
return
logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

config = ctx.data.config
Expand Down
2 changes: 2 additions & 0 deletions opensfm/commands/mesh.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ def run(self, args):
graph = data.load_tracks_graph()
reconstructions = data.load_reconstruction()

logger.debug("Starting calculation of reconstruction mesh")

for i, r in enumerate(reconstructions):
for shot in r.shots.values():
if shot.id in graph:
Expand Down
25 changes: 24 additions & 1 deletion opensfm/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import os
import json
import errno
import pickle
import cPickle as pickle
import gzip
import numpy as np
import networkx as nx
Expand Down Expand Up @@ -191,6 +191,10 @@ def __exif_file(self, image):
"""
return os.path.join(self.__exif_path(), image + '.exif')

def images_requiring_exif_files(self):
    """Return the set of images for which no EXIF file exists yet."""
    missing = set()
    for image in self.images():
        # Skip images whose per-image .exif file is already on disk.
        if not os.path.isfile(self.__exif_file(image)):
            missing.add(image)
    return missing

def load_exif(self, image):
"""
Return extracted exif information, as dictionary, usually with fields:
Expand Down Expand Up @@ -325,6 +329,19 @@ def find_matches(self, im1, im2):
return im2_matches[im1][:, [1, 0]]
return []

def __track_sets_file(self, filename=None):
    """Return the full path of the track-sets (union-find) pickle file."""
    name = filename if filename else 'track_sets.pkl'
    return os.path.join(self.data_path, name)

def load_track_sets_file(self, filename=None):
    """Return the persisted union-find of tracks.

    Raises IOError if the track-sets file does not exist yet.
    """
    # Pickle streams are binary data: open with 'rb', not the default
    # text mode, so the load also works on platforms that translate
    # line endings (e.g. Windows).
    with open(self.__track_sets_file(filename), 'rb') as fin:
        return load_track_sets_file(fin)

def save_track_sets_file(self, unionfind, filename=None):
    """Persist the union-find of tracks to the dataset directory."""
    # 'wb', not 'w': pickle output is binary, and text mode corrupts
    # it on platforms with newline translation (e.g. Windows).
    with open(self.__track_sets_file(filename), 'wb') as fout:
        save_track_sets_file(fout, unionfind)

def __tracks_graph_file(self, filename=None):
"""Return path of tracks file"""
return os.path.join(self.data_path, filename or 'tracks.csv')
Expand Down Expand Up @@ -497,3 +514,9 @@ def save_tracks_graph(fileobj, graph):
r, g, b = data['feature_color']
fileobj.write('%s\t%s\t%d\t%g\t%g\t%g\t%g\t%g\n' % (
str(image), str(track), fid, x, y, r, g, b))

def load_track_sets_file(fileobj):
    """Deserialize and return the track union-find stored in *fileobj*."""
    return pickle.Unpickler(fileobj).load()

def save_track_sets_file(fileobj, unionfind):
    """Serialize *unionfind* into *fileobj* using the highest pickle protocol."""
    writer = pickle.Pickler(fileobj, protocol=pickle.HIGHEST_PROTOCOL)
    writer.dump(unionfind)
30 changes: 24 additions & 6 deletions opensfm/matching.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,10 +142,16 @@ def good_track(track, min_length):
return False
return True


def create_tracks_graph(features, colors, matches, config):
def create_tracks_graph(features, colors, matches, config, data):
logger.debug('Merging features onto tracks')
uf = UnionFind()

try:
uf, track_ids, max_id = data.load_track_sets_file()
except IOError:
uf = UnionFind()
track_ids = {}
max_id = 0

for im1, im2 in matches:
for f1, f2 in matches[im1, im2]:
uf.union((im1, f1), (im2, f2))
Expand All @@ -157,12 +163,17 @@ def create_tracks_graph(features, colors, matches, config):
sets[p].append(i)
else:
sets[p] = [i]
if p not in track_ids:
track_ids[p] = max_id
max_id += 1

tracks = [t for t in sets.values() if good_track(t, config.get('min_track_length', 2))]
track_sets = (uf, track_ids, max_id)
data.save_track_sets_file(track_sets)
tracks = [(track_ids[track_name], t) for track_name, t in sets.iteritems() if good_track(t, config.get('min_track_length', 2))]
logger.debug('Good tracks: {}'.format(len(tracks)))

tracks_graph = nx.Graph()
for track_id, track in enumerate(tracks):
for track_id, track in tracks:
for image, featureid in track:
if image not in features:
continue
Expand Down Expand Up @@ -210,15 +221,22 @@ def common_tracks(g, im1, im2):
return tracks, p1, p2


def all_common_tracks(graph, tracks, include_features=True, min_common=50):
def all_common_tracks(graph, tracks, include_features=True, min_common=50, remaining_images=None):
"""
Returns a dictionary mapping image pairs to the list of tracks observed in both images
:param graph: Graph structure (networkx) as returned by :method:`DataSet.tracks_graph`
:param tracks: list of track identifiers
:param include_features: whether to include the features from the images
:param min_common: the minimum number of tracks the two images need to have in common
:param remaining_images: if not none, only find pairs from within this list
:return: tuple: im1, im2 -> tuple: tracks, features from first image, features from second image
"""
if remaining_images is not None:
# We just look at the subgraph comprising of remaining images, and tracks that pass through them
tracks = {track for imagename in remaining_images for track in graph[imagename]}
filtered_nodes = set(remaining_images).union(tracks)
graph = graph.subgraph(filtered_nodes)

track_dict = defaultdict(list)
for tr in tracks:
track_images = sorted(graph[tr].keys())
Expand Down
30 changes: 28 additions & 2 deletions opensfm/reconstruction.py
Original file line number Diff line number Diff line change
Expand Up @@ -905,13 +905,39 @@ def incremental_reconstruction(data):
data.invent_reference_lla()

graph = data.load_tracks_graph()

try:
existing_reconstructions = data.load_reconstruction()
# we remove any points that were in the previous reconstruction but are no longer in our graph
for reconstruction in existing_reconstructions:
reconstruction.points = {k: point for k, point in reconstruction.points.iteritems() if k in graph}
except IOError:
existing_reconstructions = []

reconstructed_images = set(image for reconstruction in existing_reconstructions for image in reconstruction.shots.keys())

tracks, images = matching.tracks_and_images(graph)
remaining_images = set(images)
remaining_images = set(images) - reconstructed_images
gcp = None
if data.ground_control_points_exist():
gcp = data.load_ground_control_points()
common_tracks = matching.all_common_tracks(graph, tracks)

reconstructions = []
for reconstruction in existing_reconstructions:
all_cameras = data.load_camera_models()
all_cameras.update(reconstruction.cameras)
reconstruction.cameras = all_cameras
grow_reconstruction(data, graph, reconstruction, remaining_images, gcp)
reconstructions.append(reconstruction)
reconstructions = sorted(reconstructions,
key=lambda x: -len(x.shots))
data.save_reconstruction(reconstructions)

if len(reconstructed_images) != 0:
common_tracks = matching.all_common_tracks(graph, tracks, remaining_images=remaining_images)
else:
# Filtering the graph is slow, so don't pass remaining_images if all images are remaining
common_tracks = matching.all_common_tracks(graph, tracks)
pairs = compute_image_pairs(common_tracks, data.config)
for im1, im2 in pairs:
if im1 in remaining_images and im2 in remaining_images:
Expand Down