Commit V0.5.3 (#79)

* Fixes issue 47

* Branch for next release - 0.5.3

* Cleans test

* updates test

* updates test

* updates test

* Adds logger to the main package

* Adds omx to supported export types
pedrocamargo authored Jan 31, 2020
1 parent 6fe8194 commit ba0b7ca
Showing 14 changed files with 130 additions and 143 deletions.
2 changes: 1 addition & 1 deletion aequilibrae/__init__.py
@@ -7,4 +7,4 @@

name = "aequilibrae"

StartsLogging()
logger = StartsLogging()
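
With this commit the package exposes a module-level logger: StartsLogging() now returns the logger object and __init__.py stores it as aequilibrae.logger. A minimal sketch of how calling code might use it, assuming only the handler setup shown in starts_logging.py further down; the message text and file name are illustrative:

    import logging
    from aequilibrae import logger

    # StartsLogging() already attached a DEBUG-level handler with a formatter,
    # so messages are emitted without further configuration.
    logger.info("AequilibraE package imported")

    # Callers can attach additional handlers of their own, e.g. a log file.
    logger.addHandler(logging.FileHandler("aequilibrae.log"))
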
2 changes: 2 additions & 0 deletions aequilibrae/matrix/aequilibrae_matrix.py
@@ -67,6 +67,8 @@


matrix_export_types = ["Aequilibrae matrix (*.aem)", "Comma-separated file (*.csv)"]
if has_omx:
matrix_export_types.append("Open matrix (*.omx)")


class AequilibraeMatrix(object):
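
Because the OMX entry is only appended when the omx package imports successfully (the has_omx flag), callers can check matrix_export_types before offering the format. A minimal sketch under that assumption; the matrix file names are illustrative, and it assumes AequilibraeMatrix.export() picks the output format from the file extension:

    from aequilibrae.matrix.aequilibrae_matrix import AequilibraeMatrix, matrix_export_types

    if any("*.omx" in fmt for fmt in matrix_export_types):
        mat = AequilibraeMatrix()
        mat.load("demand.aem")      # hypothetical existing matrix file
        mat.export("demand.omx")    # assumed to dispatch on the .omx extension
    else:
        print("omx package not installed; OMX export unavailable")
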
33 changes: 27 additions & 6 deletions aequilibrae/paths/AoN.pyx
@@ -23,6 +23,8 @@ from .__version__ import binary_version as VERSION_COMPILED
def one_to_all(origin, matrix, graph, result, aux_result, curr_thread):
cdef long nodes, orig, i, block_flows_through_centroids, classes, b, origin_index, zones, posit, posit1
cdef int critical_queries = 0
cdef int path_file = 0
cdef int skims
cdef int link_extract_queries, query_type

# Origin index is the index of the matrix we are assigning
@@ -33,6 +35,12 @@ def one_to_all(origin, matrix, graph, result, aux_result, curr_thread):
orig = origin
origin_index = graph.nodes_to_indices[orig]

#We transform the python variables in Cython variables
nodes = graph.num_nodes


skims = len(graph.skim_fields)

if VERSION_COMPILED != graph.__version__:
raise ValueError('This graph was created for a different version of AequilibraE. Please re-create it')

@@ -77,8 +85,13 @@ def one_to_all(origin, matrix, graph, result, aux_result, curr_thread):

# path file variables
# 'origin', 'node', 'predecessor', 'connector'
posit = origin_index * graph.num_nodes * result.path_file['save']
posit1 = posit + graph.num_nodes
if result.path_file['save']:
path_file = 1
posit = origin_index * graph.num_nodes * result.path_file['save']
posit1 = posit + graph.num_nodes
else:
posit = 0
posit1 = 1

cdef unsigned int [:] pred_view = result.path_file['results'].predecessor[posit:posit1]
cdef unsigned int [:] c_view = result.path_file['results'].connector[posit:posit1]
@@ -116,7 +129,16 @@ def one_to_all(origin, matrix, graph, result, aux_result, curr_thread):
reached_first_view,
node_load_view,
w)

if skims > 0:
skim_single_path(origin_index,
nodes,
skims,
skim_matrix_view,
predecessors_view,
conn_view,
graph_skim_view,
reached_first_view,
w)
if block_flows_through_centroids: # Re-blocks the centroid if that is the case
b = 1
blocking_centroid_flows(b,
@@ -128,8 +150,7 @@
_copy_skims(skim_matrix_view,
final_skim_matrices_view)

if result.path_file['save']:
with nogil:
if path_file > 0:
put_path_file_on_disk(orig,
pred_view,
predecessors_view,
@@ -185,7 +206,7 @@ def path_computation(origin, destination, graph, results):
#We transform the python variables in Cython variables
nodes = graph.num_nodes

# initializes skim_matrix for output
# initializes skim_matrix for output
# initializes predecessors and link connectors for output
results.predecessors.fill(-1)
results.connectors.fill(-1)
78 changes: 31 additions & 47 deletions aequilibrae/paths/graph.py
@@ -59,7 +59,7 @@ def __init__(self):

self.cost = None # This array holds the values being used in the shortest path routine
self.skims = False # 2-D Array with the fields to be computed as skims
self.skim_fields = False # List of skim fields to be used in computation
self.skim_fields = [] # List of skim fields to be used in computation
self.cost_field = False # Name of the cost field
self.ids = False # 1-D Array with link IDs (sequence from 0 to N-1)

@@ -128,7 +128,6 @@ def find_field_index(fields, field_name):

f = [str(x[0]) for x in fields]
raise ValueError(field_name + " does not exist. Fields available are: " + ", ".join(f))
return -1

# collect the fields in the network
check_titles = [id_field, dir_field, anode, bnode, cost_field]
@@ -356,54 +355,42 @@ def __build_dtype(self, all_titles):

# We set which are the fields that are going to be minimized in this file
# TODO: Change the call for all the uses on this function
def set_graph(self, cost_field=None, skim_fields=False, block_centroid_flows=None):
"""
:type cost_field
:type block_centroid_flows
:type skim_fields: list of fields for skims
:type self: object
"""
if block_centroid_flows is not None:
if isinstance(block_centroid_flows, bool):
self.set_blocked_centroid_flows(block_centroid_flows)
def set_graph(self, cost_field, block_centroid_flows=True) -> None:

if isinstance(block_centroid_flows, bool):
self.set_blocked_centroid_flows(block_centroid_flows)
else:
raise ValueError("block_centroid_flows needs to be a boolean")

if cost_field in self.graph.dtype.names:
self.cost_field = cost_field
if self.graph[cost_field].dtype == self.__float_type:
self.cost = self.graph[cost_field]
else:
raise ValueError("block_c" "entroid_flows needs to be a boolean")
self.cost = self.graph[cost_field].astype(self.__float_type)
Warning("Cost field with wrong type. Converting to float64")
else:
raise ValueError("cost_field not available in the graph:" + str(self.graph.dtype.names))

if cost_field is not None:
if cost_field in self.graph.dtype.names:
self.cost_field = cost_field
if self.graph[cost_field].dtype == self.__float_type:
self.cost = self.graph[cost_field]
else:
self.cost = self.graph[cost_field].astype(self.__float_type)
Warning("Cost field with wrong type. Converting to float64")
self.build_derived_properties()

else:
raise ValueError("cost_field not available in the graph:" + str(self.graph.dtype.names))
def set_skimming(self, skim_fields: list) -> None:

if self.cost_field is not None:
if not skim_fields:
skim_fields = [self.cost_field]
else:
s = [self.cost_field]
for i in skim_fields:
if i in self.graph.dtype.names:
if i not in s:
s.append(i)
else:
self.skim_fields = None
self.skims = None
raise ValueError("Skim", i, " not available in the graph:", self.graph.dtype.names)
skim_fields = s
else:
if skim_fields:
raise ValueError("Before setting skims, you need to set the cost field")
if not skim_fields:
self.skim_fields = []
self.skims = None

if isinstance(skim_fields, str):
skim_fields = [skim_fields]
elif not isinstance(skim_fields, list):
raise ValueError("You need to provide a list of skims or the same of a single field")

t = False
for i in skim_fields:
if self.graph[i].dtype != self.__float_type:
t = True
# Check if list of fields make sense
k = [x for x in skim_fields if x not in self.graph.dtype.names]
if k:
raise ValueError("At least one of the skim fields does not exist in the graph: {}".format(",".join(k)))

t = [x for x in skim_fields if self.graph[x].dtype != self.__float_type]

self.skims = np.zeros((self.num_links, len(skim_fields) + 1), self.__float_type)

@@ -416,9 +403,6 @@ def set_graph(self, cost_field=None, skim_fields=False, block_centroid_flows=None):
self.skims[:, i] = self.graph[j]
self.skim_fields = skim_fields

self.build_derived_properties()
return True

def set_blocked_centroid_flows(self, blocking):
if self.num_zones > 0:
self.block_centroid_flows = blocking
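
The graph API is reshaped here: the old set_graph(cost_field=None, skim_fields=False, block_centroid_flows=None) is split into set_graph(), which fixes the cost field and centroid blocking, and a new set_skimming(), which accepts a single field name or a list and validates it against the graph fields. A minimal sketch of the new call pattern; the graph path is illustrative:

    from aequilibrae.paths import Graph

    g = Graph()
    g.load_from_disk("my_network.aeg")   # hypothetical previously built graph

    # Cost field is now the only required argument; centroid blocking defaults to True
    g.set_graph(cost_field="distance", block_centroid_flows=True)

    # Skims are configured separately and can be a list or a single field name
    g.set_skimming(["distance"])
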
4 changes: 2 additions & 2 deletions aequilibrae/paths/network_skimming.py
@@ -80,15 +80,15 @@ def execute(self):
elif self.graph.fs[int(i)] == self.graph.fs[int(i) + 1]:
self.report.append("Centroid " + str(orig) + " does not exist in the graph")
else:
pool.apply_async(self.func_assig_thread, args=(orig, all_threads))
pool.apply_async(self.func_skim_thread, args=(orig, all_threads))
pool.close()
pool.join()

if pyqt:
self.skimming.emit(["text skimming", "Saving Outputs"])
self.skimming.emit(["finished_threaded_procedure", None])

def func_assig_thread(self, O, all_threads):
def func_skim_thread(self, O, all_threads):
if threading.get_ident() in all_threads:
th = all_threads[threading.get_ident()]
else:
4 changes: 2 additions & 2 deletions aequilibrae/paths/parameters.pxi
@@ -15,6 +15,6 @@ cdef ITYPE_t NULL_IDX = 18446744073709551615
cdef double INFINITE = 1.79769313e+308

VERSION = 0.5
MINOR_VRSN = 2
binary_version = "0.5.0"
MINOR_VRSN = 3
binary_version = "0.5.3"
release_name = "Karlsruhe"
2 changes: 2 additions & 0 deletions aequilibrae/starts_logging.py
@@ -27,3 +27,5 @@ def StartsLogging():
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)

return logger
12 changes: 8 additions & 4 deletions docs/source/usageexamples.rst
@@ -37,9 +37,12 @@ you need only a graph that you have previously built, and the list of skims you
g.load_from_disk(aeg_pth)

# You now have to set the graph for what you want
# In this case, we are computing fastest path (minimizing free flow time) and skimming **length** along the way
# In this case, we are computing fastest path (minimizing free flow time)
# We are also **blocking** paths from going through centroids
g.set_graph(cost_field='fftime', skim_fields=['length'],block_centroid_flows=True)
g.set_graph(cost_field='fftime', block_centroid_flows=True)

# We will be skimming for fftime **AND** length along the way
g.set_skimming(['fftime', 'length'])

# We instantiate the skim results and prepare it to have results compatible with the graph provided
result = skmr()
@@ -80,15 +83,16 @@ the list of centroids in your graph is updated to include all nodes in the graph

# And continue **almost** like we did before
# We just need to remember to NOT block paths through centroids. Otherwise there will be no paths available
g.set_graph(cost_field='fftime', skim_fields=['length'],block_centroid_flows=False)
g.set_graph(cost_field='fftime', block_centroid_flows=False)
g.set_skimming('fftime')

result = skmr()
result.prepare(g)

skm = NetworkSkimming(g, result)
skm.execute()

After it is all said and done, the skim matrices are part of the result object.
Setting skimming after setting the graph is **CRITICAL**, and the skim matrices are part of the result object.

You can save the results to your place of choice in AequilibraE format or export to OMX or CSV
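
A minimal sketch of that last step, assuming result.skims behaves like an AequilibraeMatrix whose export() method chooses the format from the file extension; the output paths are illustrative and the OMX option requires the omx package:

    # export the skim matrices computed above
    result.skims.export('path/to/skims.csv')
    result.skims.export('path/to/skims.omx')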

22 changes: 7 additions & 15 deletions tests/aequilibrae/paths/test__aon_skimming_single_origin.py
@@ -1,30 +1,22 @@
import os
import sys
import unittest
from aequilibrae.paths import Graph
from aequilibrae.paths.results import SkimResults
from aequilibrae.paths import skimming_single_origin
from aequilibrae.paths.multi_threaded_skimming import MultiThreadedNetworkSkimming
import numpy as np
from .parameters_test import centroids

# Adds the folder with the data to the path and collects the paths to the files
# lib_path = os.path.abspath(os.path.join('..', '../tests'))
# sys.path.append(lib_path)
from ...data import path_test, test_graph
from ...data import test_graph


class TestSkimming_single_origin(unittest.TestCase):
def test_skimming_single_origin(self):

origin = 1

# graph
g = Graph()
g.load_from_disk(test_graph)
g.set_graph(cost_field="distance", skim_fields=None)
# g.block_centroid_flows = False
# None implies that only the cost field will be skimmed
g.set_graph(cost_field="distance")
g.set_skimming("distance")

origin = np.random.choice(g.centroids, 1)[0]

# skimming results
res = SkimResults()
@@ -35,7 +27,7 @@ def test_skimming_single_origin(self):
a = skimming_single_origin(origin, g, res, aux_result, 0)
tot = np.sum(res.skims.distance[origin, :])
if tot > 10e10:
self.fail("Skimming was not successful. At least one np.inf returned.")
self.fail("Skimming was not successful. At least one np.inf returned for origin {}.".format(origin))

if a != origin:
self.fail("Skimming returned an error: " + a)
self.fail("Skimming returned an error: {} for origin {}".format(a, origin))