diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d359cdb094..34f3bff413 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -52,7 +52,7 @@ jobs:
       - uses: actions/setup-python@v5
         with:
-          python-version: "3.8"
+          python-version: "3.9"
 
       - name: Install system packages
         if: runner.os == 'Linux'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a3b9da62a8..cb8bd09eff 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 ## Next Release
 - Adding incremental infrastructure which allows pushing and popping constraints to/from the InputQuery.
+ - Dropped support for parsing the Tensorflow network format. The last Marabou version that supports Tensorflow is at commit 190555573e4702.
 
 ## Version 2.0.0
diff --git a/maraboupy/Marabou.py b/maraboupy/Marabou.py
index f92f7084bf..bbd15a1e9e 100644
--- a/maraboupy/Marabou.py
+++ b/maraboupy/Marabou.py
@@ -21,10 +21,6 @@
     from maraboupy.MarabouNetworkNNet import *
 except ImportError:
     warnings.warn("NNet parser is unavailable because the numpy package is not installed")
-try:
-    from maraboupy.MarabouNetworkTF import *
-except ImportError:
-    warnings.warn("Tensorflow parser is unavailable because tensorflow package is not installed")
 try:
     from maraboupy.MarabouNetworkONNX import *
 except ImportError:
@@ -43,24 +39,6 @@ def read_nnet(filename, normalize=False):
     """
     return MarabouNetworkNNet(filename, normalize=normalize)
 
-
-def read_tf(filename, inputNames=None, outputNames=None, modelType="frozen", savedModelTags=[]):
-    """Constructs a MarabouNetworkTF object from a frozen Tensorflow protobuf
-
-    Args:
-        filename (str): Path to tensorflow network
-        inputNames (list of str, optional): List of operation names corresponding to inputs
-        outputNames (list of str, optional): List of operation names corresponding to outputs
-        modelType (str, optional): Type of model to read. The default is "frozen" for a frozen graph.
-                                   Can also use "savedModel_v1" or "savedModel_v2" for the SavedModel format
-                                   created from either tensorflow versions 1.X or 2.X respectively.
-        savedModelTags (list of str, optional): If loading a SavedModel, the user must specify tags used, default is []
-
-    Returns:
-        :class:`~maraboupy.MarabouNetworkTF.MarabouNetworkTF`
-    """
-    return MarabouNetworkTF(filename, inputNames, outputNames, modelType, savedModelTags)
-
 def read_onnx(filename, inputNames=None, outputNames=None):
     """Constructs a MarabouNetworkONNX object from an ONNX file
diff --git a/maraboupy/MarabouNetwork.py b/maraboupy/MarabouNetwork.py
index ea4ff8c1b5..f7fb613976 100644
--- a/maraboupy/MarabouNetwork.py
+++ b/maraboupy/MarabouNetwork.py
@@ -105,7 +105,7 @@ def calculateBounds(self, filename="", verbose=True, options=None):
         if options == None:
             options = MarabouCore.Options()
         exitCode, bounds, stats = MarabouCore.calculateBounds(ipq, options, str(filename))
-        
+
         if verbose:
             print(exitCode)
             if exitCode == "":
@@ -115,95 +115,6 @@
 
         return [exitCode, bounds, stats]
 
-    def evaluateLocalRobustness(self, input, epsilon, originalClass, verbose=True, options=None, targetClass=None):
-        """Function evaluating a specific input is a local robustness within the scope of epslion
-
-        Args:
-            input (numpy.ndarray): Target input
-            epsilon (float): L-inf norm of purturbation
-            originalClass (int): Output class of a target input
-            verbose (bool): If true, print out solution after solve finishes
-            options (:class:`~maraboupy.MarabouCore.Options`): Object for specifying Marabou options, defaults to None
-            targetClass (int): If set, find a feasible solution with which the value of targetClass is max within outputs.
-
-        Returns:
-            (tuple): tuple containing:
-                - vals (Dict[int, float]): Empty dictionary if UNSAT, otherwise a dictionary of SATisfying values for variables
-                - stats (:class:`~maraboupy.MarabouCore.Statistics`): A Statistics object to how Marabou performed
-                - maxClass (int): Output class which value is max within outputs if SAT.
-        """
-        inputVars = None
-        if (type(self.inputVars) is list):
-            if (len(self.inputVars) != 1):
-                raise NotImplementedError("Operation for %d inputs is not implemented" % len(self.inputVars))
-            inputVars = self.inputVars[0][0]
-        elif (type(self.inputVars) is np.ndarray):
-            inputVars = self.inputVars[0]
-        else:
-            err_msg = "Unpexpected type of input vars."
-            raise RuntimeError(err_msg)
-
-        if inputVars.shape != input.shape:
-            raise RuntimeError("Input shape of the model should be same as the input shape\n input shape of the model: {0}, shape of the input: {1}".format(inputVars.shape, input.shape))
-
-        if (type(self.outputVars) is list):
-            if (len(self.outputVars) != 1):
-                raise NotImplementedError("Operation for %d outputs is not implemented" % len(self.outputVars))
-        elif (type(self.outputVars) is np.ndarray):
-            if (len(self.outputVars) != 1):
-                raise NotImplementedError("Operation for %d outputs is not implemented" % len(self.outputVars))
-        else:
-            err_msg = "Unpexpected type of output vars."
-            raise RuntimeError(err_msg)
-
-        if options == None:
-            options = MarabouCore.Options()
-
-        # Add constratins to all input nodes
-        flattenInputVars = inputVars.flatten()
-        flattenInput = input.flatten()
-        for i in range(flattenInput.size):
-            self.setLowerBound(flattenInputVars[i], flattenInput[i] - epsilon)
-            self.setUpperBound(flattenInputVars[i], flattenInput[i] + epsilon)
-
-        maxClass = None
-        outputStartIndex = self.outputVars[0][0][0]
-
-        if targetClass is None:
-            outputLayerSize = len(self.outputVars[0][0])
-            # loop for all of output classes except for original class
-            for outputLayerIndex in range(outputLayerSize):
-                if outputLayerIndex != originalClass:
-                    self.addMaxConstraint(set([outputStartIndex + outputLayerIndex, outputStartIndex + originalClass]),
-                                          outputStartIndex + outputLayerIndex)
-                    exitCode, vals, stats = self.solve(options = options)
-                    if (stats.hasTimedOut()):
-                        break
-                    elif (len(vals) > 0):
-                        maxClass = outputLayerIndex
-                        break
-        else:
-            self.addMaxConstraint(set(self.outputVars[0][0]), outputStartIndex + targetClass)
-            exitCode, vals, stats = self.solve(options = options)
-            if verbose:
-                if not stats.hasTimedOut() and len(vals) > 0:
-                    maxClass = targetClass
-
-        # print timeout, or feasible inputs and outputs if verbose is on.
-        if verbose:
-            if stats.hasTimedOut():
-                print("TO")
-            elif len(vals) > 0:
-                print("sat")
-                for j in range(len(self.inputVars[0])):
-                    for i in range(self.inputVars[0][j].size):
-                        print("input {} = {}".format(i, vals[self.inputVars[0][j].item(i)]))
-
-                for j in range(len(self.outputVars[0])):
-                    for i in range(self.outputVars[0][j].size):
-                        print("output {} = {}".format(i, vals[self.outputVars[0][j].item(i)]))
-
-        return [vals, stats, maxClass]
 
     def evaluateWithMarabou(self, inputValues, filename="evaluateWithMarabou.log", options=None):
         """Function to evaluate network at a given point using Marabou as solver
diff --git a/maraboupy/MarabouNetworkTF.py b/maraboupy/MarabouNetworkTF.py
deleted file mode 100644
index ea4ccc7623..0000000000
--- a/maraboupy/MarabouNetworkTF.py
+++ /dev/null
@@ -1,918 +0,0 @@
-'''
-Top contributors (to current version):
-    - Kyle Julian
-    - Christopher Lazarus
-    - Shantanu Thakoor
-    - Chelsea Sidrane
-
-This file is part of the Marabou project.
-Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
-in the top-level source directory) and their institutional affiliations.
-All rights reserved. See the file COPYING in the top-level source
-directory for licensing information.
-
-MarabouNetworkTF represents neural networks with piecewise linear constraints derived from tensorflow formats
-'''
-
-import numpy as np
-import os
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
-os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-
-import tensorflow as tf
-tf.compat.v1.disable_v2_behavior()
-from tensorflow.python.framework import tensor_util
-from tensorflow.python.framework import graph_util
-from maraboupy import MarabouUtils
-from maraboupy import MarabouNetwork
-
-# Try to load helper function available for tensorflow versions >=1.14.0
-try:
-    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-except:
-    pass
-# Need eager exectution for savedModel_v2 format
-try:
-    tf.compat.v1.enable_eager_execution()
-except:
-    pass
-
-class MarabouNetworkTF(MarabouNetwork.MarabouNetwork):
-    """Constructs a MarabouNetworkTF object from a frozen Tensorflow protobuf or SavedModel
-
-    Args:
-        filename (str): Path to tensorflow network
-        inputNames (list of str, optional): List of operation names corresponding to inputs
-        outputNames (list of str, optional): List of operation names corresponding to outputs
-        modelType (str, optional): Type of model to read. The default is "frozen" for a frozen graph.
-                                   Can also use "savedModel_v1" or "savedModel_v2" for the SavedModel format
-                                   created from either tensorflow versions 1.X or 2.X respectively.
-        savedModelTags (list of str, optional): If loading a SavedModel, the user must specify tags used, default is []
-    """
-    def __init__(self, filename, inputNames=None, outputNames=None, modelType="frozen", savedModelTags=[]):
-        super().__init__()
-        self.readTF(filename, inputNames, outputNames, modelType, savedModelTags)
-
-    def clear(self):
-        """Reset values to represent empty network
-        """
-        super().clear()
-        self.madeGraphEquations = []
-        self.varMap = dict()
-        self.constantMap = dict()
-        self.inputOps = None
-        self.outputOps = None
-        self.outputShapes = None
-        self.sess = None
-
-    def readTF(self, filename, inputNames, outputNames, modelType, savedModelTags):
-        """Read a tensorflow file to create a MarabouNetworkTF object
-
-        Args:
-            filename (str): Path to tensorflow network
-            inputNames (list of str, optional): List of operation names corresponding to inputs
-            outputNames (list of str, optional): List of operation names corresponding to outputs
-            modelType (str, optional): Type of model to read. The default is "frozen" for a frozen graph.
-                                       Can also use "savedModel_v1" or "savedModel_v2" for the SavedModel format
-                                       created from either tensorflow versions 1.X or 2.X respectively.
-            savedModelTags (list of str, optional): If loading a SavedModel, the user must specify tags used, default is []
-        """
-        # Read tensorflow model
-        if modelType == "frozen":
-            # Read frozen graph protobuf file
-            with tf.io.gfile.GFile(filename, "rb") as f:
-                graph_def = tf.compat.v1.GraphDef()
-                graph_def.ParseFromString(f.read())
-            with tf.Graph().as_default() as graph:
-                tf.import_graph_def(graph_def, name = "")
-            self.sess = tf.compat.v1.Session(graph=graph)
-
-        elif modelType == "savedModel_v1":
-            # Use default tag for savedModel format for tensorflow 1.X
-            if not savedModelTags:
-                savedModelTags = ["serve"]
-
-            # Read SavedModel format created by tensorflow version 1.X
-            sess = tf.compat.v1.Session()
-            tf.compat.v1.saved_model.loader.load(sess, savedModelTags, filename)
-
-            # Simplify graph using outputNames, which must be specified for SavedModel
-            simp_graph_def = graph_util.convert_variables_to_constants(sess,sess.graph.as_graph_def(),outputNames)
-            with tf.Graph().as_default() as graph:
-                tf.import_graph_def(simp_graph_def, name = "")
-            self.sess = tf.compat.v1.Session(graph=graph)
-
-        elif modelType == "savedModel_v2":
-            # Use default tag for savedModel format for tensorflow 2.X
-            if not savedModelTags:
-                savedModelTags = ["serving_default"]
-
-            # Read SavedModel format created by tensorflow version 2.X
-            sess = tf.compat.v1.Session()
-            loaded = tf.saved_model.load(filename)
-            model = loaded.signatures[savedModelTags[0]]
-
-            # Create a concrete function from model
-            full_model = tf.function(lambda x: model(x))
-            full_model = full_model.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
-
-            # Get frozen ConcreteFunction
-            frozen_func = convert_variables_to_constants_v2(full_model)
-            self.sess = tf.compat.v1.Session(graph=frozen_func.graph)
-
-        else:
-            err_msg = "Unknown input to modelType.\n"
-            err_msg += "Please use either 'frozen', 'savedModel_v1', or 'savedModel_v2'"
-            raise RuntimeError(err_msg)
-
-        # Valid names to use for input or output operations
-        valid_names = [op.node_def.name for op in self.sess.graph.get_operations() if self.isVariable(op)]
-
-        # Get output operation if outputName is given
-        if outputNames:
-            if isinstance(outputNames, str):
-                outputNames = [outputNames]
-
-            outputOps = []
-            for o in outputNames:
-                if o not in valid_names:
-                    err_msg = "The given output name %s is not an operation in the network.\n" % o
-                    err_msg += "Valid names are\n" + ", ".join(valid_names)
-                    raise RuntimeError(err_msg)
-                outputOps.append(self.sess.graph.get_operation_by_name(o))
-        # By default, assume that the last operation is the output if output not specified
-        else:
-            if modelType == "savedModel_v2":
-                outputOps = [output.op for output in self.sess.graph.outputs]
-            else:
-                outputOps = [self.sess.graph.get_operations()[-1]]
-
-        # Get input operations if inputNames is given
-        inputOps = []
-        if inputNames:
-            for i in inputNames:
-                if i not in valid_names:
-                    err_msg = "The input name %s is not an operation in the network.\n" % i
-                    err_msg += "Valid names are\n" + ", ".join(valid_names)
-                    raise RuntimeError(err_msg)
-                inputOps.append(self.sess.graph.get_operation_by_name(i))
-        # By default, use Placeholders that contribute to output operation as input operations
-        else:
-            for o in outputOps:
-                placeholders = self.findPlaceholders(o, [])
-                for i in placeholders:
-                    if i not in inputOps:
-                        inputOps.append(i)
-
-        # Input operation should not equal output operation
-        for o in outputOps:
-            if o in inputOps:
-                raise RuntimeError("%s cannot be used as both input and output" % (o.node_def.name))
-
-        self.setInputOps(inputOps)
-        self.setOutputOps(outputOps)
-
-        # Generate equations corresponding to network
-        self.foundnInputFlags = 0
-        for op in self.outputOps:
-            self.buildEquations(op)
-
-        # If we did not use all of the input variables, throw an error and tell the user which inputs were used
-        if self.foundnInputFlags < len(inputOps):
-            good_inputs = []
-            for iop in inputOps:
-                if iop in self.madeGraphEquations:
-                    good_inputs.append(iop.node_def.name)
-            err_msg = "All given inputs are part of the graph, but not all inputs contributed to one of the outputs %s\n"%outputNames
-            err_msg += "Please use only these inputs for the inputName list:\n"
-            err_msg += "['" + "', '".join(good_inputs) + "']"
-            raise RuntimeError(err_msg)
-
-        # Set output variables
-        self.outputVars = [self.varMap[outputOp].reshape(self.outputShapes[outputOp]) for outputOp in self.outputOps]
-
-    def findPlaceholders(self, op, returnOps):
-        """Function that recursively finds the placeholder operations that contribute to a given operation
-
-        Args:
-            op: (tf.op) representing operation in network
-            returnOps (list of tf.op) Placehoder operartions already found
-
-        Returns:
-            returnOps (list of tf.op) Placehoder operartions found so far
-
-        :meta private:
-        """
-        if op in returnOps:
-            return returnOps
-        if op.node_def.op == 'Const' or op in self.constantMap:
-            return returnOps
-        if op.node_def.op == 'Placeholder'or op in self.varMap:
-            return returnOps + [op]
-        for i in op.inputs:
-            returnOps = self.findPlaceholders(i.op, returnOps)
-        return returnOps
-
-    def setInputOps(self, ops):
-        """Function to set input operations
-
-        Args:
-            ops: (list of tf.op) list representing input operations
-
-        :meta private:
-        """
-        self.inputVars = []
-        self.inputShapes = []
-        self.dummyFeed = dict()
-        for op in ops:
-            shape = tuple(op.outputs[0].shape.as_list())
-            self.inputVars.append(self.makeNewVariables(op))
-
-            # Dummy inputs are needed to compute constants for later versions of tensorflow
-            self.dummyFeed[op.outputs[0]] = np.zeros(self.inputVars[-1].shape)
-        self.inputOps = ops
-
-    def setOutputOps(self, ops):
-        """Function to set output operations
-
-        Args:
-            ops: (list of tf.op) list representing output operations
-
-        :meta private:
-        """
-        self.outputShapes = {}
-        for op in ops:
-            shape = op.outputs[0].shape.as_list()
-            if shape[0] == None:
-                shape[0] = 1
-            self.outputShapes[op] = shape
-        self.outputOps = ops
-
-    def makeNewVariables(self, x):
-        """Function to find variables corresponding to operation
-
-        Args:
-            x: (tf.op) the operation to find variables for
-        Returns:
-            v: (np array) of variable numbers, in same shape as x
-
-        :meta private:
-        """
-        # Find number and shape of new variables representing output of given operation
-        shape = [a if a is not None else 1 for a in x.outputs[0].get_shape().as_list()]
-        size = np.prod(shape)
-
-        # Assign variables
-        v = np.array([self.getNewVariable() for _ in range(size)]).reshape(shape)
-        self.varMap[x] = v
-        assert all([np.equal(np.mod(i, 1), 0) for i in v.reshape(-1)]) # check if integers
-        return v
-
-    def getValues(self, op):
-        """Function to find underlying constants or variables representing operation
-
-        Args:
-            op: (tf.op) operation to find values for
-
-        Returns:
-            values: (np array) scalars or variable numbers representing the operation
-            isVar: (bool) true if the operation is represented by variables
-
-        :meta private:
-        """
-        if op in self.varMap:
-            return self.varMap[op], True
-        return self.constantMap[op], False
-
-    def isVariable(self, op):
-        """Function returning whether operation represents variable or constant
-
-        Args:
-            op: (tf.op) representing operation in network
-
-        Returns:
-            isVariable: (bool) true if variable, false if constant
-
-        :meta private:
-        """
-        if op.node_def.op == 'Const' or op in self.constantMap:
-            return False
-        if op.node_def.op == 'Placeholder'or op in self.varMap:
-            return True
-        return any([self.isVariable(i.op) for i in op.inputs])
-
-    def identity(self, op):
-        """Function handling identity operation, performed on variables
-
-        Args:
-            op: (tf.op) identity operation
-
-        :meta private:
-        """
-        self.varMap[op] = self.varMap[op.inputs[0].op]
-
-    def reshape(self, op):
-        """Function handling reshape operation, performed on variables
-
-        Args:
-            op: (tf.op) reshape operation
-
-        :meta private:
-        """
-        variables = self.varMap[op.inputs[0].op]
-        shape = self.constantMap[op.inputs[1].op]
-        self.varMap[op] = variables.reshape(shape)
-
-    def transpose(self, op):
-        """Function handling transpose operation, performed on variables
-
-        Args:
-            op: (tf.op) transpose operation
-
-        :meta private:
-        """
-        variables = self.varMap[op.inputs[0].op]
-        perm = self.constantMap[op.inputs[1].op]
-        self.varMap[op] = np.transpose(variables, perm)
-
-    def concat(self, op):
-        """Function handling concat operation, performed on variables
-
-        Args:
-            op: (tf.op) concat operation
-
-        :meta private:
-        """
-        input1 = self.varMap[op.inputs[0].op]
-        input2 = self.varMap[op.inputs[1].op]
-        axis = self.constantMap[op.inputs[2].op]
-        self.varMap[op] = np.concatenate([input1, input2], axis=axis)
-
-    def matMulEquations(self, op):
-        """Function to generate equations corresponding to matrix multiplication
-
-        Args:
-            op: (tf.op) representing matrix multiplication operation
-
-        :meta private:
-        """
-        # Get variables and constants of inputs
-        assert len(op.inputs) == 2
-        A, A_isVar = self.getValues(op.inputs[0].op)
-        B, B_isVar = self.getValues(op.inputs[1].op)
-
-        # For linear equations, both inputs cannot be variables
-        assert not A_isVar or not B_isVar
-
-        # Handle transpose attributes
-        aTranspose = op.node_def.attr['transpose_a'].b
-        bTranspose = op.node_def.attr['transpose_b'].b
-        if aTranspose:
-            A = np.transpose(A)
-        if bTranspose:
-            B = np.transpose(B)
-
-        # Get output variables and scalar values (if matmul is the input to an addition operation)
-        outputVars = self.makeNewVariables(op)
-        scalars, sgnVar, sgnScalar = self.getScalars(op, outputVars)
-
-        # Make sure shapes are valid
-        assert (A.shape[0], B.shape[1]) == outputVars.shape
-        assert A.shape[1] == B.shape[0]
-        m, n = outputVars.shape
-        p = A.shape[1]
-
-        # Generate equations
-        for i in range(m):
-            for j in range(n):
-                e = MarabouUtils.Equation()
-                for k in range(p):
-                    # Make sure addend is added with weight, the variable number
-                    if A_isVar:
-                        e.addAddend(B[k][j] * sgnVar, A[i][k])
-                    else:
-                        e.addAddend(A[i][k] * sgnVar, B[k][j])
-                e.addAddend(-1, outputVars[i][j])
-                e.setScalar(-sgnScalar * scalars[i][j])
-                self.addEquation(e)
-
-    def getScalars(self, op, outputVars):
-        """Get scalar values
-
-        This helper function determines if MatMul of Conv2D equations can be combined with a subsequent
-        addition or subtraction operation. If so, this function returns a scalar array, as well as
-        the sign used for the matmul variables and scalar constants. If combining MatMul/Conv2D with
-        addition fails, this returns values so that MatMul or Conv2D equations are unaffected.
-
-        Args:
-            op: (tf.op) representing conv2D operation
-            outputVars: (np array) output variables from MatMul/Conv2D
-
-        Returns:
-            scalars (np array) array of scalar values to be used with MatMul/Conv2D
-            sgnVar (int) sign for input variables to MatMul/Conv2D
-            sgnScalar (int) sign for scalars to MatMul/Conv2D
-
-        :meta private:
-        """
-        # If the given operation is the output operation, don't add any scalars
-        if op in self.outputOps:
-            return np.zeros(outputVars.shape), 1, 1
-
-        # Look trhough the graph to find operations where the output of given operation is an input
-        outputOp = op.outputs[0]
-        nextOps = []
-        for nextOp in self.sess.graph.get_operations():
-            if op.outputs[0] in nextOp.inputs:
-                nextOps.append(nextOp)
-
-        # If output of given operation is used in multiple operations, don't combine with addition
-        if len(nextOps) != 1:
-            return np.zeros(outputVars.shape), 1, 1
-
-        # The subsequent operation must be an addition/subtraction operation
-        nextOp = nextOps[0]
-        if nextOp.node_def.op not in ["Add", "AddV2", "BiasAdd", "Sub"]:
-            return np.zeros(outputVars.shape), 1, 1
-
-        # The subsequent addition/subtraction must combine the given operation with constants
-        input1 = nextOp.inputs[0]
-        input2 = nextOp.inputs[1]
-        if self.isVariable(input1.op) and self.isVariable(input2.op):
-            return np.zeros(outputVars.shape), 1, 1
-
-        # Signs corresponding to addition/subtraction
-        sgn1 = 1
-        sgn2 = 1
-        if nextOp.node_def.op in ["Sub"]:
-            sgn2 = -1
-
-        # Compute scalar values and signs depending on which input to nextOp is the output of given operation
-        if input1 == op.outputs[0]:
-            sgnVar = sgn1
-            sgnScalar = sgn2
-            # Extract constants with sess.run since they haven't been added to constantMap yet
-            self.constantMap[input2.op] = self.sess.run(input2, feed_dict = self.dummyFeed)
-
-            # Special case for BiasAdd with NCHW format. We need to add the bias along channels dimension
-            if nextOp.node_def.op == 'BiasAdd':
-                if 'data_format' in nextOp.node_def.attr:
-                    data_format = nextOp.node_def.attr['data_format'].s.decode().upper()
-                    if data_format == 'NCHW':
-                        in2 = self.constantMap[input2.op]
-                        self.constantMap[input2.op] = in2.reshape((1, len(in2), 1, 1))
-            scalars = np.broadcast_to(self.constantMap[input2.op], outputVars.shape)
-        else:
-            sgnVar = sgn2
-            sgnScalar = sgn1
-            # Extract constants with sess.run since they haven't been added to constantMap yet
-            self.constantMap[input1.op] = self.sess.run(input1, feed_dict = self.dummyFeed)
-            scalars = np.broadcast_to(self.constantMap[input1.op], outputVars.shape)
-
-        # Save outputVars to correspond to nextOp, so that new equations will not be generated for that operation
-        self.varMap[nextOp] = outputVars
-        return scalars, sgnVar, sgnScalar
-
-    def addEquations(self, op):
-        """Function to generate equations corresponding to all types of add/subtraction operations
-
-        Args:
-            op: (tf.op) representing addition or subtraction operations
-
-        :meta private:
-        """
-        # We may have included the add equation with a prior matmul equation
-        if op in self.varMap:
-            return
-
-        # Get inputs and outputs
-        assert len(op.inputs) == 2
-        input1, input1_isVar = self.getValues(op.inputs[0].op)
-        input2, input2_isVar = self.getValues(op.inputs[1].op)
-        outputVars = self.makeNewVariables(op).flatten()
-
-        # Special case for BiasAdd with NCHW format. We need to add the bias along the channels dimension
-        if op.node_def.op == 'BiasAdd':
-            data_format = 'NHWC'
-            if 'data_format' in op.node_def.attr:
-                data_format = op.node_def.attr['data_format'].s.decode().upper()
-            if data_format == 'NCHW':
-                input2 = input2.reshape((1, len(input2), 1, 1))
-
-        # Broadcast and flatten. Assert that lengths are all the same
-        input1, input2 = np.broadcast_arrays(input1, input2)
-        input1 = input1.flatten()
-        input2 = input2.flatten()
-        assert len(input1) == len(input2)
-        assert len(outputVars) == len(input1)
-
-        # Signs for addition/subtraction
-        sgn1 = 1
-        sgn2 = 1
-        if op.node_def.op in ["Sub"]:
-            sgn2 = -1
-
-        # Create new equations depending on if the inputs are variables or constants
-        # At least one input must be a variable, otherwise this operation is a constant,
-        # which gets caught in makeGraphEquations.
-        assert input1_isVar or input2_isVar
-
-        # Always negate the scalar term because it changes sides in equation, from
-        # w1*x1+...wk*xk + b = x_out
-        # to
-        # w1*x1+...wk+xk - x_out = -b
-        if input1_isVar and input2_isVar:
-            for i in range(len(outputVars)):
-                e = MarabouUtils.Equation()
-                e.addAddend(sgn1, input1[i])
-                e.addAddend(sgn2, input2[i])
-                e.addAddend(-1, outputVars[i])
-                e.setScalar(0.0)
-                self.addEquation(e)
-        elif input1_isVar:
-            for i in range(len(outputVars)):
-                e = MarabouUtils.Equation()
-                e.addAddend(sgn1, input1[i])
-                e.addAddend(-1, outputVars[i])
-                e.setScalar(-sgn2 * input2[i])
-                self.addEquation(e)
-        else:
-            for i in range(len(outputVars)):
-                e = MarabouUtils.Equation()
-                e.addAddend(sgn2, input2[i])
-                e.addAddend(-1, outputVars[i])
-                e.setScalar(-sgn1 * input1[i])
-                self.addEquation(e)
-
-    def mulEquations(self, op):
-        """Function to generate equations corresponding to multiply and divide operations
-
-        Args:
-            op: (tf.op) representing an element-wise multiply or divide operation
-
-        :meta private:
-        """
-        # Get inputs and outputs
-        assert len(op.inputs) == 2
-        input1, input1_isVar = self.getValues(op.inputs[0].op)
-        input2, input2_isVar = self.getValues(op.inputs[1].op)
-
-        # For linear equations, both inputs cannot be variables
-        assert not input1_isVar or not input2_isVar
-
-        # If multiplying by 1, no need for new equations
-        if input1_isVar and np.all(input2 == 1.0):
-            self.varMap[op] = input1
-            return
-        if input2_isVar and np.all(input1 == 1.0):
-            self.varMap[op] = input2
-            return
-
-        # Broadcast and flatten. Assert that lengths are all the same
-        input1, input2 = np.broadcast_arrays(input1, input2)
-        outputVars = self.makeNewVariables(op)
-        scalars, sgnVar, sgnScalar = self.getScalars(op, outputVars)
-        input1 = input1.flatten()
-        input2 = input2.flatten()
-        outputVars = outputVars.flatten()
-        scalars = scalars.flatten()
-        assert len(input1) == len(input2)
-        assert len(outputVars) == len(input1)
-
-        # Handle divide by negating power
-        power = 1.0
-        if op.node_def.op in ["RealDiv"]:
-            power = -1.0
-
-        # Equations
-        if input1_isVar:
-            for i in range(len(outputVars)):
-                e = MarabouUtils.Equation()
-                e.addAddend(sgnVar * input2[i]**power, input1[i])
-                e.addAddend(-1, outputVars[i])
-                e.setScalar(-sgnScalar * scalars[i])
-                self.addEquation(e)
-        else:
-            if power == -1.0:
-                raise RuntimeError("Dividing a constant by a variable is not allowed")
-            for i in range(len(outputVars)):
-                e = MarabouUtils.Equation()
-                e.addAddend(sgnVar * input1[i], input2[i])
-                e.addAddend(-1, outputVars[i])
-                e.setScalar(-sgnScalar * scalars[i])
-                self.addEquation(e)
-
-    def conv2DEquations(self, op):
-        """Function to generate equations corresponding to 2D convolution operation
-
-        Args:
-            op: (tf.op) representing conv2D operation
-
-        :meta private:
-        """
-
-        # Get input variables and constants
-        assert len(op.inputs) == 2
-        inputVars = self.varMap[op.inputs[0].op]
-        filters = self.constantMap[op.inputs[1].op]
-
-        # Make new variables for output
-        outputVars = self.makeNewVariables(op)
-
-        # Extract attributes
-        padding = op.node_def.attr['padding'].s.decode().upper()
-        strides = list(op.node_def.attr['strides'].list.i)
-        data_format = op.node_def.attr['data_format'].s.decode().upper()
-
-        # Handle different data formats
-        if data_format == 'NHWC':
-            out_num, out_height, out_width, out_channels = outputVars.shape
-            in_num, in_height, in_width, in_channels = inputVars.shape
-            strides_height = strides[1]
-            strides_width = strides[2]
-        elif data_format == 'NCHW':
-            out_num, out_channels, out_height, out_width = outputVars.shape
-            in_num, in_channels, in_height, in_width = inputVars.shape
-            strides_height = strides[2]
-            strides_width = strides[3]
-        else:
-            raise NotImplementedError("Network uses %s data format. Only 'NHWC' and 'NCHW' are currently supported" % data_format)
-
-        # Assert that dimensions match up
-        filter_height, filter_width, filter_channels, num_filters = filters.shape
-        assert out_num == in_num
-        assert filter_channels == in_channels
-        assert out_channels == num_filters
-
-        # Use padding to determine top and left offsets
-        if padding=='SAME':
-            pad_top = max((out_height - 1) * strides_height + filter_height - in_height, 0) // 2
-            pad_left = max((out_width - 1) * strides_width + filter_width - in_width, 0) // 2
-        elif padding=='VALID':
-            pad_top = 0
-            pad_left = 0
-        else:
-            raise NotImplementedError("Network uses %s for conv padding. Only 'SAME' and 'VALID' padding are supported" % padding)
-
-        # Try to get scalar values in case this operation is followed by BiasAddition
-        scalars, sgnVar, sgnScalar = self.getScalars(op, outputVars)
-
-        # Generate equations
-        # There is one equation for every output variable
-        for n in range(out_num):
-            for i in range(out_height):
-                for j in range(out_width):
-                    for k in range(out_channels): # Out_channel also corresponds to filter number
-                        e = MarabouUtils.Equation()
-
-                        # The equation convolves the filter with the specified input region
-                        # Iterate over the filter
-                        for di in range(filter_height):
-                            for dj in range(filter_width):
-                                for dk in range(filter_channels):
-
-                                    # Get 2D location of filter with respect to input variables
-                                    h_ind = strides_height * i + di - pad_top
-                                    w_ind = strides_width * j + dj - pad_left
-
-                                    # Build equation when h_ind and w_ind are valid
-                                    if h_ind < in_height and h_ind >= 0 and w_ind < in_width and w_ind >= 0:
-                                        if data_format == 'NHWC':
-                                            var = inputVars[n][h_ind][w_ind][dk]
-                                            c = filters[di][dj][dk][k] * sgnVar
-                                            e.addAddend(c, var)
-                                        else:
-                                            var = inputVars[n][dk][h_ind][w_ind]
-                                            c = filters[di][dj][dk][k] * sgnVar
-                                            e.addAddend(c, var)
-
-                        # Add output variable
-                        if data_format == 'NHWC':
-                            e.addAddend(-1, outputVars[n][i][j][k])
-                            e.setScalar(-sgnScalar * scalars[n][i][j][k])
-                            self.addEquation(e)
-                        else:
-                            e.addAddend(-1, outputVars[n][k][i][j])
-                            e.setScalar(-sgnScalar * scalars[n][k][i][j])
-                            self.addEquation(e)
-
-    def reluEquations(self, op):
-        """Function to generate equations corresponding to pointwise Relu
-
-        Args:
-            op: (tf.op) representing Relu operation
-
-        :meta private:
-        """
-        # Get input and output variables
-        assert len(op.inputs) == 1
-        inputVars = self.varMap[op.inputs[0].op].flatten()
-        outputVars = self.makeNewVariables(op).flatten()
-        assert len(inputVars) == len(outputVars)
-
-        # Generate Relu constratins
-        for inVar, outVar in zip(inputVars, outputVars):
-            self.addRelu(inVar, outVar)
-            self.setLowerBound(outVar, 0.0)
-
-    def maxpoolEquations(self, op):
-        """Function to generate maxpooling equations
-
-        Args:
-            op: (tf.op) representing maxpool operation
-
-        :meta private:
-        """
-        # Get input and output variables
-        assert len(op.inputs) == 1
-        inputVars = self.varMap[op.inputs[0].op]
-        outputVars = self.makeNewVariables(op)
-
-        # Get attributes
-        padding = op.node_def.attr['padding'].s.decode().upper()
-        strides = list(op.node_def.attr['strides'].list.i)
-        ksize = list(op.node_def.attr['ksize'].list.i)
-        data_format = op.node_def.attr['data_format'].s.decode().upper()
-
-        # Handle the different data formats
-        if data_format == 'NHWC':
-            out_num, out_height, out_width, out_channels = outputVars.shape
-            in_num, in_height, in_width, in_channels = inputVars.shape
-            strides_height = strides[1]
-            strides_width = strides[2]
-            ksize_height = ksize[1]
-            ksize_width = ksize[2]
-        elif data_format == 'NCHW':
-            out_num, out_channels, out_height, out_width = outputVars.shape
-            in_num, in_channels, in_height, in_width = inputVars.shape
-            strides_height = strides[2]
-            strides_width = strides[3]
-            ksize_height = ksize[2]
-            ksize_width = ksize[3]
-        else:
-            raise NotImplementedError("Network uses %s data format. Only 'NHWC' and 'NCHW' are currently supported" % data_format)
-
-        # Number of data points and channels should remain the same
-        assert out_num == in_num
-        assert out_channels == in_channels
-
-        # Determine top and left padding for the padding type
-        if padding == "VALID":
-            pad_top = 0
-            pad_left = 0
-        elif padding == "SAME":
-            pad_top = max((out_height - 1) * strides_height + ksize_height - in_height, 0) // 2
-            pad_left = max((out_width - 1) * strides_width + ksize_width - in_width, 0) // 2
-        else:
-            raise NotImplementedError("Network uses %s padding for maxpool. Only 'VALID' and 'SAME' padding are currently supported" % padding)
-
-        # Create max constraints, one for every output variable
-        for n in range(out_num):
-            for i in range(out_height):
-                for j in range(out_width):
-                    for k in range(out_channels):
-                        maxVars = set()
-
-                        # Iterate over the input region of the max filter
-                        for di in range(ksize_height):
-                            for dj in range(ksize_width):
-
-                                # Get 2D location of filter with respect to input variables
-                                h_ind = strides_height * i + di - pad_top
-                                w_ind = strides_width * j + dj - pad_left
-
-                                # Add input variable to set if it is valid
-                                if h_ind < in_height and w_ind < in_width and h_ind>=0 and w_ind>=0:
-                                    if data_format == "NHWC":
-                                        maxVars.add(inputVars[n][h_ind][w_ind][k])
-                                    else:
-                                        maxVars.add(inputVars[n][k][h_ind][w_ind])
-
-                        # Set output variable and add max constraint
-                        if data_format == "NHWC":
-                            self.addMaxConstraint(maxVars, outputVars[n][i][j][k])
-                        else:
-                            self.addMaxConstraint(maxVars, outputVars[n][k][i][j])
-
-    def signEquations(self, op):
-        """Function to generate equations corresponding to pointwise sign activation
-
-        Args:
-            op: (tf.op) representing Sign operation
-
-        :meta private:
-        """
-        # Get input and output variables
-        assert len(op.inputs) == 1
-        inputVars = self.varMap[op.inputs[0].op].flatten()
-        outputVars = self.makeNewVariables(op).flatten()
-        assert len(inputVars) == len(outputVars)
-
-        # Generate sign constraints
-        for inVar, outVar in zip(inputVars, outputVars):
-            self.addSignConstraint(inVar, outVar)
-            self.setLowerBound(outVar, -1.0)
-            self.setUpperBound(outVar, 1.0)
-
-    def makeEquations(self, op):
-        """Function to generate equations corresponding to given operation
-
-        Args:
-            op: (tf.op) for which to generate equations
-
-        :meta private:
-        """
-        # These operations do not create new equations, but manipulate the output from previous equations
-        if op.node_def.op in ["Identity", "IdentityN"]:
-            self.identity(op)
-        elif op.node_def.op == "Reshape":
-            self.reshape(op)
-        elif op.node_def.op == "ConcatV2":
-            self.concat(op)
-        elif op.node_def.op == "Transpose":
-            self.transpose(op)
-
-        # These operations do require new equations
-        elif op.node_def.op == 'MatMul':
-            self.matMulEquations(op)
-        elif op.node_def.op in ['BiasAdd', 'Add', 'AddV2', 'Sub']:
-            self.addEquations(op)
-        elif op.node_def.op in ['Mul', 'RealDiv']:
-            self.mulEquations(op)
-        elif op.node_def.op in ['Conv2D','QuantConv2D']:
-            self.conv2DEquations(op)
-        elif op.node_def.op == 'Relu':
-            self.reluEquations(op)
-        elif op.node_def.op == 'Sign':
-            self.signEquations(op)
-        elif op.node_def.op == 'MaxPool':
-            self.maxpoolEquations(op)
-
-        # If we've recursed to find a Placeholder operation, this operation needs to be added the inputName list
-        elif op.node_def.op == 'Placeholder':
-            raise RuntimeError("One of the outputs in %s depends on placeholder %s.\nPlease add '%s' to the inputName list."
-                               % ([self.outputOps[i].node_def.name for i in range(len(self.outputOps))], op.node_def.name, op.node_def.name))
-        else:
-            raise NotImplementedError("Operation %s not implemented" % (op.node_def.op))
-
-    def buildEquations(self, op):
-        """Function that searches the graph recursively and builds equations necessary to calculate op
-
-        Args:
-            op: (tf.op) representing operation until which we want to generate equations
-
-        :meta private:
-        """
-        # No need to make more equations if we've already seen this operation
-        if op in self.madeGraphEquations:
-            return
-        self.madeGraphEquations += [op]
-
-        # Stop recursing once an input variable is found
-        # No equations are needed for the input variables, since they
-        # are not the output of any equation
-        if op in self.inputOps:
-            self.foundnInputFlags += 1
-            return
-
-        # If operation is a constant, or the product of equations using
-        # constants, don't recurse because equations are not needed
-        if not self.isVariable(op):
-            self.constantMap[op] = self.sess.run(op.outputs[0], feed_dict = self.dummyFeed)
-            return
-
-        # Recurse using operation inputs
-        in_ops = [x.op for x in op.inputs]
-        for x in in_ops:
-            self.buildEquations(x)
-
-        # Make equations after recursing to build equations of inputs first
-        self.makeEquations(op)
-
-    def evaluateWithoutMarabou(self, inputValues):
-        """Function to evaluate network at a given point using Tensorflow
-
-        Args:
-            inputValues: (list of np arrays) representing inputs to network
-
-        Returns:
-            outputValues: (list of np arrays) representing output of network
-        """
-        if len(inputValues) != len(self.inputOps):
-            raise RuntimeError("Bad input given. Please provide a list of %d np arrays"%len(self.inputOps))
-
-        # Try to reshape inputs to shapes desired by tensorflow
-        inReshaped = []
-        for i, (op, inputVals) in enumerate(zip(self.inputOps, inputValues)):
-            opShape = self.varMap[op].shape
-            if np.prod(opShape) != np.prod(inputVals.shape):
-                raise RuntimeError("Input %d should have shape "%i + str(opShape) +
-                                   ", or be reshapeable to this shape, but an array with shape " + str(inputVals.shape) + " was given")
-            inReshaped.append(np.array(inputVals).reshape(opShape))
-
-        # Evaluate network
-        inputNames = [o.outputs[0] for o in self.inputOps]
-        feed_dict = dict(zip(inputNames, inReshaped))
-        out = [self.sess.run(outputOp.outputs[0], feed_dict=feed_dict) for outputOp in self.outputOps]
-
-        # Make sure to return output in correct shape
-        return [out[i].reshape(self.varMap[outputOp].shape) for i, outputOp in enumerate(self.outputOps)]
diff --git a/maraboupy/examples/2_ONNXExample.py b/maraboupy/examples/1_ONNXExample.py
similarity index 100%
rename from maraboupy/examples/2_ONNXExample.py
rename to maraboupy/examples/1_ONNXExample.py
diff --git a/maraboupy/examples/1_TensorflowExample.py b/maraboupy/examples/1_TensorflowExample.py
deleted file mode 100644
index 626d516037..0000000000
--- a/maraboupy/examples/1_TensorflowExample.py
+++ /dev/null
@@ -1,51 +0,0 @@
-'''
-Tensorflow Example
-====================
-
-Top contributors (to current version):
-    - Christopher Lazarus
-    - Kyle Julian
-
-This file is part of the Marabou project.
-Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
-in the top-level source directory) and their institutional affiliations.
-All rights reserved. See the file COPYING in the top-level source
-directory for licensing information.
-'''
-
-from maraboupy import Marabou
-import numpy as np
-
-# %%
-# This network has inputs x0, x1, and was trained to create outputs that approximate
-# y0 = abs(x0) + abs(x1), y1 = x0^2 + x1^2
-filename = "../../resources/tf/frozen_graph/fc1.pb"
-network = Marabou.read_tf(filename)
-
-# %%
-# Or, you can specify the operation names of the input and output operations.
-# The default chooses the placeholder operations as input and the last operation as output
-inputNames = ['Placeholder']
-outputNames = ['y_out']
-network = Marabou.read_tf(filename = filename, inputNames = inputNames, outputNames = outputNames)
-
-# %%
-# Get the input and output variable numbers; [0] since first dimension is batch size
-inputVars = network.inputVars[0][0]
-outputVars = network.outputVars[0]
-
-# %%
-# Set input bounds on both input variables
-network.setLowerBound(inputVars[0],-10.0)
-network.setUpperBound(inputVars[0], 10.0)
-network.setLowerBound(inputVars[1],-10.0)
-network.setUpperBound(inputVars[1], 10.0)
-
-# %%
-# Set output bounds on the second output variable
-network.setLowerBound(outputVars[0][1], 194.0)
-network.setUpperBound(outputVars[0][1], 210.0)
-
-# %%
-# Call to C++ Marabou solver
-exitCode, vals, stats = network.solve("marabou.log")
diff --git a/maraboupy/examples/7_PythonicAPI.py b/maraboupy/examples/2_PythonicAPI.py
similarity index 100%
rename from maraboupy/examples/7_PythonicAPI.py
rename to maraboupy/examples/2_PythonicAPI.py
diff --git a/maraboupy/parsers/ONNXParser.py b/maraboupy/parsers/ONNXParser.py
index b255a24478..ade092ab38 100644
--- a/maraboupy/parsers/ONNXParser.py
+++ b/maraboupy/parsers/ONNXParser.py
@@ -1239,7 +1239,7 @@ def subEquations(self, node, makeEquations):
         nodeName = node.output[0]
         inputName1, inputName2 = node.input[0], node.input[1]
         assert inputName1 in self.shapeMap and inputName2 in self.shapeMap
-        assert self.shapeMap[inputName1] == self.shapeMap[inputName2]
+        assert list(self.shapeMap[inputName1]) == list(self.shapeMap[inputName2])
         self.shapeMap[nodeName] = self.shapeMap[inputName1]
 
         if not makeEquations:
@@ -1258,7 +1258,7 @@
             e = MarabouUtils.Equation()
             e.addAddend(1, inputVars[i])
             e.addAddend(-1, outputVars[i])
-            e.setScalar(-constants[i])
+            e.setScalar(constants[i])
             self.query.addEquation(e)
 
     def sigmoidEquations(self, node, makeEquations):
diff --git a/maraboupy/test/test_network.py b/maraboupy/test/test_network.py
index 9644b40df3..03dd125413 100644
--- a/maraboupy/test/test_network.py
+++ b/maraboupy/test/test_network.py
@@ -579,101 +579,49 @@ def test_resize():
             for l in range(len(outputVars[i][j][k])):
                 assert abs(vals[outputVars[i][j][k][l]] - expectedOutputValues[i][j][k][l]) < TOL
 
-def test_local_robustness_unsat():
-    """
-    Tests local robustness of an nnet network. (UNSAT)
-    """
-    filename = "fc_2-2-3.nnet"
-
-    network = loadNetwork(filename)
-    options = Marabou.createOptions(verbosity = 0)
-
-    input = np.array([1, 0])
-    vals, stats, maxClass = network.evaluateLocalRobustness(input=input, epsilon=0.1, originalClass=0, options=options, targetClass=None)
-
-    # should be local robustness
-    assert(len(vals) == 0)
-
-def test_local_robustness_sat():
-    """
-    Tests local robustness of an nnet network. (SAT)
-    """
-    filename = "fc_2-2-3.nnet"
-
-    network = loadNetwork(filename)
-    options = Marabou.createOptions(verbosity = 0)
-
-    input = np.array([1, -2])
-    vals, stats, maxClass = network.evaluateLocalRobustness(input=input, epsilon=0.1, originalClass=0, options=options, targetClass=None)
-
-    # should be not local robustness
-    assert(len(vals) > 0)
-
-def test_local_robustness_unsat_of_onnx():
-    """
-    Tests local robustness of an onnx network. (UNSAT)
-    """
-    filename = "fc_2-2-3.onnx"
-
-    network = loadNetworkInONNX(filename)
-    options = Marabou.createOptions(verbosity = 0)
-
-    input = np.array([1, 0])
-    vals, stats, maxClass = network.evaluateLocalRobustness(input=input, epsilon=0.1, originalClass=0, options=options, targetClass=None)
-
-    # should be local robustness
-    assert(len(vals) == 0)
-
-def test_local_robustness_sat_of_onnx():
+def test_calculate_bounds():
     """
-    Tests local robustness of an onnx network. (SAT)
+    Tests calculate bounds of an onnx network
     """
     filename = "fc_2-2-3.onnx"
-
-    network = loadNetworkInONNX(filename)
     options = Marabou.createOptions(verbosity = 0)
 
-    input = np.array([1, 0])
-    vals, stats, maxClass = network.evaluateLocalRobustness(input=input, epsilon=1.0, originalClass=0, options=options, targetClass=None)
-
-    # should be not local robustness
-    assert(len(vals) > 0)
-
-def test_local_robustness_sat_with_target_class():
-    """
-    Tests local robustness of an onnx network. (SAT)
-    """
-    filename = "fc_2-2-3.onnx"
-
+    # Not UNSAT case
    network = loadNetworkInONNX(filename)
-    options = Marabou.createOptions(verbosity = 0)
-
-    input = np.array([1, -2])
-    targetClass = 1
-    vals, stats, maxClass = network.evaluateLocalRobustness(input=input, epsilon=0.1, originalClass=0, options=options, targetClass=targetClass)
+    inputVars = network.inputVars[0][0]
+    network.setLowerBound(inputVars[0], 3)
+    network.setUpperBound(inputVars[0], 4)
+    network.setLowerBound(inputVars[1], -2)
+    network.setUpperBound(inputVars[1], -1)
 
-    # should be not local robustness
-    assert(len(vals) > 0)
+    # calculate bounds
+    exitCode, vals, _ = network.calculateBounds(options = options)
 
-    # maxClass should be equal to targetClass
-    assert(maxClass == targetClass)
+    # exitCode should be empty
+    assert(exitCode == '')
 
-def test_local_robustness_sat_of_onnx_3D():
-    """
-    Tests local robustness of an onnx network which input's dimension is 3. (SAT)
-    """
-    filename = "KJ_TinyTaxiNet.onnx"
+    # output bounds should be correct
+    assert(vals[network.outputVars[0][0][0]] == (2.0, 6.0))
+    assert(vals[network.outputVars[0][0][1]] == (-3.0, -1.0))
+    assert(vals[network.outputVars[0][0][2]] == (1.0, 3.0))
 
+    # UNSAT case
     network = loadNetworkInONNX(filename)
-    options = Marabou.createOptions(verbosity = 0)
+    inputVars = network.inputVars[0][0]
+    outputVars = network.outputVars[0]
+    network.setLowerBound(inputVars[0], 3)
+    network.setUpperBound(inputVars[0], 4)
+    network.setLowerBound(inputVars[1], -2)
+    network.setUpperBound(inputVars[1], -1)
+    network.setUpperBound(outputVars[0][0], 1)
 
-    input = np.ones([8, 16, 1]) * 0.5
-    vals, stats, maxClass = network.evaluateLocalRobustness(input=input, epsilon=1.0, originalClass=0, options=options, targetClass=None)
+    # calculate bounds
+    exitCode, vals, _ = network.calculateBounds(options = options)
 
-    # should be not local robustness
-    assert(len(vals) > 0)
+    # exitCode should be unsat
+    assert(exitCode == 'unsat')
 
-def test_calculate_bounds():
+def test_calculate_bounds_no_opt():
     """
     Tests calculate bounds of an onnx network
     """
@@ -689,9 +637,9 @@
     network.setUpperBound(inputVars[1], -1)
 
     # calculate bounds
-    exitCode, vals, _ = network.calculateBounds(options = options)
+    exitCode, vals, _ = network.calculateBounds()
 
-    # exitCode should be empty 
+    # exitCode should be empty
     assert(exitCode == '')
 
     # output bounds should be correct
@@ -710,9 +658,9 @@
     network.setUpperBound(outputVars[0][0], 1)
 
     # calculate bounds
-    exitCode, vals, _ = network.calculateBounds(options = options)
+    exitCode, vals, _ = network.calculateBounds()
 
-    # exitCode should be unsat 
+    # exitCode should be unsat
    assert(exitCode == 'unsat')
 
 def loadNetwork(filename):
@@ -733,4 +681,3 @@ def evaluateNetwork(network, testInputs, testOutputs):
     for testInput, testOutput in zip(testInputs, testOutputs):
         marabouEval = network.evaluateWithMarabou([testInput], options = OPT, filename = "")[0].flatten()
         assert max(abs(marabouEval - testOutput)) < TOL
-
diff --git a/maraboupy/test/test_onnx.py b/maraboupy/test/test_onnx.py
index 0087035aa8..e18745f54f 100644
--- a/maraboupy/test/test_onnx.py
+++ b/maraboupy/test/test_onnx.py
@@ -39,6 +39,10 @@ def test_split_onnx():
     os.remove(presplit_filename)
     os.remove(postsplit_filename)
 
+def test_sub():
+    filename = "test_sub.onnx"
+    evaluateFile(filename)
+
 def test_dropout():
     filename = "test_dropout.onnx"
     evaluateFile(filename)
diff --git a/maraboupy/test/test_tf.py b/maraboupy/test/test_tf.py
deleted file mode 100644
index 053b83805f..0000000000
--- a/maraboupy/test/test_tf.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Supress warnings caused by tensorflow
-import warnings
-warnings.filterwarnings('ignore', category = DeprecationWarning)
-warnings.filterwarnings('ignore', category = PendingDeprecationWarning)
-
-import pytest
-from maraboupy import Marabou
-import numpy as np
-import os
-
-# Global settings
-OPT = Marabou.createOptions(verbosity = 0) # Turn off printing
-TOL = 1e-4 # Set tolerance for checking Marabou evaluations
-FG_FOLDER = "../../resources/tf/frozen_graph/" # Folder for test networks written in frozen graph format
-SM1_FOLDER = "../../resources/tf/saved_model_v1/" # Folder for test networks written in SavedModel format from tensorflow v1.X
-SM2_FOLDER = "../../resources/tf/saved_model_v2/" # Folder for test networks written in SavedModel format from tensorflow v2.X
-np.random.seed(123) # Seed random numbers for repeatability
-NUM_RAND = 5 # Default number of random test points per example
-
-def test_fc1():
-    """
-    Test a fully-connected neural network
-    Uses Const, Identity, Placeholder, MutMul, Add, and Relu layers
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "fc1.pb")
-    network = Marabou.read_tf(filename)
-    evaluateNetwork(network)
-
-def test_fc2():
-    """
-    Test a fully-connected neural network.
-    This network tests different types of Mul and RealDiv layers.
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "fc2.pb")
-    network = Marabou.read_tf(filename, outputNames = ["y"])
-    evaluateNetwork(network)
-
-def test_KJ_TinyTaxiNet():
-    """
-    Test a convolutional network without max-pooling
-    Uses Const, Identity, Placeholder, Conv2D, BiasAdd, Reshape,
-    MatMul, Add, and Relu layers
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "KJ_TinyTaxiNet.pb")
-    network = Marabou.read_tf(filename)
-    evaluateNetwork(network)
-
-def test_conv_mp1():
-    """
-    Test a convolutional network using max pool
-    Uses Const, Identity, Placeholder, Conv2D, Add, Relu, and MaxPool layers
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "conv_mp1.pb")
-    network = Marabou.read_tf(filename)
-    evaluateNetwork(network)
-
-def test_conv_mp2():
-    """
-    Test a convolutional network using max pool
-    This network tests different padding types for convolutional layers
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "conv_mp2.pb")
-    network = Marabou.read_tf(filename)
-    evaluateNetwork(network)
-
-def test_conv_mp3():
-    """
-    Test a convolutional network using max pool
-    This network tests SAME padding for max pool, as well as a convolutional network
-    where the first dimension is an integer value rather than None
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "conv_mp3.pb")
-    network = Marabou.read_tf(filename)
-    evaluateNetwork(network)
-
-def test_conv_NCHW():
-    """
-    Test a convolutional network using max pool
-    These networks are identical, except they use different data format conventions.
-    NHWC means tensors are written in the [Num, Height, Width, Channels] convention.
-    NCHW means tensors are written in the [Num, Channels, Height, Width] convention.
-    Only the default, NHWC, can be run in tensorflow on CPUs, so this tests the GPU NCHW
-    convention by comparing the outputs generated by Marabou to the NHWC outupt from tensorflow
-    """
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "conv_mp4.pb")
-    network_nhwc = Marabou.read_tf(filename)
-    filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "conv_mp5.pb")
-    network_nchw = Marabou.read_tf(filename)
-
-    # Evaluate test points using both Marabou and Tensorflow, and assert that the max error is less than TOL
-    testInputs = [[np.random.random(inVars.shape) for inVars in network_nhwc.inputVars] for _ in range(5)]
-    for testInput in testInputs:
-        mar_nhwc = network_nhwc.evaluateWithMarabou(testInput, options = OPT, filename = "")
-        mar_nchw = network_nchw.evaluateWithMarabou(testInput, options = OPT, filename = "")
-        tf_nhwc = network_nhwc.evaluateWithoutMarabou(testInput)
-
-        assert max(abs(mar_nhwc[0] - tf_nhwc[0]).flatten()) < TOL
-        assert max(abs(mar_nchw[0] - tf_nhwc[0]).flatten()) < TOL
-
-def test_sm1_fc1():
-    """
-    Test a fully-connected neural network, written in the
-    SavedModel format created by tensorflow version 1.X
-    """
-    filename = os.path.join(os.path.dirname(__file__), SM1_FOLDER, "fc1")
-    network = Marabou.read_tf(filename, modelType = "savedModel_v1", outputNames = ["add_3"])
-    evaluateNetwork(network)
-
-def test_sm2_fc1():
-    """
-    Test a fully-connected neural network, written in the
-    SavedModel format created by tensorflow version 2.X
-    """
-    filename = os.path.join(os.path.dirname(__file__), SM2_FOLDER, "fc1")
-    network = Marabou.read_tf(filename, modelType = "savedModel_v2")
-    evaluateNetwork(network)
-
-def test_sm2_sign():
-    """
-    Test a fully-connected neural network with sign activations, written in the
-    SavedModel format created by tensorflow version 2.X
-    """
-    filename = os.path.join(os.path.dirname(__file__), SM2_FOLDER, "signNetwork")
-    network = Marabou.read_tf(filename, modelType = "savedModel_v2")
-    evaluateNetwork(network)
-
-def test_sub_concat():
-    """
-    Test a fully-connected neural network
-    This network has two inputs (X0 and X1) that undergo independent operations before being concatenated
-    together (concat). The inputs are used again via addition and subtraction to create the output, Y.
-    This function tests different configurations of input and output variables, as well as a network where
-    the first dimension is an integer rather than None.
- """ - filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "sub_concat.pb") - - # Test default, which should find both X0 and X1 for inputs - network = Marabou.read_tf(filename) - evaluateNetwork(network) - assert len(network.inputVars) == 2 - assert len(network.outputVars) == 1 - assert network.outputVars[0].shape == (5,2) - - # If an intermediate layer is used as the output, which depends on only one input variable, - # then only that input variable is used - network = Marabou.read_tf(filename, outputNames = ["Relu_2"]) - assert len(network.inputVars) == 1 - # All output variables come from a ReLU activation, so they should be a part of a PL constraint, - # and they should have a lower bound - assert np.all([network.lowerBoundExists(var) for var in network.outputVars[0].flatten()]) - evaluateNetwork(network) - # Evaluation does not add permanent upper/lower bound values to the network - for inputVars in network.inputVars: - assert not np.any([network.lowerBoundExists(var) for var in inputVars.flatten()]) - assert not np.any([network.upperBoundExists(var) for var in inputVars.flatten()]) - - # Test that the output of a MatMul operation can be used as output operation - network = Marabou.read_tf(filename, inputNames = ["X0"], outputNames = ["MatMul_2"]) - evaluateNetwork(network) - assert len(network.outputVars[0][1]) == 20 - - # Test that concatenation can be defined as output operation - network = Marabou.read_tf(filename, inputNames = ["X0", "X1"], outputNames = ["concat"]) - evaluateNetwork(network) - assert len(network.outputVars[0][1]) == 40 - - # An intermediate layer can be used as an input, which forces that layer to have the given values - # and ignore the equations used to create that intermediate layer - network = Marabou.read_tf(filename, inputNames = ["X0","X1","concat"], outputNames = ["Y"]) - evaluateNetwork(network) - -def test_sub_matmul(): - """ - Test a fully-connected neural network - This network tests a variety of ways that matmul and subtraction can be used - """ - filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "sub_matmul.pb") - network = Marabou.read_tf(filename) - evaluateNetwork(network) - -def test_errors(): - """ - Test that user errors of the parser can be caught and helpful information is given - """ - filename = os.path.join(os.path.dirname(__file__), FG_FOLDER, "sub_concat.pb") - # Bad modelType - with pytest.raises(RuntimeError, match=r"Unknown input to modelType"): - network = Marabou.read_tf(filename, modelType="badModelType") - - # Input name not found in graph - with pytest.raises(RuntimeError, match=r"input.*is not an operation"): - network = Marabou.read_tf(filename, inputNames = ["X123"], outputNames = ["MatMul_2"]) - - # Output name not found in graph - with pytest.raises(RuntimeError, match=r"output.*is not an operation"): - network = Marabou.read_tf(filename, inputNames = ["X0"], outputNames = ["MatMul_123"]) - - # Output also used as an input - with pytest.raises(RuntimeError, match=r"cannot be used as both input and output"): - network = Marabou.read_tf(filename, inputNames = ["Relu"], outputNames = ["Relu"]) - - # One of the inputs is not needed to compute the output - with pytest.raises(RuntimeError, match=r"not all inputs contributed to one of the outputs"): - network = Marabou.read_tf(filename, inputNames = ["X0","X1"], outputNames = ["MatMul_2"]) - - # There are missing inputs, which are needed to compute the output - with pytest.raises(RuntimeError, match=r"One of the outputs in.*depends on placeholder"): - 
network = Marabou.read_tf(filename, inputNames = ["concat"], outputNames = ["Relu"]) - - # Not enough input values are given for all input variables - with pytest.raises(RuntimeError, match=r"Bad input given"): - network = Marabou.read_tf(filename, inputNames = ["X0","X1"], outputNames = ["Y"]) - testInput = [np.random.random(inVars.shape) for inVars in network.inputVars] - testInput = testInput[1:] - network.evaluateWithoutMarabou(testInput) - - # Input values given have the wrong shape, and they cannot be reshaped to the correct shape - with pytest.raises(RuntimeError, match=r"Input.*should have shape"): - network = Marabou.read_tf(filename, inputNames = ["X0","X1"], outputNames = ["Y"]) - testInput = [np.random.random((2,) + inVars.shape) for inVars in network.inputVars] - network.evaluateWithoutMarabou(testInput) - -def evaluateNetwork(network, testInputs = None, numPoints = NUM_RAND): - """ - Evaluate a network at random testInputs with and without Marabou - Args: - network (MarabouNetwork): network loaded into Marabou to be evaluated - """ - # Create test points if none provided. This creates a list of test points. - # Each test point is itself a list, representing the values for each input array. - if not testInputs: - testInputs = [[np.random.random(inVars.shape) for inVars in network.inputVars] for _ in range(numPoints)] - - # Evaluate test points using both Marabou and Tensorflow, and assert that the max error is less than TOL - for testInput in testInputs: - err = network.findError(testInput, options = OPT, filename = "") - for i in range(len(err)): - assert max(err[i].flatten()) < TOL diff --git a/maraboupy/test_requirements.txt b/maraboupy/test_requirements.txt index 678ba20c49..007e182b10 100644 --- a/maraboupy/test_requirements.txt +++ b/maraboupy/test_requirements.txt @@ -4,6 +4,3 @@ onnx>=1.15.0,<2 onnxruntime>=1.12.0,<2 pytest>=7.2.1,<8 pytest-cov>=4.0.0,<5 -# 2023-07-12: -# Cannot use tensorflow 2.13 because it removes 'tensorflow.python.framework.graph_util.convert_variables_to_constants' -tensorflow>=2.10,<2.13 diff --git a/pyproject.toml b/pyproject.toml index 2b288ecfaa..22c87b7f72 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,10 +19,7 @@ test = [ "onnx>=1.15.0,<2", "onnxruntime>=1.12.0,<2", "pytest>=7.2.1,<8", - "pytest-cov>=4.0.0,<5", - # 2023-07-12: - # Cannot use tensorflow 2.13 because it removes 'tensorflow.python.framework.graph_util.convert_variables_to_constants' - "tensorflow>=2.10,<2.13", + "pytest-cov>=4.0.0,<5" ] [project.scripts] @@ -54,7 +51,9 @@ skip = [ # 2023-04-16: pybind11 does not support PyPy "pp*", # 2023-04-16: skip building musllinux wheels - "*musllinux*" + "*musllinux*", + # 2024-8-22: have to skip building macox arm64 + "cp38-macosx_arm64" ] before-test = ["python {package}/setup.py build_ext --inplace"] test-command = "pytest {package}/maraboupy/test" diff --git a/resources/onnx/test_sub.onnx b/resources/onnx/test_sub.onnx new file mode 100644 index 0000000000..3c72306c39 Binary files /dev/null and b/resources/onnx/test_sub.onnx differ diff --git a/resources/tf/frozen_graph/KJ_TinyTaxiNet.pb b/resources/tf/frozen_graph/KJ_TinyTaxiNet.pb deleted file mode 100644 index 216daf4c58..0000000000 Binary files a/resources/tf/frozen_graph/KJ_TinyTaxiNet.pb and /dev/null differ diff --git a/resources/tf/frozen_graph/conv_mp1.pb b/resources/tf/frozen_graph/conv_mp1.pb deleted file mode 100644 index fb0c700fa9..0000000000 Binary files a/resources/tf/frozen_graph/conv_mp1.pb and /dev/null differ diff --git 
diff --git a/maraboupy/test_requirements.txt b/maraboupy/test_requirements.txt
index 678ba20c49..007e182b10 100644
--- a/maraboupy/test_requirements.txt
+++ b/maraboupy/test_requirements.txt
@@ -4,6 +4,3 @@ onnx>=1.15.0,<2
 onnxruntime>=1.12.0,<2
 pytest>=7.2.1,<8
 pytest-cov>=4.0.0,<5
-# 2023-07-12:
-# Cannot use tensorflow 2.13 because it removes 'tensorflow.python.framework.graph_util.convert_variables_to_constants'
-tensorflow>=2.10,<2.13
diff --git a/pyproject.toml b/pyproject.toml
index 2b288ecfaa..22c87b7f72 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,10 +19,7 @@ test = [
     "onnx>=1.15.0,<2",
     "onnxruntime>=1.12.0,<2",
     "pytest>=7.2.1,<8",
-    "pytest-cov>=4.0.0,<5",
-    # 2023-07-12:
-    # Cannot use tensorflow 2.13 because it removes 'tensorflow.python.framework.graph_util.convert_variables_to_constants'
-    "tensorflow>=2.10,<2.13",
+    "pytest-cov>=4.0.0,<5"
 ]
 
 [project.scripts]
@@ -54,7 +51,9 @@ skip = [
     # 2023-04-16: pybind11 does not support PyPy
     "pp*",
     # 2023-04-16: skip building musllinux wheels
-    "*musllinux*"
+    "*musllinux*",
+    # 2024-08-22: skip building the CPython 3.8 macOS arm64 wheel
+    "cp38-macosx_arm64"
 ]
 before-test = ["python {package}/setup.py build_ext --inplace"]
 test-command = "pytest {package}/maraboupy/test"
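cibuildwheel matches build identifiers against the `skip` entries as shell-style globs, so `pp*` and `*musllinux*` each prune a whole family of wheels while the new `cp38-macosx_arm64` entry prunes exactly one. A sketch of that matching using Python's `fnmatch`, which has the same glob semantics (the identifier list below is illustrative, not exhaustive):

```python
# Illustrative only: how glob-style skip patterns prune cibuildwheel identifiers.
from fnmatch import fnmatch

skip = ["pp*", "*musllinux*", "cp38-macosx_arm64"]
identifiers = [
    "cp38-macosx_arm64",       # newly skipped by this change
    "cp39-macosx_arm64",       # still built
    "cp39-manylinux_x86_64",   # still built
    "cp39-musllinux_x86_64",   # skipped by *musllinux*
    "pp310-manylinux_x86_64",  # skipped by pp*
]
built = [i for i in identifiers if not any(fnmatch(i, pat) for pat in skip)]
print(built)  # ['cp39-macosx_arm64', 'cp39-manylinux_x86_64']
```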
diff --git a/resources/onnx/test_sub.onnx b/resources/onnx/test_sub.onnx
new file mode 100644
index 0000000000..3c72306c39
Binary files /dev/null and b/resources/onnx/test_sub.onnx differ
diff --git a/resources/tf/frozen_graph/KJ_TinyTaxiNet.pb b/resources/tf/frozen_graph/KJ_TinyTaxiNet.pb
deleted file mode 100644
index 216daf4c58..0000000000
Binary files a/resources/tf/frozen_graph/KJ_TinyTaxiNet.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/conv_mp1.pb b/resources/tf/frozen_graph/conv_mp1.pb
deleted file mode 100644
index fb0c700fa9..0000000000
Binary files a/resources/tf/frozen_graph/conv_mp1.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/conv_mp2.pb b/resources/tf/frozen_graph/conv_mp2.pb
deleted file mode 100644
index 356ca6ad10..0000000000
Binary files a/resources/tf/frozen_graph/conv_mp2.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/conv_mp3.pb b/resources/tf/frozen_graph/conv_mp3.pb
deleted file mode 100644
index 7b30356adf..0000000000
Binary files a/resources/tf/frozen_graph/conv_mp3.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/conv_mp4.pb b/resources/tf/frozen_graph/conv_mp4.pb
deleted file mode 100644
index d18f40b1d2..0000000000
Binary files a/resources/tf/frozen_graph/conv_mp4.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/conv_mp5.pb b/resources/tf/frozen_graph/conv_mp5.pb
deleted file mode 100644
index 9a6a99e826..0000000000
Binary files a/resources/tf/frozen_graph/conv_mp5.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/fc1.pb b/resources/tf/frozen_graph/fc1.pb
deleted file mode 100644
index 9a1c163d0c..0000000000
Binary files a/resources/tf/frozen_graph/fc1.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/fc2.pb b/resources/tf/frozen_graph/fc2.pb
deleted file mode 100644
index 2209be7975..0000000000
Binary files a/resources/tf/frozen_graph/fc2.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/sub_concat.pb b/resources/tf/frozen_graph/sub_concat.pb
deleted file mode 100644
index 7cdba55ac1..0000000000
Binary files a/resources/tf/frozen_graph/sub_concat.pb and /dev/null differ
diff --git a/resources/tf/frozen_graph/sub_matmul.pb b/resources/tf/frozen_graph/sub_matmul.pb
deleted file mode 100644
index 1547720965..0000000000
Binary files a/resources/tf/frozen_graph/sub_matmul.pb and /dev/null differ
diff --git a/resources/tf/saved_model_v1/fc1/saved_model.pb b/resources/tf/saved_model_v1/fc1/saved_model.pb
deleted file mode 100644
index 5d1f88e5cd..0000000000
Binary files a/resources/tf/saved_model_v1/fc1/saved_model.pb and /dev/null differ
diff --git a/resources/tf/saved_model_v1/fc1/variables/variables.data-00000-of-00001 b/resources/tf/saved_model_v1/fc1/variables/variables.data-00000-of-00001
deleted file mode 100644
index cdc0535a74..0000000000
Binary files a/resources/tf/saved_model_v1/fc1/variables/variables.data-00000-of-00001 and /dev/null differ
diff --git a/resources/tf/saved_model_v1/fc1/variables/variables.index b/resources/tf/saved_model_v1/fc1/variables/variables.index
deleted file mode 100644
index 4e02c185cc..0000000000
Binary files a/resources/tf/saved_model_v1/fc1/variables/variables.index and /dev/null differ
diff --git a/resources/tf/saved_model_v2/fc1/saved_model.pb b/resources/tf/saved_model_v2/fc1/saved_model.pb
deleted file mode 100644
index 5ce1784756..0000000000
Binary files a/resources/tf/saved_model_v2/fc1/saved_model.pb and /dev/null differ
diff --git a/resources/tf/saved_model_v2/fc1/variables/variables.data-00000-of-00001 b/resources/tf/saved_model_v2/fc1/variables/variables.data-00000-of-00001
deleted file mode 100644
index d5df63ca38..0000000000
Binary files a/resources/tf/saved_model_v2/fc1/variables/variables.data-00000-of-00001 and /dev/null differ
diff --git a/resources/tf/saved_model_v2/fc1/variables/variables.index b/resources/tf/saved_model_v2/fc1/variables/variables.index
deleted file mode 100644
index 9b35c5889e..0000000000
Binary files a/resources/tf/saved_model_v2/fc1/variables/variables.index and /dev/null differ
diff --git a/resources/tf/saved_model_v2/signNetwork/saved_model.pb b/resources/tf/saved_model_v2/signNetwork/saved_model.pb
deleted file mode 100644
index d3098aee8c..0000000000
Binary files a/resources/tf/saved_model_v2/signNetwork/saved_model.pb and /dev/null differ
diff --git a/resources/tf/saved_model_v2/signNetwork/variables/variables.data-00000-of-00001 b/resources/tf/saved_model_v2/signNetwork/variables/variables.data-00000-of-00001
deleted file mode 100644
index a650f2932e..0000000000
Binary files a/resources/tf/saved_model_v2/signNetwork/variables/variables.data-00000-of-00001 and /dev/null differ
diff --git a/resources/tf/saved_model_v2/signNetwork/variables/variables.index b/resources/tf/saved_model_v2/signNetwork/variables/variables.index
deleted file mode 100644
index 2da30854c5..0000000000
Binary files a/resources/tf/saved_model_v2/signNetwork/variables/variables.index and /dev/null differ
diff --git a/tools/download_boost.sh b/tools/download_boost.sh
index f77d5fbd42..3f361a659a 100755
--- a/tools/download_boost.sh
+++ b/tools/download_boost.sh
@@ -13,6 +13,7 @@
 wget -q https://sourceforge.net/projects/boost/files/boost/$version/boost_$under
 echo "Unzipping boost"
 tar xzvf boost-$version.tar.gz >> /dev/null
 
+mv boost_$underscore_version boost-$version
 echo "Installing boost"
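The added `mv` bridges a naming mismatch: the Boost tarball unpacks into an underscore-named directory (`boost_$underscore_version`) while the later steps of the script address the dotted form (`boost-$version`). A small sketch of the two naming schemes, with an assumed example version (not taken from this diff):

```python
# Illustrative only: the two directory names the added `mv` reconciles.
version = "1.84.0"                              # assumed example version
underscore_version = version.replace(".", "_")  # -> "1_84_0"
print(f"boost_{underscore_version}")            # what tar produces:      boost_1_84_0
print(f"boost-{version}")                       # what the script expects: boost-1.84.0
```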