
Commit

Enabled ExternalTensorFlow with multiple models.
friedenhe committed Jun 15, 2024
1 parent 46f131c commit d1a8a13
Showing 9 changed files with 161 additions and 37 deletions.
50 changes: 32 additions & 18 deletions dafoam/pyDAFoam.py
@@ -743,10 +743,9 @@ def __init__(self):
## tensorflow related functions
self.tensorflow = {
"active": False,
"modelName": "model",
"nInputs": 1,
"nOutputs": 1,
"batchSize": 1000,
"model1": {
"batchSize": 1000
}
}
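
For reference, a minimal sketch (not part of this commit) of how the reworked "tensorflow" option could be set from a run script when two external models are used. The model names "model_beta" and "model_nut" are hypothetical; every key other than "active" is treated as the name of a saved Keras model that TensorFlowHelper.initialize() loads.

daOptions = {
    "tensorflow": {
        "active": True,
        # each sub-dict key is assumed to be the saved-model name/path
        "model_beta": {"predictBatchSize": 1000},
        "model_nut": {"predictBatchSize": 500},
    },
}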


@@ -928,9 +927,9 @@ def __init__(self, comm=None, options=None):
TensorFlowHelper.options = self.getOption("tensorflow")
TensorFlowHelper.initialize()
# pass this helper function to the C++ layer
self.solver.initTensorFlowFuncs(TensorFlowHelper.predict, TensorFlowHelper.calcJacVecProd)
self.solver.initTensorFlowFuncs(TensorFlowHelper.predict, TensorFlowHelper.calcJacVecProd, TensorFlowHelper.setModelName)
if self.getOption("useAD")["mode"] in ["forward", "reverse"]:
self.solverAD.initTensorFlowFuncs(TensorFlowHelper.predict, TensorFlowHelper.calcJacVecProd)
self.solverAD.initTensorFlowFuncs(TensorFlowHelper.predict, TensorFlowHelper.calcJacVecProd, TensorFlowHelper.setModelName)

Info("pyDAFoam initialization done!")

@@ -4491,31 +4490,42 @@ class TensorFlowHelper:

options = {}

model = None
model = {}

modelName = None

# per-model prediction batch sizes, filled in initialize()
predictBatchSize = {}

@staticmethod
def initialize():
"""
Initialize parameters and load models
"""
Info("Initializing the TensorFlowHelper")
TensorFlowHelper.modelName = TensorFlowHelper.options["modelName"]
TensorFlowHelper.nInputs = TensorFlowHelper.options["nInputs"]
TensorFlowHelper.nOutputs = TensorFlowHelper.options["nOutputs"]
TensorFlowHelper.batchSize = TensorFlowHelper.options["batchSize"]
TensorFlowHelper.model = tf.keras.models.load_model(TensorFlowHelper.modelName)
for key in list(TensorFlowHelper.options.keys()):
if key != "active":
modelName = key
TensorFlowHelper.predictBatchSize[modelName] = TensorFlowHelper.options[modelName]["predictBatchSize"]
TensorFlowHelper.model[modelName] = tf.keras.models.load_model(modelName)

if TensorFlowHelper.nOutputs != 1:
raise Error("current version supports nOutputs=1 only!")
@staticmethod
def setModelName(modelName):
"""
Set the model name from the C++ to Python layer
"""
TensorFlowHelper.modelName = modelName
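
The C++ layer (see DARegression::compute further down) pushes the model name through this callback right before it invokes predict or calcJacVecProd. A self-contained mock of that select-then-call protocol, with hypothetical model names, is sketched below; it illustrates the calling order and is not code from the commit.

class _MockHelper:
    # two stand-in "models"; the real helper stores loaded Keras models here
    models = {"modelA": (lambda x: 2.0 * x), "modelB": (lambda x: x + 1.0)}
    modelName = None

    @staticmethod
    def setModelName(name):
        # called first, from the C++ side, to select the active model
        _MockHelper.modelName = name

    @staticmethod
    def predict(x):
        # called next; looks up the model selected by setModelName
        return _MockHelper.models[_MockHelper.modelName](x)

_MockHelper.setModelName("modelA")
assert _MockHelper.predict(3.0) == 6.0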

@staticmethod
def predict(inputs, n, outputs, m):
"""
Calculate the outputs based on the inputs using the saved model
"""

inputs_tf = np.reshape(inputs, (-1, TensorFlowHelper.nInputs))
outputs_tf = TensorFlowHelper.model.predict(inputs_tf, verbose=False, batch_size=TensorFlowHelper.batchSize)
nInputs = int(n / m)

modelName = TensorFlowHelper.modelName

inputs_tf = np.reshape(inputs, (-1, nInputs))
batchSize = TensorFlowHelper.predictBatchSize[modelName]
outputs_tf = TensorFlowHelper.model[modelName].predict(inputs_tf, verbose=False, batch_size=batchSize)

for i in range(m):
outputs[i] = outputs_tf[i, 0]
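
A small standalone illustration (not part of the commit) of the flat-array contract predict relies on: the C++ side passes n = m * nInputs feature values and expects m outputs, and the reshape above assumes the nInputs features belonging to one cell sit next to each other in the flat array.

import numpy as np

m, nInputs = 4, 3                    # 4 cells, 3 features per cell (hypothetical sizes)
n = m * nInputs                      # length of the flat input array handed over from C++
inputs = np.arange(n, dtype=float)   # stand-in for the flattened features
inputs_tf = np.reshape(inputs, (-1, nInputs))  # one row of nInputs features per cell
assert inputs_tf.shape == (m, nInputs)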
@@ -4526,11 +4536,15 @@ def calcJacVecProd(inputs, inputs_b, n, outputs, outputs_b, m):
Calculate the gradients of the outputs wrt the inputs
"""

inputs_tf = np.reshape(inputs, (-1, TensorFlowHelper.nInputs))
nInputs = int(n / m)

modelName = TensorFlowHelper.modelName

inputs_tf = np.reshape(inputs, (-1, nInputs))
inputs_tf_var = tf.Variable(inputs_tf, dtype=tf.float32)

with tf.GradientTape() as tape:
outputs_tf = TensorFlowHelper.model(inputs_tf_var)
outputs_tf = TensorFlowHelper.model[modelName](inputs_tf_var)

gradients_tf = tape.gradient(outputs_tf, inputs_tf_var)
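
The remainder of calcJacVecProd is not shown in this diff. As a hedged, standalone sketch, the usual way to finish a reverse-mode product with tf.GradientTape is to seed the tape with the output adjoints via the output_gradients argument, so that tape.gradient returns J^T times that seed; all names below are illustrative and the tiny model is a stand-in.

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])  # 1 output per cell

inputs_tf_var = tf.Variable(np.random.rand(5, 3), dtype=tf.float32)  # 5 cells, 3 features
outputs_b = tf.constant(np.ones((5, 1)), dtype=tf.float32)           # adjoint seed (e.g. from CoDiPack)

with tf.GradientTape() as tape:
    outputs_tf = model(inputs_tf_var)

# gradients of the outputs w.r.t. the inputs, weighted by the seed: J^T * outputs_b
inputs_b = tape.gradient(outputs_tf, inputs_tf_var, output_gradients=outputs_b)
print(inputs_b.shape)  # (5, 3), same shape as the inputs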

85 changes: 80 additions & 5 deletions src/adjoint/DARegression/DARegression.C
@@ -89,8 +89,13 @@ DARegression::DARegression(
}
else if (modelType_[modelName] == "externalTensorFlow")
{
featuresFlattenArray_ = new scalar[mesh_.nCells() * inputNames_[modelName].size()];
outputFieldArray_ = new scalar[mesh_.nCells()];
useExternalModel_ = 1;
// here the array size is chosen based on the regModel that has the largest number of inputs
// this is important because we support multiple regModels but we don't want to create multiple featuresFlattenArray_
if (inputNames_[modelName].size() * mesh_.nCells() > featuresFlattenArraySize_)
{
featuresFlattenArraySize_ = inputNames_[modelName].size() * mesh_.nCells();
}
}
else
{
@@ -128,6 +133,19 @@
}
features_.set(modelName, features);
}

// if external model is used, initialize space for the features and output arrays
if (useExternalModel_)
{

#if defined(CODI_AD_FORWARD)
featuresFlattenArrayDouble_ = new double[featuresFlattenArraySize_];
outputFieldArrayDouble_ = new double[mesh_.nCells()];
#else
featuresFlattenArray_ = new scalar[featuresFlattenArraySize_];
outputFieldArray_ = new scalar[mesh_.nCells()];
#endif
}
}
}
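
A worked example (illustration only, with hypothetical model names and mesh size) of the shared-buffer sizing rule applied in the constructor above: the single flattened feature array is sized for whichever regression model has the most inputs, so it can be reused by all of them.

nCells = 1000
nInputsPerModel = {"model1": 9, "model2": 4}  # hypothetical regression models
featuresFlattenArraySize = max(n * nCells for n in nInputsPerModel.values())
assert featuresFlattenArraySize == 9000       # sized for the 9-input model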

@@ -480,10 +498,31 @@ label DARegression::compute()
else if (modelType_[modelName] == "externalTensorFlow")
{
label nInputs = inputNames_[modelName].size();

DAUtility::pySetModelNameInterface(modelName.c_str(), DAUtility::pySetModelName);

// NOTE: in forward mode, only the primal value is computed through the external model; AD derivatives are not propagated
#if defined(CODI_AD_REVERSE)
// we need to use the external function helper from CoDiPack to propagate the AD

// assign features_ to featuresFlattenArray_
label counterI = 0;
// loop over all features
forAll(features_[modelName], idxI)
{
// loop over all cells
forAll(features_[modelName][idxI], cellI)
{
featuresFlattenArray_[counterI] = features_[modelName][idxI][cellI];
counterI++;
}
}
// assign outputField to outputFieldArray_
forAll(outputField, cellI)
{
outputFieldArray_[cellI] = outputField[cellI];
}

// we need to use the external function helper from CoDiPack to propagate the AD
codi::ExternalFunctionHelper<codi::RealReverse> externalFunc;
for (label i = 0; i < mesh_.nCells() * nInputs; i++)
{
@@ -510,8 +549,35 @@
}

#elif defined(CODI_AD_FORWARD)
// do nothing
// assign features_ to featuresFlattenArrayDouble_
label counterI = 0;
// loop over all features
forAll(features_[modelName], idxI)
{
// loop over all cells
forAll(features_[modelName][idxI], cellI)
{
featuresFlattenArrayDouble_[counterI] = features_[modelName][idxI][cellI].value();
counterI++;
}
}
// assign outputField to outputFieldArrayDouble_
forAll(outputField, cellI)
{
outputFieldArrayDouble_[cellI] = outputField[cellI].value();
}

// python callback function
DAUtility::pyCalcBetaInterface(
featuresFlattenArrayDouble_, mesh_.nCells() * nInputs, outputFieldArrayDouble_, mesh_.nCells(), DAUtility::pyCalcBeta);

forAll(outputField, cellI)
{
outputField[cellI] = outputFieldArrayDouble_[cellI];
}

#else
// assign features_ to featuresFlattenArray_
label counterI = 0;
// loop over all features
forAll(features_[modelName], idxI)
@@ -523,6 +589,11 @@
counterI++;
}
}
// assign outputField to outputFieldArray_
forAll(outputField, cellI)
{
outputFieldArray_[cellI] = outputField[cellI];
}

// python callback function
DAUtility::pyCalcBetaInterface(
@@ -591,9 +662,13 @@ label DARegression::nParameters(word modelName)

return nParameters;
}
else if (modelType_[modelName] == "externalTensorFlow")
{
// do nothing
}
else
{
FatalErrorIn("") << "modelType_: " << modelType_[modelName] << " not supported. Options are: neuralNetwork and radialBasisFunction" << abort(FatalError);
FatalErrorIn("") << "modelType_: " << modelType_[modelName] << " not supported. Options are: neuralNetwork, radialBasisFunction, and externalTensorFlow" << abort(FatalError);
}
}

25 changes: 23 additions & 2 deletions src/adjoint/DARegression/DARegression.H
@@ -105,11 +105,24 @@ protected:
/// a list of scalarField to save the input features
HashTable<PtrList<volScalarField>> features_;

#if defined(CODI_AD_FORWARD)
/// a flatten double feature array for externalTensorFlow model (forward AD)
double* featuresFlattenArrayDouble_;

/// output field double array for externalTensorFlow model (forward AD)
double* outputFieldArrayDouble_;
#else
/// a flatten feature array for externalTensorFlow model
scalar* featuresFlattenArray_;

/// output field array for externalTensorFlow model
scalar* outputFieldArray_;
#endif
/// whether to use external model
label useExternalModel_ = 0;

/// the array size is chosen based on the regModel that has the largest number of inputs (this is important because we support multiple regModels but we don't want to create multiple featuresFlattenArray_)
label featuresFlattenArraySize_ = -100;

/// whether to write the feature fields to the disk
HashTable<label> writeFeatures_;
@@ -124,8 +137,16 @@ public:
/// Destructor
virtual ~DARegression()
{
delete[] featuresFlattenArray_;
delete[] outputFieldArray_;
if (useExternalModel_)
{
#if defined(CODI_AD_FORWARD)
delete[] featuresFlattenArrayDouble_;
delete[] outputFieldArrayDouble_;
#else
delete[] featuresFlattenArray_;
delete[] outputFieldArray_;
#endif
}
}

// Members
3 changes: 3 additions & 0 deletions src/adjoint/DASolver/DASolver.C
@@ -19,6 +19,9 @@ pyComputeInterface Foam::DAUtility::pyCalcBetaInterface = NULL;
void* Foam::DAUtility::pyCalcBetaJacVecProd = NULL;
pyJacVecProdInterface Foam::DAUtility::pyCalcBetaJacVecProdInterface = NULL;

void* Foam::DAUtility::pySetModelName = NULL;
pySetCharInterface Foam::DAUtility::pySetModelNameInterface = NULL;

scalar Foam::DAUtility::primalMaxInitRes_ = -1e16;

namespace Foam
7 changes: 6 additions & 1 deletion src/adjoint/DASolver/DASolver.H
@@ -1244,13 +1244,18 @@ public:
pyComputeInterface computeInterface,
void* compute,
pyJacVecProdInterface jacVecProdInterface,
void* jacVecProd)
void* jacVecProd,
pySetCharInterface setModelNameInterface,
void* setModelName)
{
DAUtility::pyCalcBetaInterface = computeInterface;
DAUtility::pyCalcBeta = compute;

DAUtility::pyCalcBetaJacVecProdInterface = jacVecProdInterface;
DAUtility::pyCalcBetaJacVecProd = jacVecProd;

DAUtility::pySetModelNameInterface = setModelNameInterface;
DAUtility::pySetModelName = setModelName;
}

/// write state variables that are NO_WRITE to disk
4 changes: 4 additions & 0 deletions src/adjoint/DAUtility/DAUtility.H
@@ -26,6 +26,7 @@ namespace Foam

typedef void (*pyComputeInterface)(const double*, int, double*, int, void*);
typedef void (*pyJacVecProdInterface)(const double*, double*, int, const double*, const double*, int, void*);
typedef void (*pySetCharInterface)(const char*, void*);

/*---------------------------------------------------------------------------*\
Class DAUtility Declaration
@@ -132,6 +133,9 @@ public:
static void* pyCalcBetaJacVecProd;
static pyJacVecProdInterface pyCalcBetaJacVecProdInterface;

static void* pySetModelName;
static pySetCharInterface pySetModelNameInterface;

/// the max initial residual norms for the primal solution for each state
static scalar primalMaxInitRes_;

7 changes: 5 additions & 2 deletions src/pyDASolvers/DASolvers.H
@@ -983,9 +983,12 @@ public:
pyComputeInterface computeInterface,
void* compute,
pyJacVecProdInterface jacVecProdInterface,
void* jacVecProd)
void* jacVecProd,
pySetCharInterface setModelNameInterface,
void* setModelName)
{
DASolverPtr_->initTensorFlowFuncs(computeInterface, compute, jacVecProdInterface, jacVecProd);
DASolverPtr_->initTensorFlowFuncs(
computeInterface, compute, jacVecProdInterface, jacVecProd, setModelNameInterface, setModelName);
}

/// write the sensitivity map for all wall surfaces
Expand Down
10 changes: 7 additions & 3 deletions src/pyDASolvers/pyDASolvers.pyx
@@ -24,6 +24,7 @@ cdef public api CPointerToPyArray(const double* data, int size) with gil:

ctypedef void (*pyComputeInterface)(const double *, int, double *, int, void *)
ctypedef void (*pyJacVecProdInterface)(const double *, double *, int, const double *, const double *, int, void *)
ctypedef void (*pySetCharInterface)(const char *, void *)

cdef void pyCalcBetaCallBack(const double* inputs, int n, double* outputs, int m, void *func):
inputs_data = CPointerToPyArray(inputs, n)
@@ -37,6 +38,9 @@ cdef void pyCalcBetaJacVecProdCallBack(const double* inputs, double* inputs_b, i
outputs_b_data = CPointerToPyArray(outputs_b, m)
(<object>func)(inputs_data, inputs_b_data, n, outputs_data, outputs_b_data, m)

cdef void pySetModelNameCallBack(const char* modelName, void *func):
(<object>func)(modelName)
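
One caveat worth noting as an assumption (this diff does not show how the Python side handles it): Cython passes the const char* to the Python callable as a bytes object, so a receiver that uses the name as a dictionary key may want to normalize it first, e.g.:

def normalizeModelName(modelName):
    # a char* forwarded through Cython arrives as bytes in Python 3; decode it
    # before using it as a key into the dict of loaded models
    return modelName.decode("utf-8") if isinstance(modelName, bytes) else modelName

assert normalizeModelName(b"model1") == "model1"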

# declare cpp functions
cdef extern from "DASolvers.H" namespace "Foam":
cppclass DASolvers:
@@ -141,7 +145,7 @@
void calcdForceProfiledXvWAD(char *, char *, char *, PetscVec, PetscVec, PetscVec, PetscVec)
void calcdForcedStateTPsiAD(char *, PetscVec, PetscVec, PetscVec, PetscVec)
int runFPAdj(PetscVec, PetscVec, PetscVec, PetscVec)
void initTensorFlowFuncs(pyComputeInterface, void *, pyJacVecProdInterface, void *)
void initTensorFlowFuncs(pyComputeInterface, void *, pyJacVecProdInterface, void *, pySetCharInterface, void *)
void readStateVars(double, int)
void calcPCMatWithFvMatrix(PetscMat)
double getEndTime()
@@ -654,8 +658,8 @@ cdef class pyDASolvers:
def runFPAdj(self, Vec xvVec, Vec wVec, Vec dFdW, Vec psi):
return self._thisptr.runFPAdj(xvVec.vec, wVec.vec, dFdW.vec, psi.vec)

def initTensorFlowFuncs(self, compute, jacVecProd):
self._thisptr.initTensorFlowFuncs(pyCalcBetaCallBack, <void*>compute, pyCalcBetaJacVecProdCallBack, <void*>jacVecProd)
def initTensorFlowFuncs(self, compute, jacVecProd, setModelName):
self._thisptr.initTensorFlowFuncs(pyCalcBetaCallBack, <void*>compute, pyCalcBetaJacVecProdCallBack, <void*>jacVecProd, pySetModelNameCallBack, <void*>setModelName)

def writeSensMapSurface(self,
name,
7 changes: 1 addition & 6 deletions tests/runTests_DASimpleFoamkOmegaSSTFIML.py
@@ -39,12 +39,7 @@
"solverName": "DASimpleFoam",
"primalMinResTol": 5e-5,
"primalMinResTolDiff": 1e4,
"tensorflow": {
"active": True,
"modelName": "model",
"nInputs": 9,
"nOutputs": 1,
},
"tensorflow": {"active": True, "model": {"predictBatchSize": 100}},
"primalBC": {
"U0": {"variable": "U", "patches": ["inout"], "value": [U0, 0.0, 0.0]},
"p0": {"variable": "p", "patches": ["inout"], "value": [p0]},
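
For context, a hedged sketch (not part of the commit) of producing the saved model this test expects: the key "model" under "tensorflow" is passed to tf.keras.models.load_model, and the options removed above indicate 9 input features and 1 output for this case. This assumes a TF/Keras version in which model.save to a plain directory (SavedModel format) is supported, as the extension-less name implies.

import tensorflow as tf

model = tf.keras.Sequential(
    [
        tf.keras.Input(shape=(9,)),                    # 9 input features per cell
        tf.keras.layers.Dense(20, activation="tanh"),  # hypothetical hidden layer
        tf.keras.layers.Dense(1),                      # 1 output per cell
    ]
)
model.save("model")  # the directory name must match the key in the "tensorflow" option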
