Update TensorFlow to version 2.15.0
Dobiasd committed Jan 1, 2024
1 parent 6e673fb commit c0c060e
Showing 4 changed files with 11 additions and 102 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -13,7 +13,7 @@ jobs:
sudo apt-get install libblas-dev liblapack-dev libatlas-base-dev gfortran
# python libs
sudo pip3 install --upgrade pip
sudo pip3 install numpy scipy h5py "tensorflow==2.13.0" "keras==2.13.1"
sudo pip3 install numpy scipy h5py "tensorflow==2.15.0"
echo "Version numbers of TensorFlow and Keras:"
python3 -c "import tensorflow as tf; print(tf.__version__); print(tf.keras.__version__)"
# FunctionalPlus
2 changes: 1 addition & 1 deletion README.md
@@ -139,7 +139,7 @@ Requirements and Installation

- A **C++14**-compatible compiler: Compilers from these versions on are fine: GCC 4.9, Clang 3.7 (libc++ 3.7) and Visual C++ 2015
- Python 3.7 or higher
-- TensorFlow 2.13.0 and Keras 2.13.1 (These are the tested versions, but somewhat older ones might work too.)
+- TensorFlow 2.15.0 (This is the tested version, but somewhat older ones might work too.)

Guides for different ways to install frugally-deep can be found in [`INSTALL.md`](INSTALL.md).

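For readers beyond the diff itself: once the requirements above are installed and a Keras model has been converted with keras_export/convert_model.py, loading and running it from C++ takes only a few lines. The following is a minimal sketch, not part of this commit; the file name fdeep_model.json and the 4-element input are placeholders that would have to match the actual converted model.

#include "fdeep/fdeep.hpp"

#include <iostream>

int main()
{
    // Load a model previously converted with keras_export/convert_model.py.
    // "fdeep_model.json" is a placeholder file name.
    const auto model = fdeep::load_model("fdeep_model.json");

    // Run one forward pass; the rank-1 input of size 4 is chosen only for illustration.
    const auto results = model.predict(
        {fdeep::tensor(fdeep::tensor_shape(static_cast<std::size_t>(4)),
            std::vector<float>{1.0f, 2.0f, 3.0f, 4.0f})});

    std::cout << fdeep::show_tensors(results) << std::endl;
}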
6 changes: 2 additions & 4 deletions test/Dockerfile
@@ -20,21 +20,19 @@ RUN echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.
RUN apt-get update && apt-get install -y bazel-5.1.1
RUN ln -s /usr/bin/bazel-5.1.1 /usr/bin/bazel

-RUN git clone -b 'v2.13.0' --single-branch --depth 1 https://github.com/tensorflow/tensorflow.git
+RUN git clone -b 'v2.15.0' --single-branch --depth 1 https://github.com/tensorflow/tensorflow.git
WORKDIR /tensorflow
RUN ./configure
RUN bazel build --jobs 3 --local_ram_resources=HOST_RAM*.3 -c opt //tensorflow/tools/pip_package:build_pip_package
RUN apt-get install -y patchelf
RUN ./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
-RUN pip install /tmp/tensorflow_pkg/tensorflow-2.13.0-cp310-cp310-linux_x86_64.whl
+RUN pip install /tmp/tensorflow_pkg/tensorflow-2.15.0-cp310-cp310-linux_x86_64.whl
WORKDIR /

RUN git clone -b 'v0.2.22' --single-branch --depth 1 https://github.com/Dobiasd/FunctionalPlus && cd FunctionalPlus && mkdir -p build && cd build && cmake .. && make && make install
RUN git clone -b '3.4.0' --single-branch --depth 1 https://gitlab.com/libeigen/eigen.git && cd eigen && mkdir -p build && cd build && cmake .. && make && make install && ln -s /usr/local/include/eigen3/Eigen /usr/local/include/Eigen
RUN git clone -b 'v3.10.5' --single-branch --depth 1 https://github.com/nlohmann/json && cd json && mkdir -p build && cd build && cmake -DJSON_BuildTests=OFF .. && make && make install

-RUN pip install -U keras=="2.13.1" keras_applications

ADD include frugally-deep/include
ADD keras_export frugally-deep/keras_export
ADD test frugally-deep/test
103 changes: 7 additions & 96 deletions test/applications_performance.cpp
@@ -1,100 +1,11 @@
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
// https://opensource.org/licenses/MIT)

/*
CUDA_VISIBLE_DEVICES='' python3.11 ../keras_export/generate_test_models.py exhaustive test_model_exhaustive.keras
CUDA_VISIBLE_DEVICES='' python3.11 ../keras_export/convert_model.py test_model_exhaustive.keras test_model_exhaustive.json
cat test_model_exhaustive.json | jq . > test_model_exhaustive.json.formatted.json
subl test_model_exhaustive.json.formatted.json
*/
#include "fdeep/fdeep.hpp"

int main()
{
    std::vector<std::string> model_paths = {
        // "convnextbase.json",
        // "convnextlarge.json",
        // "convnextsmall.json",
        // "convnexttiny.json",
        // "convnextxlarge.json",
        "densenet121.json",
        "densenet169.json",
        "densenet201.json",
        "efficientnetb0.json",
        "efficientnetb1.json",
        "efficientnetb2.json",
        "efficientnetb3.json",
        "efficientnetb4.json",
        "efficientnetb5.json",
        "efficientnetb6.json",
        "efficientnetb7.json",
        "efficientnetv2b0.json",
        "efficientnetv2b1.json",
        "efficientnetv2b2.json",
        "efficientnetv2b3.json",
        "efficientnetv2l.json",
        "efficientnetv2m.json",
        "efficientnetv2s.json",
        // "inceptionresnetv2.json",
        "inceptionv3.json",
        "mobilenet.json",
        "mobilenetv2.json",
        "nasnetlarge.json",
        "nasnetmobile.json",
        "resnet101.json",
        "resnet101v2.json",
        "resnet152.json",
        "resnet152v2.json",
        "resnet50.json",
        "resnet50v2.json",
        "vgg16.json",
        "vgg19.json",
        "xception.json"
    };

    bool error = false;

    for (const auto& model_path : model_paths)
    {
        std::cout << "----" << std::endl;
        std::cout << model_path << std::endl;
#ifdef NDEBUG
        try
        {
            const auto model = fdeep::load_model(model_path, true);
            const std::size_t warm_up_runs = 3;
            const std::size_t test_runs = 5;
            for (std::size_t i = 0; i < warm_up_runs; ++i)
            {
                const double duration = model.test_speed();
                std::cout << "Forward pass took "
                          << duration << " s." << std::endl;
            }
            double duration_sum = 0;
            std::cout << "Starting performance measurements." << std::endl;
            for (std::size_t i = 0; i < test_runs; ++i)
            {
                const double duration = model.test_speed();
                duration_sum += duration;
                std::cout << "Forward pass took "
                          << duration << " s." << std::endl;
            }
            const double duration_avg =
                duration_sum / static_cast<double>(test_runs);
            std::cout << "Forward pass took "
                      << duration_avg << " s on average." << std::endl;
        }
        catch (const std::exception& e)
        {
            std::cerr << "ERROR: " << e.what() << std::endl;
            error = true;
        }
#else
        const auto model = fdeep::load_model(model_path, true);
#endif
    }

    if (error)
    {
        std::cout << "There were errors." << std::endl;
        return 1;
    }
    std::cout << "All imports and test OK." << std::endl;
    fdeep::load_model("test_model_exhaustive.json");
}
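The benchmark code removed by this commit relied on model.test_speed(), running a few warm-up passes and then averaging several timed forward passes. For anyone who wants a comparable measurement without that helper, the same warm-up-then-average pattern can be approximated with std::chrono around model.predict. This is a sketch only, not part of the commit; my_model.json and its single 4-element input are hypothetical and would have to match whatever model is actually loaded.

#include "fdeep/fdeep.hpp"

#include <chrono>
#include <iostream>

int main()
{
    // "my_model.json" is a hypothetical single-input model converted beforehand.
    const auto model = fdeep::load_model("my_model.json");
    const fdeep::tensors inputs = {
        fdeep::tensor(fdeep::tensor_shape(static_cast<std::size_t>(4)),
            std::vector<float>{1.0f, 2.0f, 3.0f, 4.0f})};

    // Warm-up passes, mirroring the removed benchmark's approach.
    for (std::size_t i = 0; i < 3; ++i)
    {
        model.predict(inputs);
    }

    // Timed passes, averaged over a few runs.
    const std::size_t test_runs = 5;
    double duration_sum = 0.0;
    for (std::size_t i = 0; i < test_runs; ++i)
    {
        const auto start = std::chrono::steady_clock::now();
        model.predict(inputs);
        const auto stop = std::chrono::steady_clock::now();
        duration_sum += std::chrono::duration<double>(stop - start).count();
    }
    std::cout << "Forward pass took "
              << duration_sum / static_cast<double>(test_runs)
              << " s on average." << std::endl;
}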
