
Commit

Merge pull request #42 from cvjena/releasing_v2.0.0
Pull Changes for v2.0.0
Clemens-Alexander Brust committed Dec 13, 2015
2 parents 592c08b + e3e9736 commit 88821b2
Showing 57 changed files with 3,584 additions and 416 deletions.
12 changes: 10 additions & 2 deletions .travis.yml
@@ -19,7 +19,7 @@ install:
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then mkdir -p cl12/CL; fi; fi
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then cd cl12/CL; fi; fi
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then wget https://www.khronos.org/registry/cl/api/1.2/cl.h; fi; fi
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then wget https://www.khronos.org/registry/cl/api/1.2/cl.hpp; fi; fi
# - if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then wget https://www.khronos.org/registry/cl/api/1.2/cl.hpp; fi; fi
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then wget https://www.khronos.org/registry/cl/api/1.2/cl_d3d10.h; fi; fi
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then wget https://www.khronos.org/registry/cl/api/1.2/cl_d3d11.h; fi; fi
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$TEST_SUITE" = "opencl" ]; then wget https://www.khronos.org/registry/cl/api/1.2/cl_dx9_media_sharing.h; fi; fi
@@ -49,7 +49,10 @@ before_script:
- if [ "$TEST_SUITE" = "reference" ]; then cmake -DCMAKE_BUILD_TYPE=Release ..; fi
- if [ "$TEST_SUITE" = "opencl" ]; then cmake -DCMAKE_BUILD_TYPE=Release -DCN24_BUILD_OPENCL:BOOL=ON -DCN24_BUILD_OPENCL_CLBLAS:BOOL=ON ..; fi

script: make
script:
- make
# OpenCL on Mac OS X supports a maximum work group size of (1,1,1) on CPUs, so we only do a sanity check in that case
- if [ "$TEST_SUITE" == "opencl" ] && [ "$TRAVIS_OS_NAME" == "osx" ]; then ./testOpenCL; else ./runBenchmark --ci;fi

os:
- linux
@@ -62,3 +65,8 @@ compiler:
env:
- TEST_SUITE=opencl
- TEST_SUITE=reference

matrix:
exclude:
- os: linux
env: TEST_SUITE=opencl
13 changes: 8 additions & 5 deletions CMakeLists.txt
@@ -1,7 +1,9 @@
cmake_minimum_required(VERSION 2.8)
project(CN24 C CXX)

set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "Build type")
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Build type: Select either Debug, RelWithDebInfo or Release" FORCE)
endif()

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")

@@ -197,16 +199,17 @@ if(CN24_BUILD_ACCELERATE)
message(STATUS "Using Accelerate include directory: ${ACCELERATE_INCLUDE_DIR}")
include_directories(${ACCELERATE_INCLUDE_DIR})
set(CN24_LIBS ${CN24_LIBS} ${ACCELERATE_BLAS})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flax-vector-conversions")
add_definitions("-DBUILD_BLAS")
add_definitions("-DBLAS_ACCELERATE")
endif()

set(CN24_BUILD_OPENCL OFF CACHE BOOL "Build CN24 with OpenCL support")
if(CN24_BUILD_OPENCL)
find_library(OPENCL_ICDL NAMES OpenCL libOpenCL PATHS $ENV{CUDA_PATH}/lib/x64
$ENV{INTELOCLSDKROOT}/lib/x64)
$ENV{INTELOCLSDKROOT}/lib/x64 $ENV{AMDAPPSDKROOT}/lib/x86_64)
find_path(OPENCL_INCLUDE_DIR CL/cl.h cl.h PATHS $ENV{CUDA_PATH}/include
$ENV{INTELOCLSDKROOT}/include)
$ENV{INTELOCLSDKROOT}/include $ENV{AMDAPPSDKROOT}/include)
message(STATUS "Using OpenCL library: ${OPENCL_ICDL}")
message(STATUS "Using OpenCL include directory: ${OPENCL_INCLUDE_DIR}")
include_directories(${OPENCL_INCLUDE_DIR})
@@ -219,8 +222,8 @@ endif()
set(CN24_BUILD_OPENCL_CLBLAS OFF CACHE BOOL "Build CN24 with OpenCL/clBLAS")
if(CN24_BUILD_OPENCL_CLBLAS)
if(CN24_BUILD_OPENCL)
find_library(CLBLAS_LIBRARY clBLAS libclBLAS)
find_path(CLBLAS_INCLUDE_DIR clBLAS.h)
find_library(CLBLAS_LIBRARY clBLAS libclBLAS PATHS $ENV{AMDAPPSDKROOT}/lib64/import)
find_path(CLBLAS_INCLUDE_DIR clBLAS.h PATHS $ENV{AMDAPPSDKROOT}/include)
message(STATUS "Using OpenCL/clBLAS library: ${CLBLAS_LIBRARY}")
message(STATUS "Using OpenCL/clBLAS include directory: ${CLBLAS_INCLUDE_DIR}")
include_directories(${CLBLAS_INCLUDE_DIR})
11 changes: 11 additions & 0 deletions include/cn24.h
@@ -17,14 +17,22 @@
#include "cn24/util/Config.h"
#include "cn24/util/Dataset.h"
#include "cn24/util/Tensor.h"
#include "cn24/util/CompressedTensor.h"
#include "cn24/util/TensorViewer.h"
#include "cn24/util/CombinedTensor.h"
#include "cn24/util/TensorStream.h"
#include "cn24/util/CompressedTensorStream.h"
#include "cn24/util/FloatTensorStream.h"
#include "cn24/util/PNGUtil.h"
#include "cn24/util/JPGUtil.h"
#include "cn24/util/Log.h"
#include "cn24/util/KITTIData.h"
#include "cn24/util/Init.h"
#include "cn24/util/GradientTester.h"
#include "cn24/util/StatAggregator.h"
#include "cn24/util/StatSink.h"
#include "cn24/util/ConsoleStatSink.h"
#include "cn24/util/CSVStatSink.h"

#include "cn24/math/TensorMath.h"

@@ -38,6 +46,7 @@
#include "cn24/net/ConvolutionLayer.h"
#include "cn24/net/MaxPoolingLayer.h"
#include "cn24/net/AdvancedMaxPoolingLayer.h"
#include "cn24/net/InputDownSamplingLayer.h"
#include "cn24/net/LocalResponseNormalizationLayer.h"
#include "cn24/net/UpscaleLayer.h"
#include "cn24/net/LossFunctionLayer.h"
@@ -48,11 +57,13 @@
#include "cn24/net/SpatialPriorLayer.h"
#include "cn24/net/ConcatenationLayer.h"
#include "cn24/net/GradientAccumulationLayer.h"
#include "cn24/net/SumLayer.h"
#include "cn24/net/Net.h"
#include "cn24/net/Trainer.h"
#include "cn24/net/NetGraph.h"
#include "cn24/net/NetStatus.h"

#include "cn24/factory/ConfigurableFactory.h"
#include "cn24/factory/SkipLayerNetworkFactory.h"

#endif
14 changes: 13 additions & 1 deletion include/cn24/factory/ConfigurableFactory.h
@@ -24,8 +24,20 @@
#include "../util/Log.h"

namespace Conv {

class Factory {
public:
virtual int AddLayers(Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false, std::ostream& graph_output = std::cout) = 0;
virtual bool AddLayers(NetGraph& graph, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false) = 0;
virtual int patchsizex() = 0;
virtual int patchsizey() = 0;
virtual Layer* CreateLossLayer(const unsigned int output_classes, const datum loss_weight = 1.0) = 0;
virtual void InitOptimalSettings() = 0;
virtual TrainerSettings optimal_settings() const = 0;
virtual Method method() const = 0;
};

class ConfigurableFactory {
class ConfigurableFactory : public Factory {
public:
/**
* @brief Builds a ConfigurableFactory using an input stream and a random seed
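The hunk above introduces an abstract Factory interface and makes ConfigurableFactory derive from it, so callers can build networks against the interface alone. A minimal sketch of that polymorphic use follows; the ConfigurableFactory constructor arguments, the "example.net" file name, and the default-constructed NetGraph/NetGraphConnection are assumptions based only on the comments and signatures shown in this diff, not verified against the full API.

    #include <fstream>
    #include <cn24.h>

    int main() {
      // Network architecture description; the file name is purely illustrative.
      std::ifstream net_config("example.net");

      // The doc comment above says a ConfigurableFactory is built from an
      // input stream and a random seed; the exact parameter list is assumed.
      Conv::ConfigurableFactory factory(net_config, 49932);

      // The new abstract base class lets client code stay factory-agnostic.
      Conv::Factory* base = &factory;

      Conv::NetGraph graph;
      Conv::NetGraphConnection data_connection;  // would normally reference a data layer node
      base->AddLayers(graph, data_connection, 10 /* output classes */);

      return 0;
    }
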
28 changes: 28 additions & 0 deletions include/cn24/factory/SkipLayerNetworkFactory.h
@@ -0,0 +1,28 @@
#ifndef CONV_SKIPLAYERNETWORKFACTORY_H
#define CONV_SKIPLAYERNETWORKFACTORY_H

#include <iostream>

#include "../net/Net.h"
#include "../net/NetGraph.h"
#include "../net/Trainer.h"
#include "../util/Dataset.h"
#include "../util/Log.h"
#include "ConfigurableFactory.h"

namespace Conv {

class SkipLayerNetworkFactory : public Factory {
int AddLayers(Net& net, Connection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false, std::ostream& graph_output = std::cout);
bool AddLayers(NetGraph& graph, NetGraphConnection data_layer_connection, const unsigned int output_classes, bool add_loss_layer = false);
int patchsizex();
int patchsizey();
Layer* CreateLossLayer(const unsigned int output_classes, const datum loss_weight = 1.0);
void InitOptimalSettings();
TrainerSettings optimal_settings() const;
Method method() const;
};

}

#endif
19 changes: 19 additions & 0 deletions include/cn24/math/TensorMath.h
@@ -100,6 +100,25 @@ class TensorMath {
static void SMS2(
const Tensor& source,
Tensor& target);

static void DOWN(
const Tensor& source,
Tensor& target,
const int region_width,
const int region_height,
const datum target_factor);

static void UP(
const Tensor& source,
Tensor& target,
const int region_width,
const int region_height,
const datum target_factor);

static void ADD(
const Tensor& source_a,
const Tensor& source_b,
Tensor& target);
};

}
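The new DOWN, UP and ADD primitives presumably back the InputDownSamplingLayer and SumLayer added elsewhere in this commit. A rough usage sketch follows; only the TensorMath signatures are taken from the header above, while the Tensor constructor layout (samples, width, height, maps) and the exact semantics of target_factor are assumptions.

    #include <cn24.h>

    int main() {
      // Assumed Tensor layout: (samples, width, height, maps).
      Conv::Tensor input(1, 32, 32, 3);
      Conv::Tensor downsampled(1, 16, 16, 3);  // half resolution in both dimensions
      Conv::Tensor upsampled(1, 32, 32, 3);
      Conv::Tensor sum(1, 32, 32, 3);

      // Reduce each 2x2 source region into one target element, scaled by target_factor.
      Conv::TensorMath::DOWN(input, downsampled, 2, 2, 1.0);

      // Spread each source element back over a 2x2 target region.
      Conv::TensorMath::UP(downsampled, upsampled, 2, 2, 1.0);

      // Element-wise sum of two tensors into a third.
      Conv::TensorMath::ADD(input, upsampled, sum);

      return 0;
    }
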
10 changes: 10 additions & 0 deletions include/cn24/net/BinaryStatLayer.h
@@ -19,6 +19,7 @@

#include "Layer.h"
#include "StatLayer.h"
#include "../util/StatAggregator.h"

namespace Conv {

@@ -33,6 +34,8 @@ class BinaryStatLayer: public Layer, public StatLayer {
*/
BinaryStatLayer(unsigned int thresholds = 24, const datum min_t = -0.458333,
const datum max_t = 0.5);

void UpdateAll();

/**
* @brief Prints the current statistics
@@ -80,6 +83,13 @@ class BinaryStatLayer: public Layer, public StatLayer {
datum* false_negatives_ = nullptr;

bool disabled_ = false;

StatDescriptor* stat_fpr_ = nullptr;
StatDescriptor* stat_fnr_ = nullptr;
StatDescriptor* stat_pre_ = nullptr;
StatDescriptor* stat_rec_ = nullptr;
StatDescriptor* stat_acc_ = nullptr;
StatDescriptor* stat_f1_ = nullptr;
};

}
12 changes: 10 additions & 2 deletions include/cn24/net/ConfusionMatrixLayer.h
@@ -19,7 +19,9 @@
#include <vector>

#include "Layer.h"
#include "StatLayer.h"
#include "StatLayer.h"

#include "../util/StatAggregator.h"

namespace Conv {

@@ -34,10 +36,11 @@ class ConfusionMatrixLayer: public Layer, public StatLayer {
explicit ConfusionMatrixLayer(std::vector<std::string> names,
const unsigned int classes);

void UpdateAll();
/**
* @brief Prints the current statistics
*
* @param prefix This is printed before every line ouf output
* @param prefix This is printed before every line of output
* @param training Whether the net is currently training. Affects output color
*/
void Print (std::string prefix, bool training);
@@ -80,6 +83,11 @@ class ConfusionMatrixLayer: public Layer, public StatLayer {
long double total_ = 0;
long double right_ = 0;
long double* per_class_ = nullptr;

StatDescriptor* stat_orr_ = nullptr;
StatDescriptor* stat_arr_ = nullptr;
StatDescriptor* stat_iou_ = nullptr;

};

}
70 changes: 70 additions & 0 deletions include/cn24/net/InputDownSamplingLayer.h
@@ -0,0 +1,70 @@
/*
* This file is part of the CN24 semantic segmentation software,
* copyright (C) 2015 Clemens-Alexander Brust (ikosa dot de at gmail dot com).
*
* For licensing information, see the LICENSE file included with this project.
*/
/**
* @file InputDownSamplingLayer.h
* @class InputDownSamplingLayer
* @brief Layer that scales input down
*
* @author Clemens-Alexander Brust (ikosa dot de at gmail dot com)
*/

#ifndef CONV_INPUTDOWNSAMPLINGLAYER_H
#define CONV_INPUTDOWNSAMPLINGLAYER_H

#include <string>
#include <sstream>

#include "SimpleLayer.h"


namespace Conv {

class InputDownSamplingLayer : public SimpleLayer {
public:
/**
* @brief Constructs a max-pooling Layer.
*
* @param region_width Width of the pooling regions
* @param region_height Height of the pooling regions
*/
InputDownSamplingLayer(const unsigned int region_width,
const unsigned int region_height);

// Implementations for SimpleLayer
bool CreateOutputs (const std::vector< CombinedTensor* >& inputs, std::vector< CombinedTensor* >& outputs);
bool Connect (const CombinedTensor* input, CombinedTensor* output);
void FeedForward();
void BackPropagate();

inline unsigned int Gain() {
return gain / (region_width_ * region_height_);
}

inline std::string GetLayerDescription() {
std::ostringstream ss;
ss << "Input Down-Sampling Layer (" << region_width_ << "x" << region_height_ << ")";
return ss.str();
}

bool IsOpenCLAware();
private:
// Settings
unsigned int region_width_ = 0;
unsigned int region_height_ = 0;

// Feature map dimensions
unsigned int input_width_ = 0;
unsigned int input_height_ = 0;
unsigned int output_width_ = 0;
unsigned int output_height_ = 0;

unsigned int maps_ = 0;
};

}

#endif
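
Since this header is entirely new, here is a small sketch that only instantiates the layer and prints its description string; wiring it into a Net or NetGraph via CreateOutputs/Connect is omitted because that plumbing is not shown in this file. Only the constructor and GetLayerDescription() above are used.

    #include <iostream>
    #include <cn24.h>

    int main() {
      // 2x2 down-sampling regions, per the constructor documented above.
      Conv::InputDownSamplingLayer layer(2, 2);

      // Per GetLayerDescription() above, this prints:
      // "Input Down-Sampling Layer (2x2)"
      std::cout << layer.GetLayerDescription() << std::endl;
      return 0;
    }
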
8 changes: 8 additions & 0 deletions include/cn24/net/NetGraph.h
@@ -20,6 +20,8 @@
#include "NetStatus.h"
#include "../util/TensorViewer.h"

#include "StatLayer.h"

#include <vector>

namespace Conv {
@@ -106,6 +108,12 @@ class NetGraph : public NetStatus {
// Output
void PrintGraph(std::ostream& graph_output);
void SetLayerViewEnabled(bool enabled) { layerview_enabled_ = enabled; }
void SetStatLayersEnabled(bool enabled) {
for (unsigned int n = 0; n < GetStatNodes().size(); n++) {
StatLayer* stat_layer = dynamic_cast<StatLayer*>(GetStatNodes()[n]->layer);
stat_layer->SetDisabled(!enabled);
}
}
datum AggregateLoss();

// Status
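The new SetStatLayersEnabled helper toggles every statistics node in the graph with a single call. A sketch of the intended usage pattern, assuming the graph was already built and contains stat layers (construction not shown):

    #include <cn24.h>

    // Sketch: switch statistics layers off around a phase where they are not
    // needed, then switch them back on.
    void RunWithoutStatistics(Conv::NetGraph& graph) {
      graph.SetStatLayersEnabled(false);  // calls SetDisabled(true) on every StatLayer node
      // ... forward/backward passes where statistics would only cost time ...
      graph.SetStatLayersEnabled(true);   // restore statistics collection
    }
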
8 changes: 7 additions & 1 deletion include/cn24/net/NetStatus.h
@@ -15,6 +15,9 @@
#ifndef CONV_NETSTATUS_H
#define CONV_NETSTATUS_H

#include "../util/Init.h"
#include "../util/StatAggregator.h"

namespace Conv {

class NetStatus{
@@ -29,7 +32,10 @@ class NetStatus{
*
* @param is_testing The new testing status
*/
inline void SetIsTesting(bool is_testing) { is_testing_ = is_testing; }
inline void SetIsTesting(bool is_testing) {
is_testing_ = is_testing;
System::stat_aggregator->hardcoded_stats_.is_training = !is_testing;
}
private:
bool is_testing_ = false;
};
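SetIsTesting now also flips the global StatAggregator's hard-coded is_training flag, so recorded statistics are attributed to the correct phase automatically. A trivial sketch, assuming a NetStatus (or derived NetGraph) instance is available:

    #include <cn24.h>

    // Sketch: switching to testing keeps the StatAggregator in sync, per the
    // inline implementation shown above.
    void BeginTesting(Conv::NetStatus& status) {
      status.SetIsTesting(true);
      // System::stat_aggregator->hardcoded_stats_.is_training is now false.
    }
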
1 change: 1 addition & 0 deletions include/cn24/net/StatLayer.h
@@ -23,6 +23,7 @@ namespace Conv {

class StatLayer {
public:
virtual void UpdateAll() = 0;
virtual void Print(std::string prefix, bool training) = 0;
virtual void Reset() = 0;
virtual void SetDisabled(bool disabled) = 0;
(Diff truncated: the remaining 45 changed files are not shown.)
