Merge pull request #30 from cvjena/master
Pull changes for v1.3.0
Clemens-Alexander Brust committed Sep 18, 2015
2 parents 538de59 + 9477752 commit 3291b2f
Showing 6 changed files with 24 additions and 17 deletions.
3 changes: 3 additions & 0 deletions CONTRIBUTORS
@@ -2,6 +2,9 @@ Contributors from the Computer Vision Group of the Friedrich Schiller University
Dipl.-Inf. Sven Sickert
Marcel Simon M.Sc.
Dr. rer. nat. Erik Rodner

Contributors from GitHub:
lolz0r (RGB regression)

FindSQLite3.CMake (New BSD License):
Andreas Schneider
23 changes: 13 additions & 10 deletions example/labelmefacade.net
@@ -2,13 +2,13 @@

# Network configuration
?convolutional kernels=16 size=7x7
?maxpooling size=2x2
?amaxpooling size=2x2 stride=2x2
?tanh

?convolutional size=5x5 kernels=12
?convolutional kernels=12 size=5x5
?tanh

?convolutional size=5x5 kernels=64
?convolutional kernels=96 size=5x5
?tanh

?fullyconnected neurons=192
@@ -18,12 +18,15 @@
?output

# Learning settings
l1=0.001
l2=0.0005
lr=0.0001
gamma=0.003
method=patch
l1=0.000
l2=0.0008
lr=0.02
gamma=0.00003
momentum=0.9
exponent=0.75
iterations=100
sbatchsize=10
pbatchsize=2
iterations=10000
sbatchsize=1
pbatchsize=96
mu=1.75
eta=0.1
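The new learning settings add an explicit momentum and decay exponent and raise the initial learning rate. Trainer::CalculateLR (see the Trainer.cpp hunk below) derives the per-iteration rate from lr, gamma and exponent; the exact formula is not shown in this diff, so the following standalone C++ sketch only assumes the common inverse-decay policy lr_t = lr0 * (1 + gamma * t)^(-exponent), with names chosen to mirror the config keys above:

#include <cmath>
#include <cstdio>

// Hypothetical illustration of an inverse-decay schedule; not CN24's actual code.
double inv_decay_lr(double lr0, double gamma, double exponent, unsigned long t) {
  // lr_t = lr0 * (1 + gamma * t)^(-exponent)
  return lr0 * std::pow(1.0 + gamma * static_cast<double>(t), -exponent);
}

int main() {
  // With lr=0.02, gamma=0.00003, exponent=0.75 the rate decays slowly over training.
  for (unsigned long t : {0ul, 1000ul, 10000ul, 100000ul})
    std::printf("t=%6lu  lr=%.6f\n", t, inv_decay_lr(0.02, 0.00003, 0.75, t));
  return 0;
}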
Binary file modified example/lmf_pretrained.Tensor
Binary file not shown.
10 changes: 5 additions & 5 deletions src/math/TensorMath.cpp
@@ -194,7 +194,7 @@ void TensorMath::IM2COL(const Tensor& source, const int source_width, const int
FATAL("Error setting kernel args: " << (signed int) error);
}

size_t global_work_size[] = {target_width * target_height, target_maps, samples};
size_t global_work_size[] = {(size_t)(target_width * target_height), (size_t)target_maps, (size_t)samples};

error = clEnqueueNDRangeKernel (CLHelper::queue, CLHelper::k_im2col, 3, NULL,
global_work_size, NULL, 0, NULL, NULL);
@@ -294,7 +294,7 @@ void TensorMath::COL2IM(Tensor& source, const int source_width, const int source
FATAL("Error setting kernel args: " << (signed int) error);
}

size_t global_work_size[] = {source_width * source_height, maps, samples};
size_t global_work_size[] = {(size_t)(source_width * source_height), (size_t)maps, (size_t)samples};

error = clEnqueueNDRangeKernel (CLHelper::queue, CLHelper::k_col2im, 3, NULL,
global_work_size, NULL, 0, NULL, NULL);
@@ -365,7 +365,7 @@ void TensorMath::SETSAMPLE(Tensor& A, const int smA, const datum value)
FATAL("Error setting kernel args: " << (signed int) error);
}

size_t global_work_size[] = {smA == -1 ? A.elements() : A.width() * A.height() * A.samples()};
size_t global_work_size[] = {(size_t)(smA == -1 ? A.elements() : A.width() * A.height() * A.samples())};

error = clEnqueueNDRangeKernel (CLHelper::queue, CLHelper::k_setValue, 1, NULL,
global_work_size, NULL, 0, NULL, NULL);
@@ -416,7 +416,7 @@ void TensorMath::SMS(const Tensor& source, Tensor& target)
FATAL("Error setting kernel args: " << (signed int) error);
}

size_t global_work_size[] = {target.elements()};
size_t global_work_size[] = {(size_t)target.elements()};

error = clEnqueueNDRangeKernel (CLHelper::queue, CLHelper::k_sms, 1, NULL,
global_work_size, NULL, 0, NULL, NULL);
@@ -451,4 +451,4 @@ void TensorMath::SMS(const Tensor& source, Tensor& target)
target.hint_ignore_content_ = false;
}

}
}
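The TensorMath.cpp hunks all add explicit size_t casts to the global_work_size arrays handed to clEnqueueNDRangeKernel. Presumably the motivation is that brace-initializing a size_t array from int expressions is a narrowing conversion, which compilers in C++11 mode warn about or reject. A minimal sketch of the pattern; the function and parameter names below are illustrative, not CN24's:

#include <cstddef>

void launch_dims_example(int target_width, int target_height, int target_maps, int samples) {
  // Without the casts, many C++11 compilers flag a narrowing conversion
  // (int -> size_t) in the braced initializer:
  // size_t bad[] = {target_width * target_height, target_maps, samples};
  size_t global_work_size[] = {(size_t)(target_width * target_height),
                               (size_t)target_maps,
                               (size_t)samples};
  (void)global_work_size;  // would be passed to clEnqueueNDRangeKernel
}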
4 changes: 2 additions & 2 deletions src/net/Trainer.cpp
@@ -153,8 +153,8 @@ void Trainer::Epoch() {
for (NetGraphNode* training_node : graph_.GetTrainingNodes())
(dynamic_cast<TrainingLayer*>(training_node->layer))->SetTestingMode(false);

LOGDEBUG << "Epoch: " << epoch_ << ", it: " << iterations <<
", bsize: " << first_training_layer_->GetBatchSize() * settings_.sbatchsize << ", lr0: " <<
LOGINFO << "Epoch: " << epoch_ << ", it: " << iterations <<
", bsize: " << first_training_layer_->GetBatchSize() * settings_.sbatchsize << ", current lr: " <<
CalculateLR (epoch_ * iterations) << std::endl;

auto t_begin = std::chrono::system_clock::now();
1 change: 1 addition & 0 deletions tools/trainNetwork.cpp
@@ -98,6 +98,7 @@ int main (int argc, char* argv[]) {
settings.testing_ratio = 1 * it_factor;

// Load dataset
LOGINFO << "Loading dataset, this can take a long time depending on the size!" << std::flush;
Conv::Dataset* dataset = nullptr;

if (patchwise_training) {
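The new log line in trainNetwork.cpp ends in std::flush so that the message actually appears before the potentially long, blocking dataset load begins (assuming LOGINFO resolves to a buffered output stream). A minimal illustration of the same idea using std::cout:

#include <chrono>
#include <iostream>
#include <thread>

int main() {
  std::cout << "Loading dataset, this can take a long time..." << std::flush;
  // Stand-in for the long-running load; without the flush above, the message
  // might not become visible until the stream is flushed much later.
  std::this_thread::sleep_for(std::chrono::seconds(3));
  std::cout << " done." << std::endl;
  return 0;
}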
