Commit 4147ea0

Updated Caffe
1 parent 97c5c2c commit 4147ea0

15 files changed, +452 -231 lines changed

3rdparty/Versions.txt

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 Unix:
 - Caffe:
-- Version 1.0.0, extracted from GitHub on 09/30/2017 from the current master branch.
+- Version 1.0.0, extracted from GitHub on 11/03/2017 from the current master branch.
 - Link: https://github.com/BVLC/caffe

 Windows:

3rdparty/caffe/Makefile

Lines changed: 1 addition & 1 deletion
@@ -699,6 +699,6 @@ $(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS)
 install -m 644 $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib
 cd $(DISTRIBUTE_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT)
 # add python - it's not the standard way, indeed...
-cp -r python $(DISTRIBUTE_DIR)/python
+cp -r python $(DISTRIBUTE_DIR)/

 -include $(DEPS)

3rdparty/caffe/docs/development.md

Lines changed: 1 addition & 1 deletion
@@ -116,5 +116,5 @@ To get a list of all options `googletest` provides, simply pass the `--help` fla

 - **Run `make lint` to check C++ code.**
 - Wrap lines at 80 chars.
-- Follow [Google C++ style](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml) and [Google python style](http://google-styleguide.googlecode.com/svn/trunk/pyguide.html) + [PEP 8](http://legacy.python.org/dev/peps/pep-0008/).
+- Follow [Google C++ style](https://google.github.io/styleguide/cppguide.html) and [Google python style](https://google.github.io/styleguide/pyguide.html) + [PEP 8](http://legacy.python.org/dev/peps/pep-0008/).
 - Remember that “a foolish consistency is the hobgoblin of little minds,” so use your best judgement to write the clearest code for your particular case.

3rdparty/caffe/docs/install_apt.md

Lines changed: 1 addition & 6 deletions
@@ -40,6 +40,7 @@ Continue with [compilation](installation.html#compilation).

 sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
 sudo apt-get install --no-install-recommends libboost-all-dev
+sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev

 **CUDA**: Install by `apt-get` or the NVIDIA `.run` package.
 The NVIDIA package tends to follow more recent library and driver versions, but the installation is more manual.

@@ -54,12 +55,6 @@ This can be skipped for CPU-only installation.

 CUDA 8 is required on Ubuntu 16.04.

-**Remaining dependencies, 14.04**
-
-Everything is packaged in 14.04.
-
-sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev
-
 **Remaining dependencies, 12.04**

 These dependencies need manual installation in 12.04.

3rdparty/caffe/docs/tutorial/layers.md

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ Layers:
 * [ELU](layers/elu.html) - exponential linear rectification.
 * [Sigmoid](layers/sigmoid.html)
 * [TanH](layers/tanh.html)
-* [Absolute Value](layers/abs.html)
+* [Absolute Value](layers/absval.html)
 * [Power](layers/power.html) - f(x) = (shift + scale * x) ^ power.
 * [Exp](layers/exp.html) - f(x) = base ^ (shift + scale * x).
 * [Log](layers/log.html) - f(x) = log(x).
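The formulas listed above are simple elementwise operations; as a quick illustration (not Caffe code), the following NumPy sketch evaluates the AbsVal, Power, Exp, and Log expressions with made-up parameter values:

import numpy as np

x = np.array([0.5, 1.0, 2.0])
shift, scale, power, base = 0.0, 1.0, 2.0, np.e   # made-up parameter values

absval = np.abs(x)                       # Absolute Value: f(x) = |x|
powered = (shift + scale * x) ** power   # Power: f(x) = (shift + scale * x) ^ power
exped = base ** (shift + scale * x)      # Exp: f(x) = base ^ (shift + scale * x)
logged = np.log(x)                       # Log: f(x) = log(x)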

3rdparty/caffe/examples/web_demo/readme.md

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ priority: 10
 ## Requirements

 The demo server requires Python with some dependencies.
-To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](/installation.html)).
+To make sure you have the dependencies, please run `pip install -r examples/web_demo/requirements.txt`, and also make sure that you've compiled the Python Caffe interface and that it is on your `PYTHONPATH` (see [installation instructions](http://caffe.berkeleyvision.org/installation.html)).

 Make sure that you have obtained the Reference CaffeNet Model and the ImageNet Auxiliary Data:


3rdparty/caffe/include/caffe/filler.hpp

Lines changed: 3 additions & 3 deletions
@@ -250,10 +250,10 @@ class BilinearFiller : public Filler<Dtype> {
 CHECK_EQ(blob->width(), blob->height()) << "Filter must be square";
 Dtype* data = blob->mutable_cpu_data();
 int f = ceil(blob->width() / 2.);
-float c = (2 * f - 1 - f % 2) / (2. * f);
+Dtype c = (blob->width() - 1) / (2. * f);
 for (int i = 0; i < blob->count(); ++i) {
-float x = i % blob->width();
-float y = (i / blob->width()) % blob->height();
+Dtype x = i % blob->width();
+Dtype y = (i / blob->width()) % blob->height();
 data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c));
 }
 CHECK_EQ(this->filler_param_.sparse(), -1)
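The replacement line changes the kernel center to c = (width - 1) / (2 * f) while keeping the same triangular weighting. A minimal NumPy sketch of the kernel this filler now produces, assuming a square filter as the CHECK_EQ above enforces (a standalone illustration, not Caffe code):

import math
import numpy as np

def bilinear_kernel(width):
    # Same arithmetic as the updated C++ above: f = ceil(width / 2),
    # c = (width - 1) / (2 * f), weight = (1 - |x/f - c|) * (1 - |y/f - c|).
    f = math.ceil(width / 2.0)
    c = (width - 1) / (2.0 * f)
    k = np.zeros((width, width))
    for i in range(width * width):
        x = i % width
        y = (i // width) % width
        k[y, x] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
    return k

print(bilinear_kernel(4))  # the 4x4 kernel typically used for 2x bilinear upsampling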

3rdparty/caffe/include/caffe/layers/accuracy_layer.hpp

Lines changed: 4 additions & 0 deletions
@@ -68,6 +68,8 @@ class AccuracyLayer : public Layer<Dtype> {
 */
 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
 const vector<Blob<Dtype>*>& top);
+virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+const vector<Blob<Dtype>*>& top);


 /// @brief Not implemented -- AccuracyLayer cannot be used as a loss.

@@ -77,6 +79,8 @@ class AccuracyLayer : public Layer<Dtype> {
 if (propagate_down[i]) { NOT_IMPLEMENTED; }
 }
 }
+virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

 int label_axis_, outer_num_, inner_num_;

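The new declarations add a GPU code path for AccuracyLayer, whose forward pass reports the fraction of samples whose predicted class matches the label. A rough NumPy sketch of that computation for the top-1 case (the real layer also supports top_k and ignore_label, which are omitted here):

import numpy as np

def top1_accuracy(scores, labels):
    # scores: (N, K) predicted scores; labels: (N,) integer class indices
    return float(np.mean(np.argmax(scores, axis=1) == labels))

scores = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
print(top1_accuracy(scores, labels))  # 2 of 3 predictions correct -> 0.666...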

3rdparty/caffe/include/caffe/layers/infogain_loss_layer.hpp

Lines changed: 11 additions & 10 deletions
@@ -13,20 +13,21 @@
 namespace caffe {

 /**
-* @brief A generalization of MultinomialLogisticLossLayer that takes an
+* @brief A generalization of SoftmaxWithLossLayer that takes an
 * "information gain" (infogain) matrix specifying the "value" of all label
 * pairs.
 *
-* Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the
+* Equivalent to the SoftmaxWithLossLayer if the infogain matrix is the
 * identity.
 *
 * @param bottom input Blob vector (length 2-3)
 * -# @f$ (N \times C \times H \times W) @f$
-* the predictions @f$ \hat{p} @f$, a Blob with values in
-* @f$ [0, 1] @f$ indicating the predicted probability of each of the
-* @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$
-* should sum to 1 as in a probability distribution: @f$
-* \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$.
+* the predictions @f$ x @f$, a Blob with values in
+* @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of
+* the @f$ K = CHW @f$ classes. This layer maps these scores to a
+* probability distribution over classes using the softmax function
+* @f$ \hat{p}_{nk} = \exp(x_{nk}) /
+* \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer).
 * -# @f$ (N \times 1 \times 1 \times 1) @f$
 * the labels @f$ l @f$, an integer-valued Blob with values
 * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$

@@ -35,7 +36,7 @@ namespace caffe {
 * (\b optional) the infogain matrix @f$ H @f$. This must be provided as
 * the third bottom blob input if not provided as the infogain_mat in the
 * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the
-* MultinomialLogisticLossLayer.
+* SoftmaxWithLossLayer.
 * @param top output Blob vector (length 1)
 * -# @f$ (1 \times 1 \times 1 \times 1) @f$
 * the computed infogain multinomial logistic loss: @f$ E =

@@ -98,8 +99,8 @@ class InfogainLossLayer : public LossLayer<Dtype> {
 * infogain matrix, if provided as bottom[2])
 * @param bottom input Blob vector (length 2-3)
 * -# @f$ (N \times C \times H \times W) @f$
-* the predictions @f$ \hat{p} @f$; Backward computes diff
-* @f$ \frac{\partial E}{\partial \hat{p}} @f$
+* the predictions @f$ x @f$; Backward computes diff
+* @f$ \frac{\partial E}{\partial x} @f$
 * -# @f$ (N \times 1 \times 1 \times 1) @f$
 * the labels -- ignored as we can't compute their error gradients
 * -# @f$ (1 \times 1 \times K \times K) @f$
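Per the updated comment, the layer now takes raw scores x, maps them through a softmax, and weights the resulting log-probabilities by the infogain matrix H; with H = I it reduces to the ordinary softmax loss. A hedged NumPy sketch of that forward computation (the exact loss expression is truncated in the excerpt above, so the usual infogain form is assumed):

import numpy as np

def infogain_loss(x, labels, H):
    # x: (N, K) raw scores; labels: (N,) integer class ids; H: (K, K) infogain matrix
    x = x - x.max(axis=1, keepdims=True)                 # for numerical stability
    p_hat = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
    # Assumed loss form: E = -(1/N) * sum_n sum_k H[l_n, k] * log(p_hat[n, k])
    return float(-np.sum(H[labels] * np.log(p_hat)) / x.shape[0])

x = np.array([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
labels = np.array([0, 1])
print(infogain_loss(x, labels, np.eye(3)))  # with H = I this is softmax cross-entropy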

3rdparty/caffe/python/caffe/io.py

Lines changed: 1 addition & 1 deletion
@@ -323,7 +323,7 @@ def resize_image(im, new_dims, interp_order=1):
 # skimage is fast but only understands {1,3} channel images
 # in [0, 1].
 im_std = (im - im_min) / (im_max - im_min)
-resized_std = resize(im_std, new_dims, order=interp_order)
+resized_std = resize(im_std, new_dims, order=interp_order, mode='constant')
 resized_im = resized_std * (im_max - im_min) + im_min
 else:
 # the image is a constant -- avoid divide by 0
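The only change here is pinning skimage's `mode` argument to 'constant', which makes the edge-handling behaviour explicit (recent scikit-image releases warn when it is left unset). A small standalone sketch of the call with the new argument, assuming scikit-image is installed and using made-up image data:

import numpy as np
from skimage.transform import resize

im = np.random.rand(32, 32, 3)                 # stand-in for an image in [0, 1]
im_min, im_max = im.min(), im.max()
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, (64, 64), order=1, mode='constant')
resized_im = resized_std * (im_max - im_min) + im_min
print(resized_im.shape)                        # (64, 64, 3)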

0 commit comments
