12 #ifndef MLPACK_METHODS_ANN_LAYER_ADAPTIVE_MAX_POOLING_HPP 13 #define MLPACK_METHODS_ANN_LAYER_ADAPTIVE_MAX_POOLING_HPP 30 typename InputDataType = arma::mat,
31 typename OutputDataType = arma::mat
46 const size_t outputHeight);
63 void Forward(
const arma::Mat<eT>& input, arma::Mat<eT>& output);
75 void Backward(
const arma::Mat<eT>& input,
76 const arma::Mat<eT>& gy,
81 {
return poolingLayer.OutputParameter(); }
87 const OutputDataType&
Delta()
const {
return poolingLayer.Delta(); }
89 OutputDataType&
Delta() {
return poolingLayer.Delta(); }
92 size_t InputWidth()
const {
return poolingLayer.InputWidth(); }
94 size_t&
InputWidth() {
return poolingLayer.InputWidth(); }
97 size_t InputHeight()
const {
return poolingLayer.InputHeight(); }
112 size_t InputSize()
const {
return poolingLayer.InputSize(); }
115 size_t OutputSize()
const {
return poolingLayer.OutputSize(); }
123 template<
typename Archive>
124 void serialize(Archive& ar,
const uint32_t version);
130 void IntializeAdaptivePadding()
132 poolingLayer.StrideWidth() = std::floor(poolingLayer.InputWidth() /
134 poolingLayer.StrideHeight() = std::floor(poolingLayer.InputHeight() /
137 poolingLayer.KernelWidth() = poolingLayer.InputWidth() -
138 (outputWidth - 1) * poolingLayer.StrideWidth();
139 poolingLayer.KernelHeight() = poolingLayer.InputHeight() -
140 (outputHeight - 1) * poolingLayer.StrideHeight();
142 if (poolingLayer.KernelHeight() <= 0 || poolingLayer.KernelWidth() <= 0 ||
143 poolingLayer.StrideWidth() <= 0 || poolingLayer.StrideHeight() <= 0)
145 Log::Fatal <<
"Given output shape (" << outputWidth <<
", " 146 << outputHeight <<
") is not possible for given input shape (" 147 << poolingLayer.InputWidth() <<
", " << poolingLayer.InputHeight()
148 <<
")." << std::endl;
#include "adaptive_max_pooling_impl.hpp"

size_t InputWidth() const
Get the input width.
Implementation of the AdaptiveMaxPooling layer.
void Forward(const arma::Mat< eT > &input, arma::Mat< eT > &output)
Ordinary feed forward pass of a neural network, evaluating the function f(x) by propagating the activity forward through f.
Linear algebra utility functions, generally performed on matrices or vectors.
size_t OutputWidth() const
Get the output width.
void serialize(Archive &ar, const uint32_t version)
Serialize the layer.
The core includes that mlpack expects; standard C++ includes and Armadillo.
size_t OutputHeight() const
Get the output height.
OutputDataType & Delta()
Modify the delta.
size_t & OutputWidth()
Modify the output width.
size_t & OutputHeight()
Modify the output height.
size_t InputHeight() const
Get the input height.
size_t InputSize() const
Get the input size.
const OutputDataType & OutputParameter() const
Get the output parameter.
const OutputDataType & Delta() const
Get the delta.
static MLPACK_EXPORT util::PrefixedOutStream Fatal
Prints fatal messages prefixed with [FATAL], then terminates the program.
size_t & InputHeight()
Modify the input height.
size_t WeightSize() const
Get the size of the weights.
size_t & InputWidth()
Modify the input width.
AdaptiveMaxPooling()
Create the AdaptiveMaxPooling object.
OutputDataType & OutputParameter()
Modify the output parameter.
size_t OutputSize() const
Get the output size.
Implementation of the MaxPooling layer.
void Backward(const arma::Mat< eT > &input, const arma::Mat< eT > &gy, arma::Mat< eT > &g)
Ordinary feed backward pass of a neural network, using 3rd-order tensors as input, calculating the function f(x) by propagating x backwards through f.