
[MXNET-794] Remove Wrong InferType for AdaptiveAvgPool and BilinearResize2D #12098

Merged 1 commit on Aug 10, 2018

35 changes: 0 additions & 35 deletions src/operator/contrib/adaptive_avg_pooling-inl.h
@@ -144,41 +144,6 @@ static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
  return true;
}

static bool AdaptiveAvgPoolOpInferType(const nnvm::NodeAttrs& attrs,
                                       std::vector<int> *in_type,
                                       std::vector<int> *out_type) {
  using namespace mshadow;
  CHECK_EQ(in_type->size(), 1U);
  int dtype = (*in_type)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  // For float16 input type beta, gamma, mean, and average are stored in float32.
  // For other input types, these parameters have the same type as input
  // NOTE: This requirement is from cuDNN (v. 4 and 5)
  int dtype_param = 0;
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DTypeX, AccRealX, {
      dtype_param = mshadow::DataType<AccRealX>::kFlag; });
  out_type->clear();
  out_type->push_back(dtype_param);
  return true;
}

static inline bool AdaptiveAvgPoolOpStorageType(const nnvm::NodeAttrs &attrs,
                                                const int dev_mask,
                                                DispatchMode *dispatch_mode,
                                                std::vector<int> *in_attrs,
                                                std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  *dispatch_mode = DispatchMode::kFCompute;
  for (int& v : *in_attrs) {
    if (v == - 1) v = kDefaultStorage;
  }
  for (size_t i = 0; i < out_attrs->size(); i++) {
    (*out_attrs)[i] = kDefaultStorage;
  }
  return true;
}

using namespace mshadow;
template<typename xpu, int Dim, typename DType>
MSHADOW_XINLINE int get_stride(Tensor<xpu, Dim, DType> tensor, int idx) {
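Note on the deletion above: the FInferType helper looks copied from a batch-norm-style operator (hence the comment about beta, gamma and mean) and reports the accumulation type instead of the input type. For a float16 input, MSHADOW_REAL_TYPE_SWITCH_EX selects float32 as AccRealX, so the output dtype was inferred as float32 even though the pooling kernel actually writes float16. A minimal stand-alone sketch of that mismatch, assuming only the mshadow headers (illustrative, not code added by this PR):

#include <mshadow/base.h>

// Reproduces the dtype the removed FInferType would have reported. For
// mshadow::kFloat16 the accumulation type AccRealX is float, so the inferred
// output dtype becomes mshadow::kFloat32 and no longer matches the operator's
// real float16 output.
int InferredOutDType(int in_dtype) {
  int dtype_param = -1;
  MSHADOW_REAL_TYPE_SWITCH_EX(in_dtype, DTypeX, AccRealX, {
    dtype_param = mshadow::DataType<AccRealX>::kFlag;
  });
  return dtype_param;  // == mshadow::kFloat32 when in_dtype == mshadow::kFloat16
}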
3 changes: 0 additions & 3 deletions src/operator/contrib/adaptive_avg_pooling.cc
@@ -216,8 +216,6 @@ The pooling kernel and stride sizes are automatically chosen for desired output
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::FInferShape>("FInferShape", AdaptiveAvgPoolOpInferShape)
.set_attr<nnvm::FInferType>("FInferType", AdaptiveAvgPoolOpInferType)
.set_attr<FInferStorageType>("FInferStorageType", AdaptiveAvgPoolOpStorageType)
.set_attr<FCompute>("FCompute<cpu>", AdaptiveAvgPoolOpForward<cpu>)
.set_attr<nnvm::FGradient>("FGradient",
ElemwiseGradUseNone{"_backward_contrib_AdaptiveAvgPooling2D"})
@@ -229,7 +227,6 @@ NNVM_REGISTER_OP(_backward_contrib_AdaptiveAvgPooling2D)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FInferStorageType>("FInferStorageType", AdaptiveAvgPoolOpStorageType)
.set_attr<FCompute>("FCompute<cpu>", AdaptiveAvgPoolOpBackward<cpu>);


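With the FInferType and FInferStorageType attributes no longer registered on the forward op, inference falls back to the framework defaults. For the dtype this means the output simply keeps the input's dtype (my reading is that the fallback used here is common::SameType), which is what the float16 case needs. Roughly, for a one-input, one-output operator, that default behaves like the sketch below; the function name is made up for illustration and is not part of this patch:

// Same-dtype inference for a 1-input/1-output op -- approximately what the
// default fallback does once FInferType is left unregistered. Assumes the same
// headers as the surrounding operator code (nnvm, dmlc CHECK macros).
static bool SameDTypeInfer(const nnvm::NodeAttrs& attrs,
                           std::vector<int>* in_type,
                           std::vector<int>* out_type) {
  CHECK_EQ(in_type->size(), 1U);
  int dtype = (*in_type)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  out_type->clear();
  out_type->push_back(dtype);  // output dtype == input dtype, including float16
  return true;
}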
36 changes: 0 additions & 36 deletions src/operator/contrib/bilinear_resize-inl.h
@@ -136,42 +136,6 @@ static bool BilinearSampleOpInferShape(const nnvm::NodeAttrs& attrs,
  return true;
}

static bool BilinearSampleOpInferType(const nnvm::NodeAttrs& attrs,
                                      std::vector<int> *in_type,
                                      std::vector<int> *out_type) {
  using namespace mshadow;
  CHECK_EQ(in_type->size(), 1U);
  int dtype = (*in_type)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  // For float16 input type beta, gamma, mean, and average are stored in float32.
  // For other input types, these parameters have the same type as input
  // NOTE: This requirement is from cuDNN (v. 4 and 5)
  int dtype_param = 0;
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DTypeX, AccRealX, {
      dtype_param = mshadow::DataType<AccRealX>::kFlag; });
  out_type->clear();
  out_type->push_back(dtype_param);
  return true;
}

static inline bool BilinearSampleOpStorageType(const nnvm::NodeAttrs &attrs,
                                               const int dev_mask,
                                               DispatchMode *dispatch_mode,
                                               std::vector<int> *in_attrs,
                                               std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  *dispatch_mode = DispatchMode::kFCompute;
  for (int& v : *in_attrs) {
    if (v == - 1) v = kDefaultStorage;
  }
  for (size_t i = 0; i < out_attrs->size(); i++) {
    (*out_attrs)[i] = kDefaultStorage;
  }
  return true;
}


} // namespace op
} // namespace mxnet

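The FInferStorageType helpers removed from both headers only forced dense storage and kFCompute dispatch, which, as far as I can tell, is exactly what the default storage inference (common::DefaultStorageType) does when the attribute is absent, so dropping them should be behaviour-neutral for dispatch. A compressed sketch of that assumed default, again with a made-up name and not part of the patch:

// Assumed default storage inference: every input and output is marked as dense
// (kDefaultStorage) and the operator is dispatched through plain FCompute.
static bool DenseOnlyStorageType(const nnvm::NodeAttrs& attrs,
                                 const int dev_mask,
                                 DispatchMode* dispatch_mode,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  *dispatch_mode = DispatchMode::kFCompute;
  for (int& v : *in_attrs)
    if (v == -1) v = kDefaultStorage;
  for (int& v : *out_attrs)
    v = kDefaultStorage;
  return true;
}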
3 changes: 0 additions & 3 deletions src/operator/contrib/bilinear_resize.cc
@@ -177,8 +177,6 @@ for more details.
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::FInferShape>("FInferShape", BilinearSampleOpInferShape)
.set_attr<nnvm::FInferType>("FInferType", BilinearSampleOpInferType)
.set_attr<FInferStorageType>("FInferStorageType", BilinearSampleOpStorageType)
.set_attr<FCompute>("FCompute<cpu>", BilinearSampleOpForward<cpu>)
.set_attr<nnvm::FGradient>("FGradient",
ElemwiseGradUseNone{"_backward_contrib_BilinearResize2D"})
@@ -190,7 +188,6 @@ NNVM_REGISTER_OP(_backward_contrib_BilinearResize2D)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FInferStorageType>("FInferStorageType", BilinearSampleOpStorageType)
.set_attr<FCompute>("FCompute<cpu>", BilinearSampleOpBackward<cpu>);

