This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[MXNET-344] [ONNX-MXNet] Add new Operator Translations for ONNX import module #11140

Merged on Jun 12, 2018 (24 commits)
24 changes: 22 additions & 2 deletions python/mxnet/contrib/onnx/_import/import_helper.py
@@ -32,6 +32,10 @@
from .op_translations import reduce_max, reduce_mean, reduce_min, reduce_sum
from .op_translations import reduce_prod, avg_pooling, max_pooling
from .op_translations import argmax, argmin, maximum, minimum
from .op_translations import clip, reduce_log_sum, reduce_log_sum_exp
from .op_translations import reduce_sum_square, reduce_l2, max_roi_pooling, instance_norm
from .op_translations import log_softmax, softsign, lesser, greater, equal
from .op_translations import logical_and, logical_or, logical_xor, logical_not

# convert_map defines maps of ONNX operator names to converter functor(callable)
# defined in the op_translations module.
@@ -102,6 +106,22 @@
# Sorting and Searching
'ArgMax' : argmax,
'ArgMin' : argmin,
'Max' : maximum, #elemwise maximum
'Min' : minimum #elemwise minimum
'Max' : maximum,
'Min' : minimum,
'Clip' : clip,
'ReduceLogSum' : reduce_log_sum,
'ReduceLogSumExp' : reduce_log_sum_exp,
'ReduceSumSquare' : reduce_sum_square,
'ReduceL2' : reduce_l2,
'MaxRoiPool' : max_roi_pooling,
'InstanceNormalization' : instance_norm,
'LogSoftmax' : log_softmax,
'Softsign' : softsign,
'Less' : lesser,
'Greater' : greater,
'Equal' : equal,
'And' : logical_and,
'Xor' : logical_xor,
'Not' : logical_not,
'Or' : logical_or
}
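For context, the comment above the map notes that convert_map maps ONNX operator names to converter functors defined in the op_translations module. The sketch below is hypothetical (convert_node and its signature are illustrative, not the actual import module code); each converter takes (attrs, inputs, proto_obj) and returns an MXNet operator name or symbol together with the fixed-up attributes and inputs.

# Hypothetical dispatch helper, for illustration only.
def convert_node(op_type, attrs, inputs, proto_obj, convert_map):
    if op_type not in convert_map:
        raise NotImplementedError("ONNX operator %s is not yet supported" % op_type)
    converter = convert_map[op_type]
    # Each converter returns (mxnet_op_or_symbol, new_attrs, inputs); for
    # example, the clip converter below returns ('clip', {...}, inputs).
    return converter(attrs, inputs, proto_obj)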
110 changes: 99 additions & 11 deletions python/mxnet/contrib/onnx/_import/op_translations.py
@@ -18,6 +18,7 @@
# coding: utf-8
""" Module for translating ONNX operators into Mxnet operatoes"""
# pylint: disable=unused-argument,protected-access
import numpy as np
from . import translation_utils
from .... import symbol

@@ -80,6 +81,22 @@ def divide(attrs, inputs, proto_obj):
return op_value, new_attr, inputs
return 'broadcast_div', new_attr, inputs

def logical_and(attrs, inputs, proto_obj):
"""Logical and of two input arrays."""
return 'broadcast_logical_and', attrs, inputs

def logical_or(attrs, inputs, proto_obj):
"""Logical or of two input arrays."""
return 'broadcast_logical_or', attrs, inputs

def logical_xor(attrs, inputs, proto_obj):
"""Logical xor of two input arrays."""
return 'broadcast_logical_xor', attrs, inputs

def logical_not(attrs, inputs, proto_obj):
"""Logical not of two input arrays."""
return 'logical_not', attrs, inputs

def absolute(attrs, inputs, proto_obj):
"""Returns element-wise absolute value of the input."""
return 'abs', attrs, inputs
@@ -97,7 +114,6 @@ def argmax(attrs, inputs, proto_obj):
"""Returns indices of the maximum values along an axis"""
return 'argmax', attrs, inputs


def argmin(attrs, inputs, proto_obj):
"""Returns indices of the minimum values along an axis."""
return 'argmin', attrs, inputs
@@ -130,6 +146,18 @@ def minimum(attrs, inputs, proto_obj):
mxnet_op = inputs[0]
return mxnet_op, attrs, inputs

def lesser(attrs, inputs, proto_obj):
"""Logical Lesser operator with broadcasting."""
return 'broadcast_lesser', attrs, inputs

def greater(attrs, inputs, proto_obj):
"""Logical Greater operator with broadcasting."""
return 'broadcast_greater', attrs, inputs

def equal(attrs, inputs, proto_obj):
"""Logical Equal operator with broadcasting."""
return 'broadcast_equal', attrs, inputs

#Hyperbolic functions
def tanh(attrs, inputs, proto_obj):
"""Returns the hyperbolic tangent of the input array."""
@@ -151,6 +179,10 @@ def concat(attrs, inputs, proto_obj):
return 'concat', new_attrs, inputs

# Basic neural network functions
def softsign(attrs, inputs, proto_obj):
"""Computes softsign of x element-wise."""
return 'softsign', attrs, inputs

def sigmoid(attrs, inputs, proto_obj):
"""Computes elementwise sigmoid of the input array"""
return 'sigmoid', attrs, inputs
@@ -183,6 +215,11 @@ def batch_norm(attrs, inputs, proto_obj):
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return 'BatchNorm', new_attrs, inputs

def instance_norm(attrs, inputs, proto_obj):
"""Instance Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
return 'InstanceNorm', new_attrs, inputs

def leaky_relu(attrs, inputs, proto_obj):
"""Leaky Relu function"""
if 'alpha' in attrs:
@@ -211,6 +248,16 @@ def softmax(attrs, inputs, proto_obj):
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs

def log_softmax(attrs, inputs, proto_obj):
"""Computes the log softmax of the input. This is equivalent to
computing softmax followed by log."""
return 'log_softmax', attrs, inputs
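A quick NumPy check of the equivalence stated in the docstring above (made-up data, not part of this diff): log_softmax(x) equals log(softmax(x)).

import numpy as np

x = np.random.rand(2, 5)
softmax = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
log_softmax = x - np.log(np.exp(x).sum(axis=1, keepdims=True))
np.testing.assert_allclose(log_softmax, np.log(softmax))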

def softplus(attrs, inputs, proto_obj):
"""Applies the sofplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
return 'Activation', new_attrs, inputs

def conv(attrs, inputs, proto_obj):
"""Compute N-D convolution on (N+2)-D input."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
@@ -389,15 +436,9 @@ def transpose(attrs, inputs, proto_obj):

def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
# MXNet doesn't have a squeeze operator.
# Using "split" to perform similar operation.
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
axes = new_attrs.get('axis')
mxnet_op = symbol.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)
for i in axes[1:]:
mxnet_op = symbol.split(mxnet_op, axis=i-1, num_outputs=1, squeeze_axis=1)
return mxnet_op, new_attrs, inputs
return 'squeeze', new_attrs, inputs

def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
@@ -417,6 +458,16 @@ def flatten(attrs, inputs, proto_obj):
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs

def clip(attrs, inputs, proto_obj):
"""Clips (limits) the values in an array."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
'max' : 'a_max'})
if 'a_max' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
if 'a_min' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
return 'clip', new_attrs, inputs

Review comment (Contributor): can we use np.ninf? I think that is float, please check once.
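Below is a minimal sketch in plain NumPy (hypothetical attrs dict, not the actual import code) of what the clip converter above produces when the ONNX node only carries a 'min' attribute, plus a quick check of the reviewer's np.ninf question, assuming np.NINF is the constant meant.

import numpy as np

# Hypothetical ONNX Clip attributes with only a lower bound: the converter
# renames 'min' -> 'a_min' and fills the missing 'a_max' with np.inf so that
# MXNet's clip, which expects both bounds, still works.
attrs = {'min': 0.0}
new_attrs = {'a_min': attrs['min'], 'a_max': np.inf}
assert new_attrs == {'a_min': 0.0, 'a_max': np.inf}

# On the reviewer's question: in NumPy 1.x, np.NINF is just the Python float
# -inf, so -np.inf and np.NINF are interchangeable here (NINF was later
# removed in NumPy 2.0, hence the getattr guard).
ninf = getattr(np, 'NINF', -np.inf)
assert isinstance(ninf, float) and ninf == -np.inf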

#Powers
def reciprocal(attrs, inputs, proto_obj):
"""Returns the reciprocal of the argument, element-wise."""
@@ -454,20 +505,49 @@ def reduce_mean(attrs, inputs, proto_obj):
return 'mean', new_attrs, inputs

def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs

def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs

def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs

def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs

Review comment (Contributor): Why inputs[0]?

Reply (Member Author): The ReduceLogSum op takes an input tensor - https://github.com/onnx/onnx/blob/master/docs/Operators.md#ReduceLogSum. While translating this ONNX operator into MXNet, we split the ReduceLogSum op into a sum over the given axes followed by a log operator.
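To illustrate the sum-then-log split described in the reply, a small worked example in plain NumPy (made-up data, not part of this PR):

import numpy as np

# Tiny made-up input; ReduceLogSum over axis 1 with keepdims=True.
x = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]], dtype=np.float32)
s = np.sum(x, axis=1, keepdims=True)  # step 1: sum over the given axes
out = np.log(s)                       # step 2: elementwise log
# out is [[log(6.)], [log(15.)]], which matches what the ONNX spec defines
# ReduceLogSum to return for this input.
print(out)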

def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs

def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs

def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs

def avg_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
@@ -497,3 +577,11 @@ def max_pooling(attrs, inputs, proto_obj):
new_op = translation_utils._fix_pooling('max', inputs, new_attrs)

return new_op, new_attrs, inputs

def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs
72 changes: 72 additions & 0 deletions tests/python-pytest/onnx/import/onnx_import_test.py
@@ -77,6 +77,78 @@ def test_broadcast():
output = bkd_rep.run([input1, input2])
npt.assert_almost_equal(output[0], numpy_op)

@with_seed()
def test_greater():
"""Test for logical greater in onnx operators."""
input1 = np.random.rand(1, 3, 4, 5).astype("float32")
input2 = np.random.rand(1, 5).astype("float32")
inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

nodes = [helper.make_node("Greater", ["input1", "input2"], ["output"])]

graph = helper.make_graph(nodes,
"greater_test",
inputs,
outputs)

greater_model = helper.make_model(graph)

bkd_rep = mxnet_backend.prepare(greater_model)
numpy_op = np.greater(input1, input2).astype(np.float32)
output = bkd_rep.run([input1, input2])
npt.assert_almost_equal(output[0], numpy_op)

@with_seed()
def test_lesser():
"""Test for logical greater in onnx operators."""
input1 = np.random.rand(1, 3, 4, 5).astype("float32")
input2 = np.random.rand(1, 5).astype("float32")
inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

nodes = [helper.make_node("Less", ["input1", "input2"], ["output"])]

graph = helper.make_graph(nodes,
"lesser_test",
inputs,
outputs)

greater_model = helper.make_model(graph)

bkd_rep = mxnet_backend.prepare(greater_model)
numpy_op = np.less(input1, input2).astype(np.float32)
output = bkd_rep.run([input1, input2])
npt.assert_almost_equal(output[0], numpy_op)

@with_seed()
def test_equal():
"""Test for logical greater in onnx operators."""
input1 = np.random.rand(1, 3, 4, 5).astype("float32")
input2 = np.random.rand(1, 5).astype("float32")
inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=(1, 3, 4, 5)),
helper.make_tensor_value_info("input2", TensorProto.FLOAT, shape=(1, 5))]

outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 3, 4, 5))]

nodes = [helper.make_node("Equal", ["input1", "input2"], ["output"])]

graph = helper.make_graph(nodes,
"equal_test",
inputs,
outputs)

greater_model = helper.make_model(graph)

bkd_rep = mxnet_backend.prepare(greater_model)
numpy_op = np.equal(input1, input2).astype(np.float32)
output = bkd_rep.run([input1, input2])
npt.assert_almost_equal(output[0], numpy_op)

def test_super_resolution_example():
"""Test the super resolution example in the example/onnx folder"""
sys.path.insert(0, os.path.join(CURR_PATH, '../../../../example/onnx/'))
10 changes: 10 additions & 0 deletions tests/python-pytest/onnx/import/test_cases.py
@@ -58,6 +58,16 @@
'test_argmax',
'test_argmin',
'test_min',
'test_logical_and',
'test_logical_xor',
'test_logical_not',
'test_logical_or',
'test_clip',
'test_softsign',
'test_reduce_l2',
'test_reduce_log_sum',
'test_reduce_log_sum_exp',
'test_reduce_sum_square',
#pytorch operator tests
'test_operator_exp',
'test_operator_maxpool',