Fix concatenate and add missing functionality to backends: (#262)
* Fix concatenate and add missing functionality to backends:
- tile
- einsum
- zeros
- abs
kratsg authored and lukasheinrich committed Sep 14, 2018
1 parent 75edf5b commit b143ffd
Showing 5 changed files with 162 additions and 8 deletions.
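As a quick orientation, here is a minimal sketch of the API surface this commit adds, shown with the NumPy backend and mirroring the usage exercised in tests/test_tensor.py below (the import path and the poisson_from_normal constructor argument are taken from those tests):

import numpy as np
from pyhf.tensor.numpy_backend import numpy_backend

tb = numpy_backend(poisson_from_normal=True)

print(tb.tolist(tb.zeros((2, 3))))  # [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
print(tb.tolist(tb.abs([-1, -2])))  # [1, 2]
print(tb.tolist(tb.concatenate([[1, 2], [3, 4]], axis=0)))  # [1, 2, 3, 4]
print(tb.tolist(tb.einsum('ij->ji', np.arange(6).reshape(2, 3))))  # transpose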
26 changes: 24 additions & 2 deletions pyhf/tensor/mxnet_backend.py
@@ -127,6 +127,10 @@ def product(self, tensor_in, axis=None):
else:
return nd.prod(tensor_in, axis)

def abs(self, tensor):
tensor = self.astensor(tensor)
return nd.abs(tensor)

def ones(self, shape):
"""
A new array filled with all ones, with the given shape.
@@ -269,17 +273,18 @@ def where(self, mask, tensor_in_1, tensor_in_2):
return nd.add(nd.multiply(mask, tensor_in_1),
nd.multiply(nd.subtract(1, mask), tensor_in_2))

def concatenate(self, sequence):
def concatenate(self, sequence, axis=0):
"""
Join the elements of the sequence.
Args:
sequence (Array of Tensors): The sequence of arrays to join
axis: dimension along which to concatenate
Returns:
MXNet NDArray: The ndarray of the joined elements.
"""
return nd.concat(*sequence, dim=0)
return nd.concat(*sequence, dim=axis)

def simple_broadcast(self, *args):
"""
@@ -319,6 +324,23 @@ def simple_broadcast(self, *args):
for arg in args]
return nd.stack(*broadcast)

def einsum(self, subscripts, *operands):
"""
A generalized contraction between tensors of arbitrary dimension.
Warning: not implemented in MXNet
Args:
subscripts: str, specifies the subscripts for summation
operands: list of array_like, these are the tensors for the operation
Returns:
tensor: the calculation based on the Einstein summation convention
"""
raise NotImplementedError("mxnet::einsum is not implemented.")

def poisson(self, n, lam):
"""
The continuous approximation to the probability density function of the Poisson
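For the MXNet backend the new axis argument is forwarded to nd.concat, while einsum deliberately raises; a minimal sketch of both behaviors, assuming the astensor/tolist helpers the backend already provides:

from pyhf.tensor.mxnet_backend import mxnet_backend

mx = mxnet_backend()
a, b = mx.astensor([1., 2.]), mx.astensor([3., 4.])
print(mx.tolist(mx.concatenate([a, b], axis=0)))  # [1.0, 2.0, 3.0, 4.0]

try:
    mx.einsum('ij->ji', a)
except NotImplementedError:
    pass  # expected at this commit: no MXNet einsum implementation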
41 changes: 39 additions & 2 deletions pyhf/tensor/numpy_backend.py
@@ -60,9 +60,16 @@ def product(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
return np.product(tensor_in, axis=axis)

def abs(self, tensor):
tensor = self.astensor(tensor)
return np.abs(tensor)

def ones(self, shape):
return np.ones(shape)

def zeros(self, shape):
return np.zeros(shape)

def power(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
@@ -94,8 +101,19 @@ def where(self, mask, tensor_in_1, tensor_in_2):
tensor_in_2 = self.astensor(tensor_in_2)
return np.where(mask, tensor_in_1, tensor_in_2)

def concatenate(self, sequence):
return np.concatenate(sequence)
def concatenate(self, sequence, axis=0):
"""
Join a sequence of arrays along an existing axis.
Args:
sequence: sequence of tensors
axis: dimension along which to concatenate
Returns:
output: the concatenated tensor
"""
return np.concatenate(sequence, axis=axis)

def simple_broadcast(self, *args):
"""
@@ -119,6 +137,25 @@ def simple_broadcast(self, *args):
"""
return np.broadcast_arrays(*args)

def einsum(self, subscripts, *operands):
"""
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations.
Args:
subscripts: str, specifies the subscripts for summation
operands: list of array_like, these are the tensors for the operation
Returns:
tensor: the calculation based on the Einstein summation convention
"""
return np.einsum(subscripts, *operands)

def poisson(self, n, lam):
n = np.asarray(n)
if self.pois_from_norm:
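To make the docstring concrete, a few common operations written as einsum calls (plain NumPy, which is exactly what this backend delegates to):

import numpy as np

a = np.arange(9).reshape(3, 3)
v = np.arange(3)

assert np.allclose(np.einsum('ii->', a), np.trace(a))            # trace
assert np.allclose(np.einsum('ij->ji', a), a.T)                  # transpose
assert np.allclose(np.einsum('ij,j->i', a, v), a.dot(v))         # matrix-vector
assert np.allclose(np.einsum('i,j->ij', v, v), np.outer(v, v))   # outer product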
38 changes: 36 additions & 2 deletions pyhf/tensor/pytorch_backend.py
@@ -65,9 +65,16 @@ def product(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
return torch.prod(tensor_in) if axis is None else torch.prod(tensor_in, axis)

def abs(self, tensor):
tensor = self.astensor(tensor)
return torch.abs(tensor)

def ones(self, shape):
return torch.ones(shape)

def zeros(self, shape):
return torch.zeros(shape)

def power(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
@@ -99,8 +106,19 @@ def where(self, mask, tensor_in_1, tensor_in_2):
tensor_in_2 = self.astensor(tensor_in_2)
return mask * tensor_in_1 + (1-mask) * tensor_in_2

def concatenate(self, sequence):
return torch.cat(sequence)
def concatenate(self, sequence, axis=0):
"""
Join a sequence of arrays along an existing axis.
Args:
sequence: sequence of tensors
axis: dimension along which to concatenate
Returns:
output: the concatenated tensor
"""
return torch.cat(sequence, dim=axis)

def simple_broadcast(self, *args):
"""
@@ -134,6 +152,22 @@ def simple_broadcast(self, *args):
for arg in args]
return broadcast

def einsum(self, subscripts, *operands):
"""
This function provides a way of computing multilinear expressions (i.e.
sums of products) using the Einstein summation convention.
Args:
subscripts: str, specifies the subscripts for summation
operands: list of array_like, these are the tensors for the operation
Returns:
tensor: the calculation based on the Einstein summation convention
"""
ops = tuple(self.astensor(op) for op in operands)
# unpack the operands; torch.einsum also accepted a single list/tuple
# of operands in releases contemporary with this commit
return torch.einsum(subscripts, *ops)

def poisson(self, n, lam):
return self.normal(n, lam, self.sqrt(lam))

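The PyTorch backend converts every operand with astensor before dispatching to torch.einsum; a short sketch of the equivalent call in plain PyTorch, using the unpacked-operand form:

import torch

x = torch.arange(6.).reshape(2, 3)
y = torch.einsum('ij->ji', x)  # transpose via einsum
assert torch.equal(y, x.t())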
39 changes: 37 additions & 2 deletions pyhf/tensor/tensorflow_backend.py
@@ -79,9 +79,16 @@ def product(self, tensor_in, axis=None):
tensor_in = self.astensor(tensor_in)
return tf.reduce_prod(tensor_in) if axis is None else tf.reduce_prod(tensor_in, axis)

def abs(self, tensor):
tensor = self.astensor(tensor)
return tf.abs(tensor)

def ones(self, shape):
return tf.ones(shape)

def zeros(self, shape):
return tf.zeros(shape)

def power(self, tensor_in_1, tensor_in_2):
tensor_in_1 = self.astensor(tensor_in_1)
tensor_in_2 = self.astensor(tensor_in_2)
@@ -113,8 +120,19 @@ def where(self, mask, tensor_in_1, tensor_in_2):
tensor_in_2 = self.astensor(tensor_in_2)
return mask * tensor_in_1 + (1-mask) * tensor_in_2

def concatenate(self, sequence):
return tf.concat(sequence, axis=0)
def concatenate(self, sequence, axis=0):
"""
Join a sequence of arrays along an existing axis.
Args:
sequence: sequence of tensors
axis: dimension along which to concatenate
Returns:
output: the concatenated tensor
"""
return tf.concat(sequence, axis=axis)

def simple_broadcast(self, *args):
"""
@@ -160,6 +178,23 @@ def generic_len(a):
tf.tile(tf.slice(arg, [0], [1]), tf.stack([max_dim])) for arg in args]
return broadcast

def einsum(self, subscripts, *operands):
"""
A generalized contraction between tensors of arbitrary dimension.
This function returns a tensor whose elements are defined by the
subscripts string, which is written in a shorthand form inspired by the
Einstein summation convention.
Args:
subscripts: str, specifies the subscripts for summation
operands: list of array_like, these are the tensors for the operation
Returns:
tensor: the calculation based on the Einstein summation convention
"""
return tf.einsum(subscripts, *operands)

def poisson(self, n, lam):
# could be changed to actual Poisson easily
return self.normal(n, lam, self.sqrt(lam))
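The TensorFlow backend targets TF 1.x graph mode (note the tf.Session threaded through the tests below), so einsum builds a graph node that must be evaluated through a session — a minimal sketch under that assumption:

import tensorflow as tf

sess = tf.Session()
x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
print(sess.run(tf.einsum('ij->ji', x)))
# [[1. 4.]
#  [2. 5.]
#  [3. 6.]]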
26 changes: 26 additions & 0 deletions tests/test_tensor.py
@@ -52,10 +52,36 @@ def test_common_tensor_backends():
== [[1, 1, 1], [2, 3, 4], [5, 6, 7]]
assert list(map(tb.tolist, tb.simple_broadcast([1], [2, 3, 4], [5, 6, 7]))) \
== [[1, 1, 1], [2, 3, 4], [5, 6, 7]]
assert tb.tolist(tb.ones((4,5))) == [[1.]*5]*4
assert tb.tolist(tb.zeros((4,5))) == [[0.]*5]*4
assert tb.tolist(tb.abs([-1,-2])) == [1,2]
with pytest.raises(Exception):
tb.simple_broadcast([1], [2, 3], [5, 6, 7])


def test_einsum():
tf_sess = tf.Session()
backends = [numpy_backend(poisson_from_normal=True),
pytorch_backend(),
tensorflow_backend(session=tf_sess),
mxnet_backend()  # no einsum in MXNet
]

for b in backends[:-1]:
pyhf.set_backend(b)

x = np.arange(20).reshape(5,4).tolist()
assert np.all(b.tolist(b.einsum('ij->ji',x)) == np.asarray(x).T.tolist())
assert b.tolist(b.einsum('i,j->ij',b.astensor([1,1,1]),b.astensor([1,2,3]))) == [[1,2,3]]*3

for b in backends[-1:]:
pyhf.set_backend(b)
x = np.arange(20).reshape(5,4).tolist()
with pytest.raises(NotImplementedError):
b.einsum('ij->ji', [1, 2, 3])

def test_pdf_eval():
tf_sess = tf.Session()
backends = [numpy_backend(poisson_from_normal=True),