diff --git a/pyhf/tensor/mxnet_backend.py b/pyhf/tensor/mxnet_backend.py
index 3571c34bc9..e2b4fcf9b7 100644
--- a/pyhf/tensor/mxnet_backend.py
+++ b/pyhf/tensor/mxnet_backend.py
@@ -127,6 +127,10 @@ def product(self, tensor_in, axis=None):
         else:
             return nd.prod(tensor_in, axis)
 
+    def abs(self, tensor):
+        tensor = self.astensor(tensor)
+        return nd.abs(tensor)
+
     def ones(self, shape):
         """
         A new array filled with all ones, with the given shape.
@@ -269,17 +273,18 @@ def where(self, mask, tensor_in_1, tensor_in_2):
         return nd.add(nd.multiply(mask, tensor_in_1),
                       nd.multiply(nd.subtract(1, mask), tensor_in_2))
 
-    def concatenate(self, sequence):
+    def concatenate(self, sequence, axis=0):
         """
         Join the elements of the sequence.
 
         Args:
             sequence (Array of Tensors): The sequence of arrays to join
+            axis: dimension along which to concatenate
 
         Returns:
             MXNet NDArray: The ndarray of the joined elements.
         """
-        return nd.concat(*sequence, dim=0)
+        return nd.concat(*sequence, dim=axis)
 
     def simple_broadcast(self, *args):
         """
@@ -319,6 +324,22 @@ def simple_broadcast(self, *args):
                      for arg in args]
         return nd.stack(*broadcast)
 
+    def einsum(self, subscripts, *operands):
+        """
+        A generalized contraction between tensors of arbitrary dimension.
+
+        Warning: not implemented in MXNet
+
+        Args:
+            subscripts: str, specifies the subscripts for summation
+            operands: list of array_like, these are the tensors for the operation
+
+        Returns:
+            tensor: the calculation based on the Einstein summation convention
+        """
+        raise NotImplementedError("mxnet::einsum is not implemented.")
+
+
     def poisson(self, n, lam):
         """
         The continous approximation to the probability density function of the Poisson
diff --git a/pyhf/tensor/numpy_backend.py b/pyhf/tensor/numpy_backend.py
index 95e6ab834c..fe5171a090 100644
--- a/pyhf/tensor/numpy_backend.py
+++ b/pyhf/tensor/numpy_backend.py
@@ -60,9 +60,16 @@ def product(self, tensor_in, axis=None):
         tensor_in = self.astensor(tensor_in)
         return np.product(tensor_in, axis = axis)
 
+    def abs(self, tensor):
+        tensor = self.astensor(tensor)
+        return np.abs(tensor)
+
     def ones(self,shape):
         return np.ones(shape)
 
+    def zeros(self, shape):
+        return np.zeros(shape)
+
     def power(self,tensor_in_1, tensor_in_2):
         tensor_in_1 = self.astensor(tensor_in_1)
         tensor_in_2 = self.astensor(tensor_in_2)
@@ -94,8 +101,19 @@ def where(self, mask, tensor_in_1, tensor_in_2):
         tensor_in_2 = self.astensor(tensor_in_2)
         return np.where(mask, tensor_in_1, tensor_in_2)
 
-    def concatenate(self, sequence):
-        return np.concatenate(sequence)
+    def concatenate(self, sequence, axis=0):
+        """
+        Join a sequence of arrays along an existing axis.
+
+        Args:
+            sequence: sequence of tensors
+            axis: dimension along which to concatenate
+
+        Returns:
+            output: the concatenated tensor
+
+        """
+        return np.concatenate(sequence, axis=axis)
 
     def simple_broadcast(self, *args):
         """
@@ -119,6 +137,25 @@ def simple_broadcast(self, *args):
         """
         return np.broadcast_arrays(*args)
 
+    def einsum(self, subscripts, *operands):
+        """
+        Evaluates the Einstein summation convention on the operands.
+
+        Using the Einstein summation convention, many common multi-dimensional
+        array operations can be represented in a simple fashion. This function
+        provides a way to compute such summations. A good way to understand
+        this function is to try the examples in the NumPy einsum docs, which
+        show how many common NumPy functions can be expressed as einsum calls.
+
+        Args:
+            subscripts: str, specifies the subscripts for summation
+            operands: list of array_like, these are the tensors for the operation
+
+        Returns:
+            tensor: the calculation based on the Einstein summation convention
+        """
+        return np.einsum(subscripts, *operands)
+
     def poisson(self, n, lam):
         n = np.asarray(n)
         if self.pois_from_normal:
diff --git a/pyhf/tensor/pytorch_backend.py b/pyhf/tensor/pytorch_backend.py
index 65cc35207d..0a3df608d3 100644
--- a/pyhf/tensor/pytorch_backend.py
+++ b/pyhf/tensor/pytorch_backend.py
@@ -65,9 +65,16 @@ def product(self, tensor_in, axis=None):
         tensor_in = self.astensor(tensor_in)
         return torch.prod(tensor_in) if axis is None else torch.prod(tensor_in, axis)
 
+    def abs(self, tensor):
+        tensor = self.astensor(tensor)
+        return torch.abs(tensor)
+
     def ones(self, shape):
         return torch.Tensor(torch.ones(shape))
 
+    def zeros(self, shape):
+        return torch.Tensor(torch.zeros(shape))
+
     def power(self, tensor_in_1, tensor_in_2):
         tensor_in_1 = self.astensor(tensor_in_1)
         tensor_in_2 = self.astensor(tensor_in_2)
@@ -99,8 +106,19 @@ def where(self, mask, tensor_in_1, tensor_in_2):
         tensor_in_2 = self.astensor(tensor_in_2)
         return mask * tensor_in_1 + (1-mask) * tensor_in_2
 
-    def concatenate(self, sequence):
-        return torch.cat(sequence)
+    def concatenate(self, sequence, axis=0):
+        """
+        Join a sequence of arrays along an existing axis.
+
+        Args:
+            sequence: sequence of tensors
+            axis: dimension along which to concatenate
+
+        Returns:
+            output: the concatenated tensor
+
+        """
+        return torch.cat(sequence, dim=axis)
 
     def simple_broadcast(self, *args):
         """
@@ -134,6 +152,22 @@ def simple_broadcast(self, *args):
                      for arg in args]
         return broadcast
 
+    def einsum(self, subscripts, *operands):
+        """
+        This function provides a way of computing multilinear expressions (i.e.
+        sums of products) using the Einstein summation convention.
+
+        Args:
+            subscripts: str, specifies the subscripts for summation
+            operands: list of array_like, these are the tensors for the operation
+
+        Returns:
+            tensor: the calculation based on the Einstein summation convention
+        """
+        ops = tuple(self.astensor(op) for op in operands)
+        return torch.einsum(subscripts, ops)
+
+
     def poisson(self, n, lam):
         return self.normal(n,lam, self.sqrt(lam))
diff --git a/pyhf/tensor/tensorflow_backend.py b/pyhf/tensor/tensorflow_backend.py
index 8d5737804f..baeef1ecb1 100644
--- a/pyhf/tensor/tensorflow_backend.py
+++ b/pyhf/tensor/tensorflow_backend.py
@@ -79,9 +79,16 @@ def product(self, tensor_in, axis=None):
         tensor_in = self.astensor(tensor_in)
         return tf.reduce_prod(tensor_in) if axis is None else tf.reduce_prod(tensor_in, axis)
 
+    def abs(self, tensor):
+        tensor = self.astensor(tensor)
+        return tf.abs(tensor)
+
     def ones(self, shape):
         return tf.ones(shape)
 
+    def zeros(self, shape):
+        return tf.zeros(shape)
+
     def power(self, tensor_in_1, tensor_in_2):
         tensor_in_1 = self.astensor(tensor_in_1)
         tensor_in_2 = self.astensor(tensor_in_2)
@@ -113,8 +120,19 @@ def where(self, mask, tensor_in_1, tensor_in_2):
         tensor_in_2 = self.astensor(tensor_in_2)
         return mask * tensor_in_1 + (1-mask) * tensor_in_2
 
-    def concatenate(self, sequence):
-        return tf.concat(sequence, axis=0)
+    def concatenate(self, sequence, axis=0):
+        """
+        Join a sequence of arrays along an existing axis.
+
+        Args:
+            sequence: sequence of tensors
+            axis: dimension along which to concatenate
+
+        Returns:
+            output: the concatenated tensor
+
+        """
+        return tf.concat(sequence, axis=axis)
 
     def simple_broadcast(self, *args):
         """
@@ -160,6 +178,23 @@ def generic_len(a):
                      tf.tile(tf.slice(arg, [0], [1]), tf.stack([max_dim])) for arg in args]
         return broadcast
 
+    def einsum(self, subscripts, *operands):
+        """
+        A generalized contraction between tensors of arbitrary dimension.
+
+        This function returns a tensor whose elements are defined by the given
+        equation, which is written in a shorthand form inspired by the Einstein
+        summation convention.
+
+        Args:
+            subscripts: str, specifies the subscripts for summation
+            operands: list of array_like, these are the tensors for the operation
+
+        Returns:
+            tensor: the calculation based on the Einstein summation convention
+        """
+        return tf.einsum(subscripts, *operands)
+
     def poisson(self, n, lam):
         # could be changed to actual Poisson easily
         return self.normal(n, lam, self.sqrt(lam))
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index d52dee1da0..05ec0e519c 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -52,10 +52,32 @@ def test_common_tensor_backends():
            == [[1, 1, 1], [2, 3, 4], [5, 6, 7]]
     assert list(map(tb.tolist, tb.simple_broadcast([1], [2, 3, 4], [5, 6, 7]))) \
            == [[1, 1, 1], [2, 3, 4], [5, 6, 7]]
+    assert tb.tolist(tb.ones((4, 5))) == [[1.] * 5] * 4
+    assert tb.tolist(tb.zeros((4, 5))) == [[0.] * 5] * 4
+    assert tb.tolist(tb.abs([-1, -2])) == [1, 2]
     with pytest.raises(Exception):
         tb.simple_broadcast([1], [2, 3], [5, 6, 7])
 
 
+def test_einsum():
+    tf_sess = tf.Session()
+    backends = [numpy_backend(poisson_from_normal=True),
+                pytorch_backend(),
+                tensorflow_backend(session=tf_sess),
+                mxnet_backend()]  # no einsum in mxnet
+
+    for b in backends[:-1]:
+        pyhf.set_backend(b)
+        x = np.arange(20).reshape(5, 4).tolist()
+        assert np.all(b.tolist(b.einsum('ij->ji', x)) == np.asarray(x).T.tolist())
+        assert b.tolist(b.einsum('i,j->ij', b.astensor([1, 1, 1]), b.astensor([1, 2, 3]))) == [[1, 2, 3]] * 3
+
+    for b in backends[-1:]:
+        pyhf.set_backend(b)
+        with pytest.raises(NotImplementedError):
+            b.einsum('ij->ji', [1, 2, 3])
+
+
 def test_pdf_eval():
     tf_sess = tf.Session()
     backends = [numpy_backend(poisson_from_normal=True),
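
A quick usage sketch of the API this patch adds (not part of the diff; assumes the patched pyhf backends, with the NumPy backend standing in for the others, which mirror the same interface):

# Usage sketch for the methods added in this patch; assumes the patched
# pyhf NumPy backend.
import numpy as np
from pyhf.tensor.numpy_backend import numpy_backend

tb = numpy_backend(poisson_from_normal=True)

# concatenate() now accepts an axis argument (previously hard-coded to 0).
a = tb.astensor([[1., 2.], [3., 4.]])
b = tb.astensor([[5., 6.], [7., 8.]])
assert tb.tolist(tb.concatenate([a, b], axis=1)) == [[1., 2., 5., 6.], [3., 4., 7., 8.]]

# einsum(): 'ij->ji' is a transpose, 'i,j->ij' an outer product.
x = tb.astensor(np.arange(6).reshape(2, 3))
assert tb.tolist(tb.einsum('ij->ji', x)) == [[0., 3.], [1., 4.], [2., 5.]]
assert tb.tolist(tb.einsum('i,j->ij', tb.astensor([1., 1.]), tb.astensor([1., 2.]))) == [[1., 2.], [1., 2.]]

# zeros(), ones(), and abs() round out the backend shim.
assert tb.tolist(tb.zeros((2, 3))) == [[0.] * 3] * 2
assert tb.tolist(tb.abs([-1., 2.])) == [1., 2.]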
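
Because the MXNet backend only stubs einsum (it raises NotImplementedError, as exercised in test_einsum), backend-agnostic callers have to guard the call. A minimal sketch, with an illustrative NumPy fallback that is not part of the patch:

# Guarding a backend whose einsum is stubbed out; the NumPy fallback here
# is illustrative only.
import numpy as np
from pyhf.tensor.mxnet_backend import mxnet_backend

mx = mxnet_backend()
x = [[1., 2., 3.], [4., 5., 6.]]
try:
    transposed = mx.tolist(mx.einsum('ij->ji', x))
except NotImplementedError:
    transposed = np.asarray(x).T.tolist()
assert transposed == [[1., 4.], [2., 5.], [3., 6.]]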