/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
//

#include <ops/declarable/helpers/top_k.h>
#include <MmulHelper.h>
#include <NDArrayFactory.h>
#include <Status.h>

namespace nd4j {
namespace ops {
namespace helpers {
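
// File-scope fallback LaunchContext for the helpers below. The public entry
// points (determinant, inverse, cholesky) overwrite it with the caller's
// context before dispatching, so the templated helpers can reach it without
// threading a context through every signature. Note that this is shared
// mutable state: concurrent calls with different contexts would race on it.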
nd4j::LaunchContext* defaultContext = nd4j::LaunchContext::defaultContext();

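// Swaps rows theFirst and theSecond of a 2D matrix in place, element by
// element. A no-op when the two indices coincide. Used by the partial
// pivoting in lup_ below.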
template <typename T>
static void swapRows_(NDArray* matrix, int theFirst, int theSecond) {
    if (theFirst != theSecond)
        for (int i = 0; i < matrix->columns(); i++) {
            T e0 = matrix->e<T>(theFirst, i);
            T e1 = matrix->e<T>(theSecond, i);

            matrix->p<T>(theFirst, i, e1);
            matrix->p<T>(theSecond, i, e0);
        }
}
BUILD_SINGLE_TEMPLATE(template void swapRows_, (NDArray* matrix, int theFirst, int theSecond), FLOAT_TYPES);

void swapRows(NDArray* matrix, int theFirst, int theSecond) {
    BUILD_SINGLE_SELECTOR(matrix->dataType(), swapRows_, (matrix, theFirst, theSecond), FLOAT_TYPES);
}
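
// Inverts a lower-triangular matrix via forward substitution. As written the
// routine never divides by the diagonal entries, so it effectively assumes a
// unit lower-triangular input (ones on the diagonal), which is exactly the
// shape of the L factor produced by lup_ below.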
template <typename T>
static void invertLowerMatrix_(NDArray* inputMatrix, NDArray* invertedMatrix) {
    int n = inputMatrix->rows();
    invertedMatrix->assign(0.f);

    PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold())
    for (int i = 0; i < n; i++)
        invertedMatrix->p(i, i, 1.0f);

    if (inputMatrix->isIdentityMatrix()) return;

    PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold())
    for (int i = 1; i < n; i++)
        invertedMatrix->t<T>(i, i - 1) = -inputMatrix->t<T>(i, i - 1);

    //PRAGMA_OMP_PARALLEL_FOR_SIMD
    for (int i = 2; i < n; i++) {
        for (int j = i - 2; j > -1; --j)
            for (int k = 0; k < i; k++)
                invertedMatrix->t<T>(i, j) -= (invertedMatrix->t<T>(k, j) * inputMatrix->t<T>(i, k));
    }
}
BUILD_SINGLE_TEMPLATE(template void invertLowerMatrix_, (NDArray* inputMatrix, NDArray* invertedMatrix);, FLOAT_TYPES);

void invertLowerMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
    BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (inputMatrix, invertedMatrix), FLOAT_TYPES);
}
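
// Inverts an upper-triangular matrix via back substitution. Unlike the lower
// variant above, this one does divide by the diagonal entries, so the
// non-unit diagonal of the U factor from lup_ is handled.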
template <typename T>
static void _invertUpperMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
    int n = inputMatrix->rows();
    invertedMatrix->setIdentity();

    if (inputMatrix->isIdentityMatrix()) { // the inverse of I is I
        return;
    }

    PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold())
    for (int i = 0; i < n; i++)
        invertedMatrix->t<T>(i, i) /= inputMatrix->t<T>(i, i);

    PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold())
    for (int i = 0; i < n - 1; i++)
        invertedMatrix->t<T>(i, i + 1) -= (inputMatrix->t<T>(i, i + 1) * invertedMatrix->t<T>(i + 1, i + 1) / inputMatrix->t<T>(i, i));

    // PRAGMA_OMP_PARALLEL_FOR_SIMD
    for (int i = n - 2; i > -1; i--) {
        for (int j = i + 2; j < n; j++)
            for (int k = i; k < n; k++)
                invertedMatrix->t<T>(i, j) -= ((invertedMatrix->t<T>(k, j) * inputMatrix->t<T>(i, k) / inputMatrix->t<T>(i, i)));
    }
}
BUILD_SINGLE_TEMPLATE(template void _invertUpperMatrix, (NDArray* inputMatrix, NDArray* invertedMatrix);, FLOAT_TYPES);

void invertUpperMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
    BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), _invertUpperMatrix, (inputMatrix, invertedMatrix), FLOAT_TYPES);
}
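
// LU decomposition with partial pivoting (Gaussian elimination). On return
// the compound matrix holds both factors, U on and above the diagonal and
// the multipliers of a unit lower-triangular L below it, so that
// P * A = L * U for the accumulated row-permutation matrix P. The returned
// scalar is det(A): the product of U's diagonal, with the sign flipped once
// per actual row swap. Note that the pivot search below runs under
// PRAGMA_OMP_PARALLEL_FOR while updating the shared pivot/pivotValue pair,
// which is not thread-safe as written; the commented-out firstprivate
// arguments hint at the intended fix.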
template <typename T>
static NDArray lup_(NDArray* input, NDArray* compound, NDArray* permutation) {

    const int rowNum = input->rows();
    const int columnNum = input->columns();

    NDArray determinant = NDArrayFactory::create<T>(1.f);
    NDArray compoundMatrix = *input; // copy
    NDArray permutationMatrix(input, false, defaultContext); // has the same shape as input and contiguous strides
    permutationMatrix.setIdentity();

    T pivotValue; // = T(0.0);
    int pivot; // = -1;
    int swapCount = 0;

    for (int i = 0; i < rowNum; i++) {
        pivotValue = T(0.0);
        pivot = -1;

        PRAGMA_OMP_PARALLEL_FOR //_ARGS(firstprivate(pivot,pivotValue))
        for (int rowCounter = i; rowCounter < rowNum; rowCounter++) {
            if (nd4j::math::nd4j_abs(compoundMatrix.t<T>(rowCounter, i)) > pivotValue) {
                pivotValue = nd4j::math::nd4j_abs(compoundMatrix.t<T>(rowCounter, i));
                pivot = rowCounter;
            }
        }

        if (pivotValue > T(0.00001)) {
            swapRows(&compoundMatrix, pivot, i);
            swapRows(&permutationMatrix, pivot, i);
            if (pivot != i)
                swapCount++;

            for (int j = i + 1; j < rowNum; j++) {
                compoundMatrix.t<T>(j, i) /= compoundMatrix.t<T>(i, i);
                PRAGMA_OMP_PARALLEL_FOR
                for (int k = i + 1; k < rowNum; k++) {
                    compoundMatrix.t<T>(j, k) -= compoundMatrix.t<T>(j, i) * compoundMatrix.t<T>(i, k);
                }
            }
        }
    }

    for (int e = 0; e < rowNum; e++) {
        determinant *= compoundMatrix.e<T>(e, e);
    }
    if (swapCount % 2) determinant = -determinant;
    if (compound != nullptr)
        compound->assign(compoundMatrix);
    if (permutation != nullptr)
        permutation->assign(permutationMatrix);
    return determinant;
}

BUILD_SINGLE_TEMPLATE(template NDArray lup_, (NDArray* input, NDArray* output, NDArray* permutation), FLOAT_TYPES);

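// Computes the determinant of each trailing n x n matrix of a (possibly
// batched) input via lup_, writing one scalar per matrix into the output.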
template <typename T>
static int determinant_(NDArray* input, NDArray* output) {

    Nd4jLong n = input->sizeAt(-1);
    Nd4jLong n2 = n * n;

    auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, input->dataType(), defaultContext); //, block.getWorkspace());

    for (int e = 0; e < output->lengthOf(); e++) {
        for (int k = e * n2, row = 0; k < (e + 1) * n2; ++k, ++row)
            matrix.p(row, input->e<T>(k));

        output->p(e, lup_<T>(&matrix, (NDArray*)nullptr, (NDArray*)nullptr));
    }

    return Status::OK();
}
BUILD_SINGLE_TEMPLATE(template int determinant_, (NDArray* input, NDArray* output), FLOAT_TYPES);

int determinant(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
    defaultContext = context;
    BUILD_SINGLE_SELECTOR(input->dataType(), return determinant_, (input, output), FLOAT_TYPES);
}
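
// Computes log(|det(A)|) for each trailing n x n matrix, again via lup_.
// Matrices with zero determinant leave the corresponding output element
// untouched rather than producing -inf.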
template <typename T>
int logAbsDeterminant_(NDArray* input, NDArray* output) {

    Nd4jLong n = input->sizeAt(-1);
    Nd4jLong n2 = n * n;

    NDArray matrix = NDArrayFactory::create(input->ordering(), {n, n}, input->dataType(), defaultContext); //, block.getWorkspace());
    for (int e = 0; e < output->lengthOf(); e++) {
        for (int k = e * n2, row = 0; k < (e + 1) * n2; ++k, ++row) {
            matrix.p(row, input->e<T>(k));
        }
        NDArray det = lup_<T>(&matrix, (NDArray*)nullptr, (NDArray*)nullptr);
        if (det.e<T>(0) != 0.f)
            output->p(e, nd4j::math::nd4j_log<T,T>(nd4j::math::nd4j_abs(det.t<T>(0))));
    }

    return ND4J_STATUS_OK;
}

BUILD_SINGLE_TEMPLATE(template int logAbsDeterminant_, (NDArray* input, NDArray* output), FLOAT_TYPES);

int logAbsDeterminant(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
    BUILD_SINGLE_SELECTOR(input->dataType(), return logAbsDeterminant_, (input, output), FLOAT_TYPES);
}
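
// Inverts each trailing n x n matrix through the LU factorization above:
// given P * A = L * U, it computes A^-1 = U^-1 * L^-1 * P. Near-singular
// inputs are detected by |det| falling below a fixed epsilon and abort with
// a validation error.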
template <typename T>
static int inverse_(NDArray* input, NDArray* output) {

    auto n = input->sizeAt(-1);
    auto n2 = n * n;
    auto totalCount = output->lengthOf() / n2;

    output->assign(0.f); // fill up output tensor with zeros
    auto matrix = NDArrayFactory::create('c', {n, n}, DataTypeUtils::fromT<T>(), defaultContext); //, block.getWorkspace());
    auto compound = NDArrayFactory::create('c', {n, n}, DataTypeUtils::fromT<T>(), defaultContext); //, block.getWorkspace());
    auto permutation = NDArrayFactory::create('c', {n, n}, DataTypeUtils::fromT<T>(), defaultContext);
    auto lowerMatrix = NDArrayFactory::create('c', {n, n}, DataTypeUtils::fromT<T>(), defaultContext);
    auto upperMatrix = NDArrayFactory::create('c', {n, n}, DataTypeUtils::fromT<T>(), defaultContext);

    for (int e = 0; e < totalCount; e++) {
        if (e)
            matrix.assign(0.f);

        for (int k = e * n2, row = 0; k < (e + 1) * n2; k++) {
            matrix.p(row++, input->e<T>(k));
        }
        T det = lup_<T>(&matrix, &compound, &permutation).template e<T>(0);

        // FIXME: and how is this going to work on float16?
        if (nd4j::math::nd4j_abs<T>(det) < T(0.000001)) {
            nd4j_printf("matrix_inverse: matrix %i has no inverse: determinant is %lf. Quitting...\n", e, (double) det);
            matrix.printIndexedBuffer("Wrong matrix");
            return ND4J_STATUS_VALIDATION;
        }
        lowerMatrix.setIdentity(); // set up L to identity matrix
        for (int k = 1; k < n; k++) { // and then copy the multipliers below the main diagonal onto it
            for (int j = 0; j < k; j++)
                lowerMatrix.template t<T>(k, j) = compound.template t<T>(k, j);
        }
        upperMatrix.setIdentity(); // set up U to identity matrix
        for (int k = 0; k < n; k++) { // and then copy the values on and above the main diagonal onto it
            for (int j = k; j < n; j++)
                upperMatrix.template t<T>(k, j) = compound.template t<T>(k, j);
        }
        invertUpperMatrix(&upperMatrix, &matrix);

        invertLowerMatrix(&lowerMatrix, &upperMatrix);

        nd4j::MmulHelper::mmul(&matrix, &upperMatrix, &compound, 1.0, 0.0);
        nd4j::MmulHelper::mmul(&compound, &permutation, &matrix, 1.0, 0.0);
        for (int k = e * n2, row = 0; k < (e + 1) * n2; k++) {
            output->t<T>(k) = matrix.template t<T>(row++);
        }
    }

    return Status::OK();
}

int inverse(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
    defaultContext = context;
    BUILD_SINGLE_SELECTOR(input->dataType(), return inverse_, (input, output), FLOAT_TYPES);
}
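
// Validates input for cholesky: every trailing matrix must be symmetric, and
// both its determinant and the determinant of its inverse must be positive.
// Positive definiteness implies these conditions, but they are necessary
// rather than sufficient, so this is a cheap sanity check, not a full test.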
template <typename T>
static bool checkCholeskyInput_(nd4j::LaunchContext * context, NDArray const* input) {
    std::unique_ptr<ResultSet> lastMatrixList(input->allTensorsAlongDimension({input->rankOf() - 2, input->rankOf() - 1}));
    for (size_t i = 0; i < lastMatrixList->size(); i++) {
        auto thisMatrix = lastMatrixList->at(i);
        // check for symmetry
        for (Nd4jLong r = 0; r < thisMatrix->rows(); r++)
            for (Nd4jLong c = 0; c < thisMatrix->columns(); c++)
                if (nd4j::math::nd4j_abs(thisMatrix->e<T>(r, c) - lastMatrixList->at(i)->e<T>(c, r)) > T(1.e-6f)) return false;

        NDArray output = NDArrayFactory::create<T>(0., context);
        if (ND4J_STATUS_OK != determinant(context, thisMatrix, &output)) return false;
        if (output.e<T>(0) <= T(0)) return false;
        NDArray reversedMatrix(*thisMatrix);
        if (ND4J_STATUS_OK != inverse(context, thisMatrix, &reversedMatrix)) return false;
        if (ND4J_STATUS_OK != determinant(context, &reversedMatrix, &output)) return false;
        if (output.e<T>(0) <= T(0)) return false;
    }

    return true;
}
BUILD_SINGLE_TEMPLATE(template bool checkCholeskyInput_, (nd4j::LaunchContext * context, NDArray const* input), FLOAT_TYPES);

bool checkCholeskyInput(nd4j::LaunchContext * context, NDArray const* input) {
    BUILD_SINGLE_SELECTOR(input->dataType(), return checkCholeskyInput_, (context, input), FLOAT_TYPES);
}
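
// Cholesky factorization A = L * L^T, computed column by column with the
// Cholesky-Banachiewicz recurrence:
//   L(i,j) = (A(j,i) - sum_k L(i,k) * L(j,k)) / L(j,j)   for j < i
//   L(j,j) = sqrt(A(j,j) - sum_k L(j,k)^2)
// The input is assumed symmetric positive-definite; see checkCholeskyInput.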
template <typename T>
int cholesky_(NDArray* input, NDArray* output, bool inplace) {

    auto n = input->sizeAt(-1);
    auto n2 = n * n;
    auto totalCount = output->lengthOf() / n2;
    if (!inplace)
        output->assign(0.f); // fill up the output tensor with zeros, only when inplace == false

    std::unique_ptr<NDArray> matrix(NDArrayFactory::create_('c', {n, n}, input->dataType(), defaultContext)); //, block.getWorkspace());
    std::unique_ptr<NDArray> lowerMatrix(NDArrayFactory::create_('c', {n, n}, input->dataType(), defaultContext));

    for (int e = 0; e < totalCount; e++) {

        // fill up the matrix
        for (int k = e * n2, l = 0; k < (e + 1) * n2; k++) {
            matrix->p(l++, input->e<T>(k));
        }
        lowerMatrix->assign(0.f);

        for (Nd4jLong col = 0; col < n; col++) {
            for (Nd4jLong row = 0; row < col; row++) {
                T rowSum = 0;
                for (Nd4jLong k = 0; k < row; ++k)
                    rowSum += (lowerMatrix->e<T>(col, k) * lowerMatrix->e<T>(row, k));
                lowerMatrix->p(col, row, (matrix->e<T>(row, col) - rowSum) / lowerMatrix->e<T>(row, row));
            }
            T diagonalSum = 0;
            for (Nd4jLong k = 0; k < col; ++k)
                diagonalSum += lowerMatrix->e<T>(col, k) * lowerMatrix->e<T>(col, k);
            lowerMatrix->p(col, col, nd4j::math::nd4j_sqrt<T, T>(matrix->e<T>(col, col) - diagonalSum));
        }
        for (int k = e * n2, l = 0; k < (e + 1) * n2; k++) {
            output->p(k, lowerMatrix->e<T>(l++));
        }
    }

    return ND4J_STATUS_OK;
}

int cholesky(nd4j::LaunchContext * context, NDArray* input, NDArray* output, bool inplace) {
    defaultContext = context;
    BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (input, output, inplace), FLOAT_TYPES);
}
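
// log(det(A)) via Cholesky: with A = L * L^T, det(A) = prod_i L(i,i)^2, so
// log(det(A)) = sum_i log(L(i,i)^2), which is what the loop below
// accumulates. Requires a symmetric positive-definite input; the status of
// the Cholesky call is propagated on failure.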
template <typename T>
int logdetFunctor_(NDArray* input, NDArray* output) {
    std::unique_ptr<NDArray> tempOutput(input->dup());
    int res = cholesky_<T>(input, tempOutput.get(), false);
    if (res != ND4J_STATUS_OK)
        return res;
    auto n = input->sizeAt(-1);
    auto totalCount = output->lengthOf();
    std::unique_ptr<ResultSet> matrices(tempOutput->allTensorsAlongDimension({input->rankOf() - 2, input->rankOf() - 1}));
    for (Nd4jLong e = 0; e < totalCount; e++) {
        for (Nd4jLong i = 0; i < n; ++i) {
            output->t<T>(e) += nd4j::math::nd4j_log<T,T>(nd4j::math::nd4j_pow<T,T,T>(matrices->at(e)->t<T>(i, i), T(2)));
        }
    }
    return ND4J_STATUS_OK;
}

int logdetFunctor(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
    BUILD_SINGLE_SELECTOR(input->dataType(), return logdetFunctor_, (input, output), FLOAT_TYPES);
}

}
}
}