// cavis/libnd4j/tests_cpu/layers_tests/TadTests.cpp
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#ifndef LIBND4J_TADTESTS_H
#define LIBND4J_TADTESTS_H
#include "testlayers.h"
#include <array/NDArray.h>
#include <helpers/TAD.h>
#include <array>
#include <helpers/ConstantTadHelper.h>
using namespace sd;
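
// Note on the raw shapeInfo arrays used throughout this file: in this snapshot
// of libnd4j a shapeInfo buffer is laid out as
//   {rank, shape[0..rank-1], stride[0..rank-1], extra/type flags, ews, order},
// where ews is the element-wise stride (-1 when no single stride fits the
// layout) and order is the ASCII code of the ordering char ('c' == 99, 'f' == 102).
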
class TadTests : public testing::Test {
public:
int numLoops = 100000000;
int extLoops = 1000;
int intLoops = 1000;
};
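
// Both shapeInfos below describe a 2x1x4x4 view over the same buffer:
// goodShape is fully contiguous (ews == 1), while badShape pads axis 0 with a
// stride of 80 and thus has ews == -1. A TAD along dimension 1 of the padded
// array must still produce offsets that jump over the padding, which is what
// the hard-coded `exp` offsets check.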
TEST_F(TadTests, Test4DTad1) {
NDArray* arraySource = sd::NDArrayFactory::linspace(1.0f, 10000.0f, 10000);
Nd4jLong badShape[] = {4, 2, 1, 4, 4, 80, 16, 4, 1, 8192, -1, 99};
Nd4jLong goodShape[] = {4, 2, 1, 4, 4, 16, 16, 4, 1, 8192, 1, 99};
std::vector<float> buff = arraySource->getBufferAsVector<float>();
NDArray* arrayExp = new NDArray(buff.data(), goodShape);
NDArray* arrayBad = new NDArray(buff.data(), badShape);
int dim = 1;
shape::TAD tad;
tad.init(arrayBad->getShapeInfo(), &dim, 1);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
int exp[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95 };
for (int e = 0; e < 32; e++)
ASSERT_EQ((int) tad.tadOffsets[e], exp[e]);
delete arrayExp;
delete arrayBad;
delete arraySource;
}
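
// shape::tadLength is the element count of one tensor-along-dimension, i.e.
// the product of the extents of the requested axes; total length divided by it
// gives the TAD count. For x of shape {2, 3} along axis 0 that is a TAD length
// of 2 and 6 / 2 == 3 TADs.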
TEST_F(TadTests, TestNumTads1) {
auto x = NDArrayFactory::create<float>('c', {2, 3});
auto y = NDArrayFactory::create<float>('c', {2, 2});
std::vector<int> dim({0});
Nd4jLong tadLengthX = shape::tadLength(x.getShapeInfo(), dim.data(), dim.size());
Nd4jLong numTadsX = x.lengthOf() / tadLengthX;
Nd4jLong tadLengthY = shape::tadLength(y.getShapeInfo(), dim.data(), dim.size());
Nd4jLong numTadsY = y.lengthOf() / tadLengthY;
ASSERT_EQ(2, tadLengthX);
ASSERT_EQ(3, numTadsX);
ASSERT_EQ(2, tadLengthY);
ASSERT_EQ(2, numTadsY);
}
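
// A TAD over every dimension is the whole array as a single TAD. The TAD
// shapeInfo is copied out and wrapped in a fresh NDArray over the same buffer,
// so the strict shape and content comparisons run against an independent view.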
TEST_F(TadTests, TestShapeTad_1) {
float buff[] = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24};
Nd4jLong shapeInfo[] = {3, 2, 3, 4, 12, 4, 1, 8192, 1, 99};
NDArray input(buff, shapeInfo);
std::vector<int> dimensions = {0,1,2};
Nd4jLong tadLength = shape::tadLength(input.getShapeInfo(), dimensions.data(), dimensions.size());
Nd4jLong numTads = input.lengthOf() / tadLength;
shape::TAD tad;
tad.init(input.getShapeInfo(), dimensions.data(), dimensions.size());
tad.createTadOnlyShapeInfo();
tad.createOffsets();
auto tadShapeInfo = new Nd4jLong[shape::shapeInfoLength(tad.tadOnlyShapeInfo[0])];
std::memcpy(tadShapeInfo, tad.tadOnlyShapeInfo, shape::shapeInfoByteLength(tad.tadOnlyShapeInfo));
float* tadBuff = reinterpret_cast<float*>(input.getBuffer()) + tad.tadOffsets[0];
NDArray tadArr(tadBuff, tadShapeInfo);
ASSERT_TRUE(numTads==1);
ASSERT_TRUE(input.isSameShapeStrict(tadArr));
ASSERT_TRUE(input.equalsTo(&tadArr));
delete[] tadShapeInfo;
}
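
// A null/zero-length dimension list switches TAD into "whole array" mode:
// wholeThing is set and the TAD shapeInfo matches the input shapeInfo exactly.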
TEST_F(TadTests, TadNoAxis_1) {
auto array = NDArrayFactory::create<float>('c', {2, 3});
shape::TAD tad;
tad.init(array.shapeInfo(), nullptr, 0);
tad.createTadOnlyShapeInfo();
tad.createOffsets();
ASSERT_TRUE(tad.wholeThing);
ASSERT_TRUE(shape::equalsStrict(tad.tadOnlyShapeInfo, array.shapeInfo()));
}
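
// operator()(idx, dims) as used here returns the idx-th sub-array obtained by
// fixing the listed dimensions; fixing the trailing unit dimension of a
// {5, 4, 1} array leaves a {5, 4} view.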
TEST_F(TadTests, TadEdgeCase_1) {
auto array = NDArrayFactory::create<float>('c', {5, 4, 1});
auto exp = NDArrayFactory::create<float>('c', {5, 4});
array.linspace(1);
auto tad = array(0, {2});
ASSERT_TRUE(exp.isSameShape(tad));
}
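
// Fixing dimensions 0 and 1 of the f-ordered {2, 3, 1} array yields one scalar
// view per element; each view must read back the value the parent reports at
// the same (c-order) linear index e.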
TEST_F(TadTests, TestEdgeCase_2) {
auto array = NDArrayFactory::create<float>('f', {2, 3, 1}, {1, 4, 2, 5, 3, 6});
for (int e = 0; e < array.lengthOf(); e++) {
auto tad = array(e, {0,1});
ASSERT_NEAR(tad.e<float>(0), array.e<float>(e), 1e-5);
}
}
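
// Fixing dimensions 0 and 2 of a {2, 3, 4} array leaves a view over the middle
// axis only, so the first sub-array has length 3.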
TEST_F(TadTests, TadEdgeCase_2) {
auto array = NDArrayFactory::create<float>('c', {2, 3, 4});
auto tad = array(0, {0,2});
ASSERT_EQ(3, tad.lengthOf());
2019-06-06 14:21:15 +02:00
}
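
// The next five tests cover dimensionsDescending, which appears to report
// whether the requested TAD axes are the trailing (innermost, contiguous in
// 'c' order) dimensions - the precondition for the element-wise-stride fast
// path. A lone axis of a rank-1 array trivially qualifies; {0, 2} of a rank-3
// array does not.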
TEST_F(TadTests, test_Tad_Ews_optimization_1) {
shape::TAD xTad;
std::array<int,2> array = {1,2};
ASSERT_TRUE(xTad.dimensionsDescending(3, array.data(), array.size()));
}
TEST_F(TadTests, test_Tad_Ews_optimization_2) {
shape::TAD xTad;
std::array<int,2> array = {0,2};
ASSERT_FALSE(xTad.dimensionsDescending(3, array.data(), array.size()));
}
TEST_F(TadTests, test_Tad_Ews_optimization_3) {
shape::TAD xTad;
std::array<int,1> array = {1};
ASSERT_TRUE(xTad.dimensionsDescending(2, array.data(), array.size()));
}
TEST_F(TadTests, test_Tad_Ews_optimization_4) {
shape::TAD xTad;
std::array<int,1> array = {0};
ASSERT_TRUE(xTad.dimensionsDescending(1, array.data(), array.size()));
}
TEST_F(TadTests, test_Tad_Ews_optimization_5) {
shape::TAD xTad;
std::array<int,2> array = {2,3};
ASSERT_TRUE(xTad.dimensionsDescending(4, array.data(), array.size()));
}
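
// Regression check: with a dimension count of 0 the dimensions pointer must
// never be dereferenced, so a deliberately bogus non-null pointer is passed in.
// The test passes as long as init/createTadOnlyShapeInfo/createOffsets do not
// crash.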
TEST_F(TadTests, test_TAD_empty_dims_1) {
Nd4jLong xShape[8] = {2, 150, 1, 3, 1, 16384, 3, 99};
shape::TAD xTad;
xTad.init(xShape, reinterpret_cast<int*>(112L), 0);
xTad.createTadOnlyShapeInfo();
xTad.createOffsets();
}
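
// The next four tests pin down the exact shapeInfo (shape, strides, ews, order)
// the TAD machinery should produce for single- and multi-axis TADs of
// c-ordered inputs, using the raw layout described at the top of this file.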
TEST_F(TadTests, test_tad_order_1) {
Nd4jLong xShape[8] = {2, 150, 10, 10, 1, 8192, 1, 99};
Nd4jLong tShape[8] = {2, 1, 10, 1, 1, 8192, 1, 99};
shape::TAD xTad;
int dim = 1;
xTad.init(xShape, &dim, 1);
xTad.createTadOnlyShapeInfo();
ASSERT_TRUE(shape::equalsStrict(tShape, xTad.tadOnlyShapeInfo));
}
TEST_F(TadTests, test_tad_order_2) {
Nd4jLong xShape[8] = {2, 150, 10, 10, 1, 8192, 1, 99};
Nd4jLong tShape[8] = {2, 1, 150, 1, 10, 8192, 10, 99};
shape::TAD xTad;
int dim = 0;
xTad.init(xShape, &dim, 1);
xTad.createTadOnlyShapeInfo();
ASSERT_TRUE(shape::equalsStrict(tShape, xTad.tadOnlyShapeInfo));
}
TEST_F(TadTests, test_tad_order_3) {
Nd4jLong xShape[10] = {3, 10, 20, 30, 600, 30, 1, 8192, 1, 99};
Nd4jLong tShape[8] = {2, 1, 30, 1, 1, 8192, 1, 99};
shape::TAD xTad;
int dim = 2;
xTad.init(xShape, &dim, 1);
xTad.createTadOnlyShapeInfo();
ASSERT_TRUE(shape::equalsStrict(tShape, xTad.tadOnlyShapeInfo));
}
TEST_F(TadTests, test_tad_order_4) {
Nd4jLong xShape[10] = {3, 10, 20, 30, 600, 30, 1, 8192, 1, 99};
Nd4jLong tShape[8] = {2, 20, 30, 30, 1, 8192, 1, 99};
shape::TAD xTad;
int dim[2] = {1, 2};
xTad.init(xShape, dim, 2);
xTad.createTadOnlyShapeInfo();
ASSERT_TRUE(shape::equalsStrict(tShape, xTad.tadOnlyShapeInfo));
}
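
// ConstantTadHelper serves cached TAD packs. The column TAD of a {5, 2} matrix
// along axis 0 is a rank-1 vector of length 5, and requesting axis 0 of that
// vector's own shapeInfo should return an equivalent shapeInfo back.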
TEST_F(TadTests, test_column_1) {
auto x = NDArrayFactory::create<float>('c', {5, 2});
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(x.shapeInfo(), 0);
ASSERT_EQ(1, shape::rank(tadPack.primaryShapeInfo()));
ASSERT_EQ(5, shape::length(tadPack.primaryShapeInfo()));
ASSERT_TRUE(shape::isVector(tadPack.primaryShapeInfo()));
auto scalarViewPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(tadPack.primaryShapeInfo(), 0);
ASSERT_TRUE(shape::equalsStrict(tadPack.primaryShapeInfo(), scalarViewPack.primaryShapeInfo()));
}
///////////////////////////////////////////////////////////////////
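// shape::calcOffsets enumerates the buffer offset of every element when the
// array is traversed in the requested order ('f' here). Walking an f-ordered
// buffer in 'f' order is the identity sequence 0..23; walking the c-ordered
// buffer in 'f' order gives the permuted offsets in expOffsetsC. The third
// case (f strides, 'c' order flag) shows that only the strides matter, not the
// declared order character.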
TEST_F(TadTests, calcOffsets_1) {
Nd4jLong shapeInfoF[10] = {3, 2,3,4, 1,2,6, 8192, 1, 102};
Nd4jLong shapeInfoC[10] = {3, 2,3,4, 12,4,1, 8192, 1, 99};
Nd4jLong shapeInfoFC[10] = {3, 2,3,4, 1,2,6, 8192, 1, 99};
Nd4jLong expOffsetsF[24] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23};
Nd4jLong expOffsetsC[24] = {0,12,4,16,8,20,1,13,5,17,9,21,2,14,6,18,10,22,3,15,7,19,11,23};
Nd4jLong offsets[24];
shape::calcOffsets(shapeInfoF, offsets, 'f');
for (int e = 0; e < 24; e++)
ASSERT_TRUE(offsets[e] == expOffsetsF[e]);
shape::calcOffsets(shapeInfoC, offsets, 'f');
for (int e = 0; e < 24; e++)
ASSERT_TRUE(offsets[e] == expOffsetsC[e]);
shape::calcOffsets(shapeInfoFC, offsets, 'f');
for (int e = 0; e < 24; e++)
ASSERT_TRUE(offsets[e] == expOffsetsF[e]);
}
/////////////////////////////////////////////////////////////////
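// shape::outerArrayIndexes, as exercised here, answers: given a "max" array x
// and a smaller "min" array y whose shape omits the axes in dimsToExclude,
// which linear indices of x project onto element minIdx of y? It writes those
// indices into maxIdxs and returns their count, which equals
// x.lengthOf() / y.lengthOf() whenever the mapping is uniform. Passing nullptr
// for dimsToExclude makes the helper infer the excluded axes from the shapes.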
TEST_F(TadTests, outerArrayIndexes_1) {
NDArray x('c', {2,3,4,5}, sd::DataType::FLOAT32);
Nd4jLong maxIdxs[120];
NDArray y1('c', {3,5}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude1 = {0,2};
const int n1[] = {20,25,30,35, 80,85,90,95};
int minIdx = 5;
int N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y1.getShapeInfo(), dimsToExclude1.data());
ASSERT_TRUE(N == x.lengthOf()/y1.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n1[i] == maxIdxs[i]);
NDArray y2('c', {4,5}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude2 = {0,1};
const int n2[] = {12,32,52, 72,92,112};
minIdx = 12;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y2.getShapeInfo(), dimsToExclude2.data());
ASSERT_TRUE(N == x.lengthOf()/y2.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n2[i] == maxIdxs[i]);
NDArray y3('c', {2,5}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude3 = {1,2};
const int n3[] = {64,69,74,79,84,89,94,99,104,109,114,119};
minIdx = 9;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y3.getShapeInfo(), dimsToExclude3.data());
ASSERT_TRUE(N == x.lengthOf()/y3.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n3[i] == maxIdxs[i]);
NDArray y4('c', {2,3}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude4 = {2,3};
const int n4[] = {20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39};
minIdx = 1;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y4.getShapeInfo(), dimsToExclude4.data());
ASSERT_TRUE(N == x.lengthOf()/y4.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n4[i] == maxIdxs[i]);
NDArray y5('c', {2,4}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude5 = {1,3};
const int n5[] = {65,66,67,68,69, 85,86,87,88,89, 105,106,107,108,109};
minIdx = 5;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y5.getShapeInfo(), dimsToExclude5.data());
ASSERT_TRUE(N == x.lengthOf()/y5.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n5[i] == maxIdxs[i]);
NDArray y6('c', {2,3,4}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude6 = {3};
const int n6[] = {65,66,67,68,69};
minIdx = 13;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y6.getShapeInfo(), dimsToExclude6.data());
ASSERT_TRUE(N == x.lengthOf()/y6.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n6[i] == maxIdxs[i]);
NDArray y7('c', {4}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude7 = {0,1,3};
const int n7[] = {15,16,17,18,19, 35,36,37,38,39, 55,56,57,58,59, 75,76,77,78,79, 95,96,97,98,99, 115,116,117,118,119};
minIdx = 3;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y7.getShapeInfo(), dimsToExclude7.data());
ASSERT_TRUE(N == x.lengthOf()/y7.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n7[i] == maxIdxs[i]);
NDArray y8('c', {5}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude8 = {0,1,2};
const int n8[] = {0,5,10,15, 20,25,30,35, 40,45,50,55, 60,65,70,75, 80,85,90,95, 100,105,110,115};
minIdx = 0;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y8.getShapeInfo(), dimsToExclude8.data());
ASSERT_TRUE(N == x.lengthOf()/y8.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n8[i] == maxIdxs[i]);
NDArray y9('c', {2}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude9 = {1,2,3};
const int n9[] = {60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119};
minIdx = 1;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y9.getShapeInfo(), dimsToExclude9.data());
ASSERT_TRUE(N == x.lengthOf()/y9.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n9[i] == maxIdxs[i]);
NDArray y10('c', {3,4,5}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude10 = {0};
const int n10[] = {11, 71};
minIdx = 11;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y10.getShapeInfo(), dimsToExclude10.data());
ASSERT_TRUE(N == x.lengthOf()/y10.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n10[i] == maxIdxs[i]);
NDArray y11('c', {2,4,5}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude11 = {1};
const int n11[] = {66, 86, 106};
minIdx = 26;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y11.getShapeInfo(), dimsToExclude11.data());
ASSERT_TRUE(N == x.lengthOf()/y11.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n11[i] == maxIdxs[i]);
NDArray y12('c', {3,2}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude12 = {0,2};
const int n12[] = {0,2,4,5,7,9,10,12,14,15,17,19,60,62,64,65,67,69,70,72,74,75,77,79};
minIdx = 0;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y12.getShapeInfo(), dimsToExclude12.data());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n12[i] == maxIdxs[i]);
NDArray y13('c', {3,2}, sd::DataType::FLOAT32);
const std::vector<int> dimsToExclude13 = {0,2};
const int n13[] = {1,3,6,8,11,13,16,18,61,63,66,68,71,73,76,78};
minIdx = 1;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y13.getShapeInfo(), dimsToExclude13.data());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n13[i] == maxIdxs[i]);
NDArray y14('c', {4,5}, sd::DataType::FLOAT32);
const int n14[] = {12,32,52, 72,92,112};
minIdx = 12;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y14.getShapeInfo(), nullptr);
ASSERT_TRUE(N == x.lengthOf()/y14.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n14[i] == maxIdxs[i]);
NDArray y15('c', {3,4,5}, sd::DataType::FLOAT32);
const int n15[] = {11, 71};
minIdx = 11;
N = shape::outerArrayIndexes(maxIdxs, minIdx, x.getShapeInfo(), y15.getShapeInfo(), nullptr);
ASSERT_TRUE(N == x.lengthOf()/y15.lengthOf());
for(int i = 0; i < N; ++i)
ASSERT_TRUE(n15[i] == maxIdxs[i]);
}
#endif //LIBND4J_TADTESTS_H