/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by raver119 on 21.11.17.
//

#include "testlayers.h"
#include <memory>
#include <array/NDArray.h>
#include <helpers/DebugHelper.h>
#include <ops/declarable/headers/parity_ops.h>

using namespace sd;

//////////////////////////////////////////////////////////////////////
class NDArrayTest2 : public testing::Test {
public:

};
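
// asByteVector() serializes the array buffer to raw bytes; the three tests below
// round-trip float, bfloat16 and double arrays and expect a byte-exact rebuild.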

TEST_F(NDArrayTest2, Test_ByteVector_1) {
    auto x = NDArrayFactory::create<float>('c', {10, 10});
    x.linspace(1);

    auto vec = x.asByteVector();

    auto restored = new NDArray((float *)vec.data(), x.shapeInfo(), x.getContext(), false);

    ASSERT_TRUE(x.equalsTo(restored));

    delete restored;
}

TEST_F(NDArrayTest2, Test_ByteVector_2) {
    auto x = NDArrayFactory::create<bfloat16>('c', {10, 10});
    x.linspace(1);

    auto vec = x.asByteVector();

    auto restored = new NDArray((bfloat16 *)vec.data(), x.shapeInfo(), x.getContext(), false);

    ASSERT_TRUE(x.equalsTo(restored));

    delete restored;
}

TEST_F(NDArrayTest2, Test_ByteVector_3) {
    auto x = NDArrayFactory::create<double>('c', {10, 10});
    x.linspace(1);

    auto vec = x.asByteVector();

    auto restored = new NDArray((double *)vec.data(), x.shapeInfo(), x.getContext(), false);

    ASSERT_TRUE(x.equalsTo(restored));

    delete restored;
}

TEST_F(NDArrayTest2, Test_Reshape_Scalar_1) {
    auto x = NDArrayFactory::create<double>('c', {1, 1}, {1.0});
    auto e = NDArrayFactory::create<double>(1.0);

    x.reshapei({});

    ASSERT_EQ(e, x);
    ASSERT_EQ(e.rankOf(), x.rankOf());
}

TEST_F(NDArrayTest2, Test_Reshape_Scalar_2) {
    auto x = NDArrayFactory::create<double>('c', {1, 1}, {1.0});
    auto e = NDArrayFactory::create<double>('c', {1}, {1.0});

    x.reshapei({1});

    ASSERT_EQ(e, x);
    ASSERT_EQ(e.rankOf(), x.rankOf());
}
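
// indexReduceNumber(FirstIndex, ...) returns the linear index of the first element
// matching the comparison encoded in the extra args (here they select the value 3.0,
// which sits at index 2). The exact meaning of the three extras is an assumption
// inferred from the expected result.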

TEST_F(NDArrayTest2, Test_IndexReduce_1) {
    auto x = NDArrayFactory::create<double>('c', {1, 5}, {1, 2, 3, 4, 5});

    ExtraArguments extras({3.0, 0.0, 10.0});
    int idx = x.indexReduceNumber(indexreduce::FirstIndex, &extras).e<int>(0);

    ASSERT_EQ(2, idx);
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_1) {

    auto x = NDArrayFactory::create<double>('c', {1, 5});
    auto xExp = NDArrayFactory::create<double>('c', {1, 5}, {1, 0, 0, 0, 0});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_2) {

    auto x = NDArrayFactory::create<double>('f', {1, 5});
    auto xExp = NDArrayFactory::create<double>('f', {1, 5}, {1, 0, 0, 0, 0});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_3) {

    auto x = NDArrayFactory::create<double>('f', {1, 1});
    auto xExp = NDArrayFactory::create<double>('f', {1, 1}, {1});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_4) {

    auto x = NDArrayFactory::create<double>('f', {2, 1});
    auto xExp = NDArrayFactory::create<double>('f', {2, 1}, {1, 0});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_5) {

    auto x = NDArrayFactory::create<double>('f', {2, 2});
    auto xExp = NDArrayFactory::create<double>('f', {2, 2}, {1, 0, 0, 1});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_6) {

    auto x = NDArrayFactory::create<float>('c', {3, 2});
    auto xExp = NDArrayFactory::create<float>('c', {3, 2}, {1.f, 0.f, 0.f, 1.f, 0.f, 0.f});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_7) {

    auto x = NDArrayFactory::create<float>('c', {3, 4});
    auto xExp = NDArrayFactory::create<float>('c', {3, 4}, {1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 0.f, 1.f, 0.f});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}

#ifdef ALLOWED_3D_IDENTITY
////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, SetIdentity_test_8) {

    auto x = NDArrayFactory::create<float>('c', {3, 3, 3});
    auto xExp = NDArrayFactory::create<float>('c', {3, 3, 3}, {1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.});

    x.setIdentity();

    ASSERT_TRUE(x.equalsTo(&xExp));
}
#endif
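
// applyAllReduce3 evaluates the reduce3 op between every pair of TADs taken along
// the given dimension, so two {2, 3} inputs reduced along dim 1 yield a {2, 2}
// matrix of pairwise Euclidean distances (sqrt(3) ~ 1.73205 for rows offset by 1).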

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_AllReduce3_1) {
    auto x = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 1, 2, 3});
    auto y = NDArrayFactory::create<double>('c', {2, 3}, {2, 3, 4, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {2, 2}, {1.73205, 1.73205, 1.73205, 1.73205});

    auto z = x.applyAllReduce3(reduce3::EuclideanDistance, y, {1});

    ASSERT_TRUE(exp.isSameShape(z));
    ASSERT_TRUE(exp.equalsTo(z));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_AllReduce3_2) {
    auto x = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 2, 3, 4});
    auto y = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {2, 2}, {0., 1.73205, 1.73205, 0.});

    auto z = x.applyAllReduce3(reduce3::EuclideanDistance, y, {1});

    ASSERT_TRUE(exp.isSameShape(z));
    ASSERT_TRUE(exp.equalsTo(z));
}
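
// mmul with a {4, 1} column and a {1, 4} row is an outer product ({4, 4});
// the reversed operand order collapses to a dot product ({1, 1} = 30).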

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, mmul_test1) {

    auto x = NDArrayFactory::create<double>('c', {4, 1}, {1, 2, 3, 4});
    auto y = NDArrayFactory::create<double>('c', {1, 4}, {1, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {4, 4}, {1, 2, 3, 4, 2, 4, 6, 8, 3, 6, 9, 12, 4, 8, 12, 16});

    auto result = mmul(x, y);

    ASSERT_TRUE(exp.isSameShape(&result));
    ASSERT_TRUE(exp.equalsTo(&result));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, mmul_test2) {

    auto x = NDArrayFactory::create<double>('c', {4, 1}, {1, 2, 3, 4});
    auto y = NDArrayFactory::create<double>('c', {1, 4}, {1, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {1, 1}, {30});

    auto result = mmul(y, x);

    ASSERT_TRUE(exp.isSameShape(&result));
    ASSERT_TRUE(exp.equalsTo(&result));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, mmul_test3) {

    auto x = NDArrayFactory::create<double>('c', {4, 1}, {1, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {4, 4}, {1., 0.2, 0.3, 0.4, 0.2, 0.04, 0.06, 0.08, 0.3, 0.06, 0.09, 0.12, 0.4, 0.08, 0.12, 0.16});
    auto w = NDArrayFactory::create<double>(x.ordering(), {(int)x.lengthOf(), 1}, x.getContext());  // column-vector
    auto wT = NDArrayFactory::create<double>(x.ordering(), {1, (int)x.lengthOf()}, x.getContext()); // row-vector (transposed w)

    w = x / (float)10.;
    w.p(0, 1.);
    wT.assign(&w);

    auto result = mmul(w, wT);

    ASSERT_TRUE(exp.isSameShape(&result));
    ASSERT_TRUE(exp.equalsTo(&result));
}
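
// streamline() rewrites a (possibly permuted) view into a dense buffer with the
// requested ordering: values and logical shape are preserved, but strict shape-info
// equality with the un-streamlined view is lost.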

TEST_F(NDArrayTest2, Test_Streamline_1) {
    auto x = NDArrayFactory::create<float>('c', {3, 4, 6});
    auto y = NDArrayFactory::create<float>('c', {3, 4, 6});
    x.linspace(1);
    y.linspace(1);

    x.permutei({1, 0, 2});
    y.permutei({1, 0, 2});

    y.streamline();

    ASSERT_TRUE(x.isSameShape(&y));
    ASSERT_TRUE(x.equalsTo(&y));
    ASSERT_FALSE(x.isSameShapeStrict(y));
}

TEST_F(NDArrayTest2, Test_Streamline_2) {
    auto x = NDArrayFactory::create<double>('c', {3, 4, 6});
    auto y = NDArrayFactory::create<double>('f', {3, 4, 6});
    x.linspace(1);
    y.linspace(1);

    ASSERT_TRUE(x.isSameShape(&y));
    ASSERT_TRUE(x.equalsTo(&y));

    y.streamline('c');

    ASSERT_TRUE(x.isSameShape(&y));
    ASSERT_TRUE(x.equalsTo(&y));
}

TEST_F(NDArrayTest2, Test_Enforce_1) {
    auto x = NDArrayFactory::create<float>('c', {4, 1, 1, 4});
    auto exp = NDArrayFactory::create<float>('c', {4, 4});

    x.linspace(1);
    exp.linspace(1);

    x.enforce({4, 4}, 'c');

    ASSERT_TRUE(exp.isSameShapeStrict(x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayTest2, TestVector_1) {
    auto x = NDArrayFactory::create<double>('c', {2, 3});
    auto row = NDArrayFactory::create<double>('c', {3}, {1, 2, 3});
    auto exp = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 1, 2, 3});

    x.addiRowVector(row);

    ASSERT_TRUE(exp.equalsTo(&x));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Operator_Plus_Test_5) {

    auto x = NDArrayFactory::create<float>('c', {8, 8, 8});
    auto y = NDArrayFactory::create<float>('c', {8, 1, 8});
    auto expected = NDArrayFactory::create<float>('c', {8, 8, 8});

    x = 1.;
    y = 2.;
    expected = 3.;

    auto result = x + y;

    ASSERT_TRUE(expected.isSameShape(&result));
    ASSERT_TRUE(expected.equalsTo(&result));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Operator_Plus_Test_6) {

    auto x = NDArrayFactory::create<double>('c', {3, 3, 3});
    auto y = NDArrayFactory::create<double>('c', {3, 1, 3});
    auto expected = NDArrayFactory::create<double>('c', {3, 3, 3}, {2., 4., 6., 5., 7., 9., 8., 10., 12., 14., 16., 18., 17., 19., 21., 20., 22., 24., 26., 28., 30., 29., 31., 33., 32., 34., 36.});
    x.linspace(1);
    y.linspace(1);

    auto result = x + y;

    ASSERT_TRUE(expected.isSameShape(&result));
    ASSERT_TRUE(expected.equalsTo(&result));
}
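
// tileToShape repeats an array along size-1 (or new leading) dimensions until it
// matches the requested shape, writing either in place or into a provided target.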

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, tileToShape_test1) {

    auto x = NDArrayFactory::create<double>('c', {2, 2}, {1, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {2, 2, 2}, {1, 2, 3, 4, 1, 2, 3, 4});

    x.tileToShape({2, 2, 2}, x);

    ASSERT_TRUE(x.isSameShape(&exp));
    ASSERT_TRUE(x.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, tileToShape_test2) {

    auto x = NDArrayFactory::create<double>('c', {2, 1, 2}, {1, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {2, 3, 2}, {1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4});

    x.tileToShape({2, 3, 2}, x);

    ASSERT_TRUE(x.isSameShape(&exp));
    ASSERT_TRUE(x.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, tileToShape_test3) {

    auto x = NDArrayFactory::create<double>('c', {2, 2}, {1, 2, 3, 4});
    auto result = NDArrayFactory::create<double>('c', {2, 2, 2});
    auto exp = NDArrayFactory::create<double>('c', {2, 2, 2}, {1, 2, 3, 4, 1, 2, 3, 4});

    x.tileToShape({2, 2, 2}, result);
    // result.printIndexedBuffer();

    ASSERT_TRUE(result.isSameShape(&exp));
    ASSERT_TRUE(result.equalsTo(&exp));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, tileToShape_test4) {

    auto x = NDArrayFactory::create<double>('c', {2, 1, 2}, {1, 2, 3, 4});
    auto result = NDArrayFactory::create<double>('c', {2, 3, 2});
    auto exp = NDArrayFactory::create<double>('c', {2, 3, 2}, {1, 2, 1, 2, 1, 2, 3, 4, 3, 4, 3, 4});

    x.tileToShape({2, 3, 2}, result);

    ASSERT_TRUE(result.isSameShape(&exp));
    ASSERT_TRUE(result.equalsTo(&exp));
}
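
// The lambda-based element-wise ops below run on host only, hence the __CUDABLAS__
// guard. LAMBDA_DDD builds a ternary double functor; ILAMBDA_D additionally exposes
// the element index as _idx.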

#ifndef __CUDABLAS__

TEST_F(NDArrayTest2, Test_TriplewiseLambda_1) {
    auto t = NDArrayFactory::create<double>('c', {3, 3}, {1, 1, 1, 1, 1, 1, 1, 1, 1});
    auto u = NDArrayFactory::create<double>('c', {3, 3}, {2, 2, 2, 2, 2, 2, 2, 2, 2});
    auto v = NDArrayFactory::create<double>('c', {3, 3}, {3, 3, 3, 3, 3, 3, 3, 3, 3});
    auto exp = NDArrayFactory::create<double>('c', {3, 3}, {7, 7, 7, 7, 7, 7, 7, 7, 7});

    float extra = 1.0f;

    auto la = LAMBDA_DDD(_t, _u, _v, extra) {
        return _t + _u + _v + extra;
    };

    t.applyTriplewiseLambda<double>(u, v, la, t);

    ASSERT_TRUE(t.equalsTo(&exp));
}

TEST_F(NDArrayTest2, Test_TriplewiseLambda_2) {
    auto t = NDArrayFactory::create<double>('c', {3, 3}, {1, 1, 1, 1, 1, 1, 1, 1, 1});
    auto u = NDArrayFactory::create<double>('f', {3, 3}, {2, 2, 2, 2, 2, 2, 2, 2, 2});
    auto v = NDArrayFactory::create<double>('c', {3, 3}, {3, 3, 3, 3, 3, 3, 3, 3, 3});
    auto exp = NDArrayFactory::create<double>('c', {3, 3}, {7, 7, 7, 7, 7, 7, 7, 7, 7});

    float extra = 1.0f;

    auto la = LAMBDA_DDD(_t, _u, _v, extra) {
        return _t + _u + _v + extra;
    };

    t.applyTriplewiseLambda<double>(u, v, la, t);

    ASSERT_TRUE(t.equalsTo(&exp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Indexed_Lambda) {
    auto x = NDArrayFactory::create<double>('c', {2, 2});
    auto exp = NDArrayFactory::create<double>('c', {2, 2}, {0, 1, 2, 3});

    auto lambda = ILAMBDA_D(_x) {
        return (float) _idx;
    };

    x.applyIndexedLambda<double>(lambda, x);

    ASSERT_TRUE(exp.equalsTo(&x));
}

#endif
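
// Each PermuteEquality test fills 1..60, reshapes to {3, 4, 5}, applies one
// permutation, streamlines to a dense 'c' layout, and checks the resulting buffer
// against the expected row-major ordering.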

TEST_F(NDArrayTest2, Test_PermuteEquality_1) {
    auto x = NDArrayFactory::create<double>('c', {1, 60});
    auto exp = NDArrayFactory::create<double>('c', {3, 5, 4}, {1.0, 6.0, 11.0, 16.0, 2.0, 7.0, 12.0, 17.0, 3.0, 8.0, 13.0, 18.0, 4.0, 9.0, 14.0, 19.0, 5.0, 10.0, 15.0, 20.0, 21.0, 26.0, 31.0, 36.0, 22.0, 27.0, 32.0, 37.0, 23.0, 28.0, 33.0, 38.0, 24.0, 29.0, 34.0, 39.0, 25.0, 30.0, 35.0, 40.0, 41.0, 46.0, 51.0, 56.0, 42.0, 47.0, 52.0, 57.0, 43.0, 48.0, 53.0, 58.0, 44.0, 49.0, 54.0, 59.0, 45.0, 50.0, 55.0, 60.0});
    x.linspace(1);
    x.reshapei('c', {3, 4, 5});

    x.permutei({0, 2, 1});
    x.streamline();

    // x.printShapeInfo("{0, 2, 1} shape");
    // x.printBuffer("{0, 2, 1} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayTest2, Test_PermuteEquality_0) {
    auto x = NDArrayFactory::create<double>('c', {1, 60});
    x.linspace(1);
    auto exp = NDArrayFactory::create<double>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0});
    x.reshapei('c', {3, 4, 5});

    x.permutei({0, 1, 2});
    x.streamline();

    // x.printShapeInfo("{0, 1, 2} shape");
    // x.printBuffer("{0, 1, 2} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayTest2, Test_PermuteEquality_2) {
    auto x = NDArrayFactory::create<double>('c', {1, 60});
    x.linspace(1);
    auto exp = NDArrayFactory::create<double>('c', {4, 3, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 21.0, 22.0, 23.0, 24.0, 25.0, 41.0, 42.0, 43.0, 44.0, 45.0, 6.0, 7.0, 8.0, 9.0, 10.0, 26.0, 27.0, 28.0, 29.0, 30.0, 46.0, 47.0, 48.0, 49.0, 50.0, 11.0, 12.0, 13.0, 14.0, 15.0, 31.0, 32.0, 33.0, 34.0, 35.0, 51.0, 52.0, 53.0, 54.0, 55.0, 16.0, 17.0, 18.0, 19.0, 20.0, 36.0, 37.0, 38.0, 39.0, 40.0, 56.0, 57.0, 58.0, 59.0, 60.0});
    x.reshapei('c', {3, 4, 5});

    x.permutei({1, 0, 2});
    x.streamline();

    // x.printShapeInfo("{1, 0, 2} shape");
    // x.printBuffer("{1, 0, 2} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayTest2, Test_PermuteEquality_3) {
    auto x = NDArrayFactory::create<double>('c', {1, 60});
    x.linspace(1);
    auto exp = NDArrayFactory::create<double>('c', {4, 5, 3}, {1.0, 21.0, 41.0, 2.0, 22.0, 42.0, 3.0, 23.0, 43.0, 4.0, 24.0, 44.0, 5.0, 25.0, 45.0, 6.0, 26.0, 46.0, 7.0, 27.0, 47.0, 8.0, 28.0, 48.0, 9.0, 29.0, 49.0, 10.0, 30.0, 50.0, 11.0, 31.0, 51.0, 12.0, 32.0, 52.0, 13.0, 33.0, 53.0, 14.0, 34.0, 54.0, 15.0, 35.0, 55.0, 16.0, 36.0, 56.0, 17.0, 37.0, 57.0, 18.0, 38.0, 58.0, 19.0, 39.0, 59.0, 20.0, 40.0, 60.0});
    x.reshapei('c', {3, 4, 5});

    x.permutei({1, 2, 0});
    x.streamline();

    // x.printShapeInfo("{1, 2, 0} shape");
    // x.printBuffer("{1, 2, 0} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayTest2, Test_PermuteEquality_4) {
    auto x = NDArrayFactory::create<double>('c', {1, 60});
    x.linspace(1);
    auto exp = NDArrayFactory::create<double>('c', {5, 3, 4}, {1.0, 6.0, 11.0, 16.0, 21.0, 26.0, 31.0, 36.0, 41.0, 46.0, 51.0, 56.0, 2.0, 7.0, 12.0, 17.0, 22.0, 27.0, 32.0, 37.0, 42.0, 47.0, 52.0, 57.0, 3.0, 8.0, 13.0, 18.0, 23.0, 28.0, 33.0, 38.0, 43.0, 48.0, 53.0, 58.0, 4.0, 9.0, 14.0, 19.0, 24.0, 29.0, 34.0, 39.0, 44.0, 49.0, 54.0, 59.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 60.0});
    x.reshapei('c', {3, 4, 5});

    x.permutei({2, 0, 1});
    x.streamline();

    // x.printShapeInfo("{2, 0, 1} shape");
    // x.printBuffer("{2, 0, 1} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

TEST_F(NDArrayTest2, Test_PermuteEquality_5) {
    auto x = NDArrayFactory::create<double>('c', {1, 60});
    x.linspace(1);
    auto exp = NDArrayFactory::create<double>('c', {5, 4, 3},
        {1.0, 21.0, 41.0, 6.0, 26.0, 46.0, 11.0, 31.0, 51.0, 16.0, 36.0, 56.0, 2.0, 22.0, 42.0, 7.0,
         27.0, 47.0, 12.0, 32.0, 52.0, 17.0, 37.0, 57.0, 3.0, 23.0, 43.0, 8.0, 28.0, 48.0, 13.0, 33.0,
         53.0, 18.0, 38.0, 58.0, 4.0, 24.0, 44.0, 9.0, 29.0, 49.0, 14.0, 34.0, 54.0, 19.0, 39.0, 59.0,
         5.0, 25.0, 45.0, 10.0, 30.0, 50.0, 15.0, 35.0, 55.0, 20.0, 40.0, 60.0});
    x.reshapei('c', {3, 4, 5});

    x.permutei({2, 1, 0});
    x.streamline();

    // x.printShapeInfo("{2, 1, 0} shape");
    // x.printBuffer("{2, 1, 0} data");

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}
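
// fillAsTriangular(value, lower, upper, target, direction) overwrites one triangle
// of the matrix (selected by 'u'/'l' and the band offsets) with the given value;
// here it zeroes parts of a 4x4 above or below the chosen diagonal.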

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, fillAsTriangular_test1) {

    auto x = NDArrayFactory::create<double>('c', {4, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto exp = NDArrayFactory::create<double>('c', {4, 4}, {1, 0, 0, 0, 5, 6, 0, 0, 9, 10, 11, 0, 13, 14, 15, 16});

    x.fillAsTriangular<double>(0., 0, 0, x, 'u');

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, fillAsTriangular_test2) {

    auto x = NDArrayFactory::create<double>('c', {4, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto exp = NDArrayFactory::create<double>('c', {4, 4}, {0, 0, 0, 0, 5, 0, 0, 0, 9, 10, 0, 0, 13, 14, 15, 0});

    x.fillAsTriangular<double>(0., 0, -1, x, 'u');

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, fillAsTriangular_test3) {

    auto x = NDArrayFactory::create<double>('c', {4, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto exp = NDArrayFactory::create<double>('c', {4, 4}, {1, 2, 3, 4, 0, 6, 7, 8, 0, 0, 11, 12, 0, 0, 0, 16});

    x.fillAsTriangular<double>(0., 0, 0, x, 'l');

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, fillAsTriangular_test4) {

    auto x = NDArrayFactory::create<double>('c', {4, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto exp = NDArrayFactory::create<double>('c', {4, 4}, {0, 2, 3, 4, 0, 0, 7, 8, 0, 0, 0, 12, 0, 0, 0, 0});

    x.fillAsTriangular<double>(0., 1, 0, x, 'l');

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_DType_Conversion_1) {
    auto x = NDArrayFactory::create<double>('c', {2, 3}, {1, 2, 3, 4, 5, 6});

    auto xd = x.template asT<double>();

    auto xf = xd.template asT<double>();

    ASSERT_TRUE(x.isSameShape(xf));
    ASSERT_TRUE(x.equalsTo(xf));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_ScalarArray_Assign_1) {
    auto x = NDArrayFactory::create<float>('c', {2, 2});
    auto y = NDArrayFactory::create<float>(2.0f);
    auto exp = NDArrayFactory::create<float>('c', {2, 2}, {2.0f, 2.0f, 2.0f, 2.0f});

    x.assign(y);

    ASSERT_TRUE(exp.isSameShape(&x));
    ASSERT_TRUE(exp.equalsTo(&x));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Reshape_To_Vector_1) {
    auto x = NDArrayFactory::create<float>('c', {2, 3}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    auto exp = NDArrayFactory::create<float>('c', {6}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});

    x.reshapei({-1});

    ASSERT_TRUE(exp.isSameShape(x));
    ASSERT_TRUE(exp.equalsTo(x));
}

TEST_F(NDArrayTest2, Test_toIndexedString_1) {
    auto x = NDArrayFactory::create<float>('c', {2, 2}, {1.5f, 2.5f, 3.f, 4.5f});

    auto str = x.asIndexedString();
    std::string exp = "[1.5, 2.5, 3, 4.5]";

    ASSERT_EQ(exp, str);
}
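
// permute_test4 constructs arrays over hand-written shapeInfo buffers; permuting
// arr1 by {0, 4, 5, 1, 2, 3} must reproduce arr2's shape info exactly (the strict
// comparison covers shape, strides, ews and order).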

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, permute_test4) {

    Nd4jLong arr1ShapeInfo[] = {6, 1, 1, 4, 3, 2, 2, 48, 48, 12, 4, 2, 1, 8192, 1, 99};
    Nd4jLong arr2ShapeInfo[] = {6, 1, 2, 2, 1, 4, 3, 48, 2, 1, 48, 12, 4, 8192, 0, 99};

    auto arr1Buffer = new float[786432];
    auto arr2Buffer = new float[786432];

    NDArray arr1(arr1Buffer, arr1ShapeInfo, sd::LaunchContext::defaultContext());
    NDArray arr2(arr2Buffer, arr2ShapeInfo, sd::LaunchContext::defaultContext());

    const std::vector<int> perm = {0, 4, 5, 1, 2, 3};
    auto arr1P = arr1.permute(perm);
    // arr1P->printShapeInfo();

    // ASSERT_TRUE(arr1.isSameShapeStrict(&arr2));
    ASSERT_TRUE(arr1P.isSameShapeStrict(arr2));

    delete[] arr1Buffer;
    delete[] arr2Buffer;
}
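
// TestStdDev3 computes the reference standard deviation by hand: the biased
// estimate divides by len, the corrected (Bessel) one by len - 1; the boolean
// passed to varianceNumber selects the correction.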

////////////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, TestStdDev3) {

    // auto array('c', {10, 10});
    auto array = NDArrayFactory::create<double>('c', {2, 2}, {0.2946, 0.2084, 0.0345, 0.7368});
    const int len = array.lengthOf();

    double sum = 0.;
    for (int i = 0; i < len; ++i)
        sum += array.e<double>(i);

    const double mean = sum / len;

    double diffSquared = 0.;
    for (int i = 0; i < len; ++i)
        diffSquared += (array.e<double>(i) - mean) * (array.e<double>(i) - mean);

    const double trueVariance = math::nd4j_sqrt<double, double>(diffSquared / len);
    const double trueVarianceCorr = math::nd4j_sqrt<double, double>(diffSquared / (len - 1));

    const double variance = array.varianceNumber(variance::SummaryStatsStandardDeviation, false).e<double>(0);
    const double varianceCorr = array.varianceNumber(variance::SummaryStatsStandardDeviation, true).e<double>(0);

    // printf("%s expected %.10f calculated %.10f\n", "variance :", trueVariance, variance);
    // printf("%s expected %.10f calculated %.10f\n", "variance corrected:", trueVarianceCorr, varianceCorr);

    ASSERT_NEAR(trueVariance, variance, 1e-8);
    ASSERT_NEAR(trueVarianceCorr, varianceCorr, 1e-8);
}
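
// linspace(start, step) fills the array with an arithmetic progression; the
// single-argument form uses step 1, and a negative step counts downwards.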

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Linspace_1) {
    auto exp = NDArrayFactory::create<double>('c', {1, 5}, {1., 2., 3., 4., 5.});
    auto x = NDArrayFactory::create<double>('c', {1, 5});
    x.linspace(1);

    ASSERT_TRUE(x.equalsTo(&exp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Linspace_2) {
    auto exp = NDArrayFactory::create<double>('c', {1, 5}, {1., 3., 5., 7., 9.});
    auto x = NDArrayFactory::create<double>('c', {1, 5});

    x.linspace(1, 2);

    ASSERT_TRUE(x.equalsTo(&exp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Linspace_3) {
    auto exp = NDArrayFactory::create<double>('c', {1, 5}, {1., 4., 7., 10., 13.});

    auto x = NDArrayFactory::create<double>('c', {1, 5});
    x.linspace(1, 3);

    ASSERT_TRUE(x.equalsTo(&exp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Linspace_4) {
    auto exp = NDArrayFactory::create<double>('c', {1, 5}, {-1., -2., -3., -4., -5.});

    auto x = NDArrayFactory::create<double>('c', {1, 5});
    x.linspace(-1, -1);

    ASSERT_TRUE(x.equalsTo(&exp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, Test_Linspace_5) {
    auto exp = NDArrayFactory::create<double>('c', {1, 5}, {9., 8., 7., 6., 5.});

    auto x = NDArrayFactory::create<double>('c', {1, 5});
    x.linspace(9, -1);

    ASSERT_TRUE(x.equalsTo(&exp));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, allTensorsAlongDimension_test1) {

    auto x = NDArrayFactory::create<double>('c', {4}, {1, 2, 3, 4});
    auto exp = NDArrayFactory::create<double>('c', {4}, {1, 2, 3, 4});

    auto set = x.allTensorsAlongDimension({0});
    // set->at(0)->printShapeInfo();
    // set->at(0)->printIndexedBuffer();

    ASSERT_TRUE(set.size() == 1);
    ASSERT_TRUE(exp.isSameShape(set.at(0)));
    ASSERT_TRUE(exp.equalsTo(set.at(0)));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, scalar_get_test1) {

    auto scalar1 = NDArrayFactory::create(20.f);

    NDArray arr('c', {2, 2}, {0., 10., 20., 30.}, sd::DataType::FLOAT32);

    NDArray scalar2 = arr.e(2);

    ASSERT_TRUE(scalar1.isSameShape(scalar2));
    ASSERT_TRUE(scalar1.equalsTo(scalar2));
    ASSERT_TRUE(scalar1.dataType() == scalar2.dataType());
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, scalar_get_test2) {

    auto scalar1 = NDArrayFactory::create(20.f);

    NDArray arr('f', {2, 2}, {0., 10., 20., 30.}, sd::DataType::FLOAT32);

    NDArray scalar2 = arr.e(1);

    ASSERT_TRUE(scalar1.isSameShape(scalar2));
    ASSERT_TRUE(scalar1.equalsTo(scalar2));
    ASSERT_TRUE(scalar1.dataType() == scalar2.dataType());
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, scalar_set_test1) {

    NDArray scalar1 = NDArrayFactory::create(20.f);

    NDArray arr('c', {2, 2}, {0., 10., -20., 30.}, sd::DataType::FLOAT32);
    NDArray exp('c', {2, 2}, {0., 10., 20., 30.}, sd::DataType::FLOAT32);

    arr.p(2, scalar1);

    ASSERT_TRUE(exp.equalsTo(arr));
}

////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, scalar_set_test2) {

    NDArray scalar1 = NDArrayFactory::create(20.f);

    NDArray arr('f', {2, 2}, {0., 10., -20., 30.}, sd::DataType::FLOAT32);
    NDArray exp('f', {2, 2}, {0., 10., 20., 30.}, sd::DataType::FLOAT32);

    arr.p(1, scalar1);

    ASSERT_TRUE(exp.equalsTo(arr));
}
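
// dup('c') makes a deep, c-ordered copy; equality must survive a large linspace buffer.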

TEST_F(NDArrayTest2, big_dup_test) {
    // auto arr = NDArrayFactory::linspace<double>(1.0f, 10000000.0f, 100000000);
    auto arr = NDArrayFactory::linspace<double>(1.0f, 1000.0f, 10000);
    auto dup = new NDArray(arr->dup('c'));

    ASSERT_EQ(*arr, *dup);

    delete arr;
    delete dup;
}
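
// DebugHelper statistics cover min/max/mean/stddev plus zero, negative, positive,
// inf and nan counts. Test 1 cross-checks them against the reduce_* ops; test 2
// against hand-computed constants.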

TEST_F(NDArrayTest2, debugInfoTest_1) {
    NDArray testArray('c', {2, 4, 4, 4}, {
        91., 82., 37., 64., 55., 46., 73., 28., 119., 12., 112., 13., 14., 114., 16., 117.,
        51., 42., 67., 24., 15., 56., 93., 28., 109., 82., 12., 113., 114., 14., 116., 11.,
        31., 22., 87., 44., 55., 46., 73., 28., -119., 12., 112., 13., 14., 114., 16., 117.,
        91., -82., 37., 64., -55.1, 0, 73., 28., -119., 12., 112., 13., 14., 114., 16.2, 117.,
        91., -82., 37., 64., 55., 46., 73., 28., -119., 12., 112., 13., 14., 114., 16., 117.,
        51., 42., 67., 24., 15., 0., 93., 28., 109., 82., 12., 113., 114., 14., 116., 11.,
        31., 22., 87., 44., 55., 46., 73., 28., 119., 12., 112., 13., 14., 114., 16., 117.,
        91., 82., 37., 64., -3, 0, 73., 28., 119., 12., 112., 13., 140., 110., 160., 107.}, sd::DataType::DOUBLE);
    NDArray res(sd::DataType::DOUBLE);
    DebugInfo info = DebugHelper::debugStatistics(&testArray);
    DebugInfo exp; // = {}
    sd::ops::reduce_min minOp;
    sd::ops::reduce_mean meanOp;
    sd::ops::reduce_max maxOp;
    sd::ops::reduce_stdev stdevOp;

    minOp.execute({&testArray}, {&res}, {}, {}, {});
    exp._minValue = res.e<double>(0);
    meanOp.execute({&testArray}, {&res}, {}, {}, {});
    exp._meanValue = res.e<double>(0);
    maxOp.execute({&testArray}, {&res}, {}, {}, {});
    exp._maxValue = res.e<double>(0);
    stdevOp.execute({&testArray}, {&res}, {}, {}, {});
    exp._stdDevValue = res.e<double>(0);
    exp._zeroCount = 3;
    exp._negativeCount = 7;
    exp._positiveCount = 118;
    exp._infCount = 0;
    exp._nanCount = 0;
    printf("Output statistics %lf %lf %lf %lf\n", info._minValue, info._maxValue, info._meanValue, info._stdDevValue);
    printf("Expect statistics %lf %lf %lf %lf\n", exp._minValue, exp._maxValue, exp._meanValue, exp._stdDevValue);
    printf("%lld %lld %lld %lld %lld\n", info._zeroCount, info._negativeCount, info._positiveCount, info._infCount, info._nanCount);
    ASSERT_EQ(exp, info);
}

TEST_F(NDArrayTest2, debugInfoTest_2) {
    NDArray testArray('c', {2, 4, 4, 4}, {
        91., 82., 37., 64., 55., 46., 73., 28., 119., 12., 112., 13., 14., 114., 16., 117.,
        51., 42., 67., 24., 15., 56., 93., 28., 109., 82., 12., 113., 114., 14., 116., 11.,
        31., 22., 87., 44., 55., 46., 73., 28., -119., 12., 112., 13., 14., 114., 16., 117.,
        91., -82., 37., 64., -55.1, 0, 73., 28., -119., 12., 112., 13., 14., 114., 16.2, 117.,
        91., -82., 37., 64., 55., 46., 73., 28., -119., 12., 112., 13., 14., 114., 16., 117.,
        51., 42., 67., 24., 15., 0., 93., 28., 109., 82., 12., 113., 114., 14., 116., 11.,
        31., 22., 87., 44., 55., 46., 73., 28., 119., 12., 112., 13., 14., 114., 16., 117.,
        91., 82., 37., 64., -3, 0, 73., 28., 119., 12., 112., 13., 140., 110., 160., 107.}, sd::DataType::DOUBLE);

    DebugInfo info;
    DebugInfo exp; // = {}
    exp._minValue = -119;
    exp._maxValue = 160.;
    exp._meanValue = 51.328906;
    exp._stdDevValue = 52.385694;
    exp._zeroCount = 3;
    exp._negativeCount = 7;
    exp._positiveCount = 118;
    exp._infCount = 0;
    exp._nanCount = 0;
    DebugHelper::retrieveDebugStatistics(&info, &testArray);
    printf("Output statistics %lf %lf %lf %lf\n", info._minValue, info._maxValue, info._meanValue, info._stdDevValue);
    printf("Expect statistics %lf %lf %lf %lf\n", exp._minValue, exp._maxValue, exp._meanValue, exp._stdDevValue);
    printf("%lld %lld %lld %lld %lld\n", info._zeroCount, info._negativeCount, info._positiveCount, info._infCount, info._nanCount);
    //printf("%lf %lf %lf %lf\n", info._minValue, info._maxValue, info._meanValue, info._stdDevValue);
    //printf("%lld %lld %lld %lld %lld\n", info._zeroCount, info._negativeCount, info._positiveCount, info._infCount, info._nanCount);
    ASSERT_EQ(exp, info);
}
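
// ews (element-wise stride) of a sub-array view: in a c-ordered {10, 5} matrix a
// column view strides by 5 and a row view by 1; the f-ordered cases mirror that
// with strides 1 and 10.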

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, test_subarray_ews_1) {

    NDArray x('c', {10, 5}, sd::DataType::FLOAT32);
    auto subArr1 = x.subarray({NDIndex::all(), NDIndex::point(2)});

    ASSERT_EQ(5, subArr1.ews());
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, test_subarray_ews_2) {

    NDArray x('f', {10, 5}, sd::DataType::FLOAT32);
    auto subArr1 = x.subarray({NDIndex::all(), NDIndex::point(2)});

    ASSERT_EQ(1, subArr1.ews());
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, test_subarray_ews_3) {

    NDArray x('c', {10, 5}, sd::DataType::FLOAT32);
    auto subArr1 = x.subarray({NDIndex::point(2), NDIndex::all()});

    ASSERT_EQ(1, subArr1.ews());
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, test_subarray_ews_4) {

    NDArray x('f', {10, 5}, sd::DataType::FLOAT32);
    auto subArr1 = x.subarray({NDIndex::point(2), NDIndex::all()});

    ASSERT_EQ(10, subArr1.ews());
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, subarray_1) {

    NDArray x('c', {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, sd::DataType::FLOAT32);
    NDArray y('f', {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, sd::DataType::FLOAT32);

    Nd4jLong shapeExpX0[] = {1, 2, 12, 8192, 12, 99};
    float buffExpX0[] = {1.000000, 13.000000};
    float buffExpX1[] = {2.000000, 14.000000};
    Nd4jLong shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, 12, 99};
    float buffExpX2[] = {1.000000, 13.000000};
    Nd4jLong shapeExpX3[] = {2, 2, 4, 12, 1, 8192, 0, 99};
    float buffExpX3[] = {9.000000, 10.000000, 11.000000, 12.000000, 21.000000, 22.000000, 23.000000, 24.000000};
    Nd4jLong shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, 0, 99};
    float buffExpX4[] = {9.000000, 10.000000, 11.000000, 12.000000, 21.000000, 22.000000, 23.000000, 24.000000};
    Nd4jLong shapeExpX5[] = {2, 2, 3, 12, 4, 8192, 4, 99};
    float buffExpX5[] = {4.000000, 8.000000, 12.000000, 16.000000, 20.000000, 24.000000};

    Nd4jLong shapeExpY0[] = {1, 2, 1, 8192, 1, 102};
    float buffExpY0[] = {1.000000, 2.000000};
    float buffExpY1[] = {7.000000, 8.000000};
    Nd4jLong shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, 1, 102};
    float buffExpY2[] = {1.000000, 2.000000};
    Nd4jLong shapeExpY3[] = {2, 2, 4, 1, 6, 8192, 0, 102};
    float buffExpY3[] = {5.000000, 11.000000, 17.000000, 23.000000, 6.000000, 12.000000, 18.000000, 24.000000};
    Nd4jLong shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, 0, 102};
    float buffExpY4[] = {5.000000, 11.000000, 17.000000, 23.000000, 6.000000, 12.000000, 18.000000, 24.000000};
    Nd4jLong shapeExpY5[] = {2, 2, 3, 1, 2, 8192, 1, 102};
    float buffExpY5[] = {19.000000, 21.000000, 23.000000, 20.000000, 22.000000, 24.000000};

    NDArray x0 = x(0, {1, 2});
    for (int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i)
        ASSERT_TRUE(x0.getShapeInfo()[i] == shapeExpX0[i]);
    for (int i = 0; i < x0.lengthOf(); ++i)
        ASSERT_TRUE(x0.e<float>(i) == buffExpX0[i]);

    NDArray x1 = x(1, {1, 2});
    for (int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i)
        ASSERT_TRUE(x1.getShapeInfo()[i] == shapeExpX0[i]);
    for (int i = 0; i < x1.lengthOf(); ++i)
        ASSERT_TRUE(x1.e<float>(i) == buffExpX1[i]);

    NDArray x2 = x(0, {1, 2}, true);
    for (int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i)
        ASSERT_TRUE(x2.getShapeInfo()[i] == shapeExpX2[i]);
    for (int i = 0; i < x2.lengthOf(); ++i)
        ASSERT_TRUE(x2.e<float>(i) == buffExpX2[i]);

    NDArray x3 = x(2, {1});
    for (int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i)
        ASSERT_TRUE(x3.getShapeInfo()[i] == shapeExpX3[i]);
    for (int i = 0; i < x3.lengthOf(); ++i)
        ASSERT_TRUE(x3.e<float>(i) == buffExpX3[i]);

    NDArray x4 = x(2, {1}, true);
    for (int i = 0; i < shape::shapeInfoLength(x4.rankOf()); ++i)
        ASSERT_TRUE(x4.getShapeInfo()[i] == shapeExpX4[i]);
    for (int i = 0; i < x4.lengthOf(); ++i)
        ASSERT_TRUE(x4.e<float>(i) == buffExpX4[i]);

    NDArray x5 = x(3, {2});
    for (int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i)
        ASSERT_TRUE(x5.getShapeInfo()[i] == shapeExpX5[i]);
    for (int i = 0; i < x5.lengthOf(); ++i)
        ASSERT_TRUE(x5.e<float>(i) == buffExpX5[i]);

    // ******************* //
    NDArray y0 = y(0, {1, 2});
    for (int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i)
        ASSERT_TRUE(y0.getShapeInfo()[i] == shapeExpY0[i]);
    for (int i = 0; i < y0.lengthOf(); ++i)
        ASSERT_TRUE(y0.e<float>(i) == buffExpY0[i]);

    NDArray y1 = y(1, {1, 2});
    for (int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i)
        ASSERT_TRUE(y1.getShapeInfo()[i] == shapeExpY0[i]);
    for (int i = 0; i < y1.lengthOf(); ++i)
        ASSERT_TRUE(y1.e<float>(i) == buffExpY1[i]);

    NDArray y2 = y(0, {1, 2}, true);
    for (int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i)
        ASSERT_TRUE(y2.getShapeInfo()[i] == shapeExpY2[i]);
    for (int i = 0; i < y2.lengthOf(); ++i)
        ASSERT_TRUE(y2.e<float>(i) == buffExpY2[i]);

    NDArray y3 = y(2, {1});
    for (int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i)
        ASSERT_TRUE(y3.getShapeInfo()[i] == shapeExpY3[i]);
    for (int i = 0; i < y3.lengthOf(); ++i)
        ASSERT_TRUE(y3.e<float>(i) == buffExpY3[i]);

    NDArray y4 = y(2, {1}, true);
    for (int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i)
        ASSERT_TRUE(y4.getShapeInfo()[i] == shapeExpY4[i]);
    for (int i = 0; i < y4.lengthOf(); ++i)
        ASSERT_TRUE(y4.e<float>(i) == buffExpY4[i]);

    NDArray y5 = y(3, {2});
    for (int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i)
        ASSERT_TRUE(y5.getShapeInfo()[i] == shapeExpY5[i]);
    for (int i = 0; i < y5.lengthOf(); ++i)
        ASSERT_TRUE(y5.e<float>(i) == buffExpY5[i]);
}

TEST_F(NDArrayTest2, test_subarray_interval_1) {

    NDArray x('f', {10, 10}, sd::DataType::FLOAT32);
    auto subArr1 = x.subarray({NDIndex::all(), NDIndex::interval(0, 9)});

    ASSERT_EQ(10, subArr1.sizeAt(0));
    ASSERT_EQ(9, subArr1.sizeAt(1));
}

TEST_F(NDArrayTest2, test_subarray_interval_2) {

    NDArray x('c', {10, 10}, sd::DataType::FLOAT32);
    auto subArr1 = x.subarray({NDIndex::all(), NDIndex::interval(0, 9)});

    ASSERT_EQ(10, subArr1.sizeAt(0));
    ASSERT_EQ(9, subArr1.sizeAt(1));
}

TEST_F(NDArrayTest2, test_subarray_3d_cf) {
    NDArray f('f', {10, 20, 30}, sd::DataType::FLOAT32);
    NDArray c('c', {10, 20, 30}, sd::DataType::FLOAT32);

    auto subarrayF = f({0,0, 0,0, 2,3}, true);

    auto subarrayC = c({2,3, 0,0, 0,0}, true);
}

TEST_F(NDArrayTest2, test_broadcast_row_1) {
    auto x = NDArrayFactory::create<float>('c', {10, 5});
    auto y = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
    auto e = NDArrayFactory::create<float>('c', {10, 5});
    e.assign(1.0f);

    x += y;

    ASSERT_EQ(e, x);
}

TEST_F(NDArrayTest2, test_broadcast_column_1) {
    auto x = NDArrayFactory::create<float>('c', {5, 10});
    auto y = NDArrayFactory::create<float>('c', {5, 1}, {1.f, 1.f, 1.f, 1.f, 1.f});
    auto e = NDArrayFactory::create<float>('c', {5, 10});
    e.assign(1.0f);

    x += y;

    ASSERT_EQ(e, x);
}

TEST_F(NDArrayTest2, test_broadcast_column_2) {
    auto x = NDArrayFactory::create<float>('c', {5, 10});
    auto y = NDArrayFactory::create<float>('c', {5, 1}, {1.f, 1.f, 1.f, 1.f, 1.f});
    auto e = NDArrayFactory::create<float>('c', {5, 10});
    e.assign(1.0f);

    x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, x, false);

    ASSERT_EQ(e, x);
}

TEST_F(NDArrayTest2, test_broadcast_column_3) {
    auto x = NDArrayFactory::create<float>('c', {5, 10});
    auto y = NDArrayFactory::create<float>('c', {5, 1}, {1.f, 1.f, 1.f, 1.f, 1.f});
    auto e = NDArrayFactory::create<float>('c', {5, 10});
    e.assign(1.0f);

    x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, x);

    ASSERT_EQ(e, x);
}

TEST_F(NDArrayTest2, test_broadcast_column_4) {
    auto x = NDArrayFactory::create<float>('f', {10, 5});
    auto y = NDArrayFactory::create<float>('f', {5}, {1.f, 1.f, 1.f, 1.f, 1.f});
    auto e = NDArrayFactory::create<float>('f', {10, 5});
    e.assign(1.0f);

    x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, x);

    ASSERT_EQ(e, x);
}
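
// test_not_tiled_*: += with a broadcastable y (a size-1 dimension) must produce
// the broadcast sum; judging by the name, the implementation is expected to do so
// without physically tiling y.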

TEST_F(NDArrayTest2, test_not_tiled_1) {
    auto x = NDArrayFactory::create<float>('c', {4, 12, 128, 128});
    auto y = NDArrayFactory::create<float>('c', {4, 1, 128, 128});
    auto e = NDArrayFactory::create<float>('c', {4, 12, 128, 128});
    y.assign(1.0f);
    e.assign(1.0f);

    x += y;

    ASSERT_EQ(e, x);
}

TEST_F(NDArrayTest2, test_not_tiled_2) {
    auto x = NDArrayFactory::create<float>('c', {4, 128, 768});
    auto y = NDArrayFactory::create<float>('c', {4, 128, 1});
    auto e = NDArrayFactory::create<float>('c', {4, 128, 768});
    y.assign(1.0f);
    e.assign(1.0f);

    x += y;

    ASSERT_EQ(e, x);
}

TEST_F(NDArrayTest2, test_long_sum_1) {
    auto x = NDArrayFactory::create<Nd4jLong>('c', {2, 2}, {1, 2, 3, 4});

    auto z = x.reduceAlongDimension(reduce::Sum, {0});
}
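
// reshapei on these arrays (built over padded, non-contiguous shapeInfo buffers)
// returns false, apparently signalling that the reshape could not be done as a
// simple view; the shape info is nevertheless rewritten to the target {4, 7} layout.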

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, reshapei_1) {

    Nd4jLong shapeInfo1[] = {6, 2, 1, 2, 1, 7, 1, 7, 7, 14, 28, 1, 1, 8192, 0, 99};
    Nd4jLong shapeInfo2[] = {2, 4, 7, 7, 1, 8192, 1, 99};

    auto buffer = new float[shape::length(shapeInfo1)];
    NDArray x(buffer, shapeInfo1);

    const bool canReshape = x.reshapei({4, 7});

    ASSERT_FALSE(canReshape);
    ASSERT_TRUE(shape::equalsStrict(x.getShapeInfo(), shapeInfo2));

    delete[] buffer;
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, reshapei_2) {

    Nd4jLong shapeInfo1[] = {6, 1, 2, 1, 2, 7, 1, 28, 7, 7, 14, 1, 1, 8192, 0, 99};
    Nd4jLong shapeInfo2[] = {2, 4, 7, 7, 1, 8192, 1, 99};

    auto buffer = new float[shape::length(shapeInfo1)];
    NDArray x(buffer, shapeInfo1);

    const bool canReshape = x.reshapei({4, 7});

    ASSERT_FALSE(canReshape);
    ASSERT_TRUE(shape::equalsStrict(x.getShapeInfo(), shapeInfo2));

    delete[] buffer;
}
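
// reduce_1 cross-checks reduceAlongDimension(Sum, {2, 3}) against a manual loop
// that accumulates raw buffer values via shape::getOffset.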

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, trueBroadcast_1) {

    NDArray x('f', {2, 3}, {1., 2., 3., 4., 5., 6.});
    NDArray y('f', {1, 3}, {5., 4., 3.});
    NDArray z('c', {2, 3}, sd::DataType::DOUBLE);

    auto exp = x - y;
    x.applyTrueBroadcast(sd::BroadcastOpsTuple::Subtract(), y, z);

    // exp.printIndexedBuffer();
    // z.printIndexedBuffer();

    ASSERT_TRUE(exp.equalsTo(z));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, reduce_1) {

    NDArray arr6('f', {1, 1, 4, 4, 4, 4}, sd::DataType::DOUBLE);
    NDArray exp('f', {1, 1, 4, 4}, sd::DataType::DOUBLE);

    arr6.linspace(1);

    NDArray arr6s = arr6.reduceAlongDimension(sd::reduce::Sum, {2, 3});

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            double sum = 0;
            for (int x = 0; x < 4; x++) {
                for (int y = 0; y < 4; y++) {
                    Nd4jLong indices[] = {0, 0, x, y, i, j};
                    Nd4jLong offset = shape::getOffset(arr6.getShapeInfo(), indices);
                    sum += ((double*)arr6.getBuffer())[offset];
                }
            }
            exp.p<double>(0, 0, i, j, sum);
        }
    }

    // arr6s->printShapeInfo();
    // exp.printShapeInfo();
    // exp.printIndexedBuffer();
    // arr6s->printIndexedBuffer();

    ASSERT_TRUE(exp.equalsTo(arr6s));
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayTest2, reduce3_1) {

    NDArray x('c', {1, 4}, {1, 2, 3, 4});
    NDArray y('c', {1, 4}, {2, 3, 4, 5});
    NDArray exp('c', {4}, {1, 1, 1, 1});

    NDArray z = x.applyReduce3(sd::reduce3::EuclideanDistance, y, {0}, nullptr);

    ASSERT_TRUE(exp.isSameShape(z));
    ASSERT_TRUE(exp.equalsTo(z));
}

TEST_F(NDArrayTest2, all_tads_1) {
    auto x = NDArrayFactory::create<float>('c', {3, 5});

    auto arrays = x.allTensorsAlongDimension({1});
    ASSERT_EQ(3, arrays.size());
}
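
// Broadcasting against an empty array yields an empty result of the same shape,
// regardless of operand order.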

TEST_F(NDArrayTest2, test_trueBroadcast_empty_1) {
    auto x = NDArrayFactory::create<float>('c', {0, 2});
    auto y = NDArrayFactory::create<float>('c', {1, 2});

    auto z = x + y;

    ASSERT_EQ(x, z);
}

TEST_F(NDArrayTest2, test_trueBroadcast_empty_2) {
    auto x = NDArrayFactory::create<float>('c', {0, 2});
    auto y = NDArrayFactory::create<float>('c', {1, 2});

    auto z = y + x;

    ASSERT_EQ(x, z);
}
- one more test
* broadcast tweaks
* [WIP] Fixing outstanding issues for NLP (#9)
* Avoid using not-inited objects
* Test fixed.
* Redundant method avoided for models like FastText
* KMeans++ implementation
* KMeans++ implementation
* Disable parallel execution
* KMeans++
* Tests
* Dev branch merge (#16)
* SameDiff: convertDataType and gradient check util improvements (#12)
* GradCheck util improvements
* StopGradient constructor + test
* SameDiff: Add datatype conversion
* Javadoc and add DataType.isNumerical()
* Small fix
* Fix SameDiff TF import test cases intermediate naming (workaround for bad default)
* TFGraphTestAllHelper: check intermediates in execution order
* Add missing debug listener
* [WIP] lstmBlock fix + other changes (#13)
- fixes lstmBlock issue
- changes NDArray method reshape(), permute(), transpose() by making them return instance instead of pointer
- CheckNumerics op
- fixes for ReduceBool IsInfOrNan & IsFinite
* Small test fix
* CheckNumerics op wrapper
* Fix some issues on master (#17)
* Fix DataVec test issue
* Fix issue with dl4j SameDiff output layer
* Dtype fix for lambda layers
* #7912 BertIterator dtype fix (use float32 not global default)
* [WIP] Next set of CUDA stuff (#7)
New CUDA implementations and improvements
* bad file
* Dev branch master merge (#23)
* SameDiff: convertDataType and gradient check util improvements (#12)
* GradCheck util improvements
* StopGradient constructor + test
* SameDiff: Add datatype conversion
* Javadoc and add DataType.isNumerical()
* Small fix
* Fix SameDiff TF import test cases intermediate naming (workaround for bad default)
* TFGraphTestAllHelper: check intermediates in execution order
* Add missing debug listener
* [WIP] lstmBlock fix + other changes (#13)
- fixes lstmBlock issue
- changes NDArray method reshape(), permute(), transpose() by making them return instance instead of pointer
- CheckNumerics op
- fixes for ReduceBool IsInfOrNan & IsFinite
* Small test fix
* CheckNumerics op wrapper
* Compatibility of deserialization (#18)
Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>
* SameDiff: add activation gradient checking support for debugging (#19)
* SameDiff gradient checker: first pass on activation gradient checks
* Fixes + tests for activation gradient checking
* Javadoc
* [WIP] Some nd4j data type corrections (#20)
* Adjust data type
* Set correct Data type.
* Size of proper data type.
* fix averaged cpu load (#22)
* SameDiff ops, TF import and fixes (#24)
* CheckNumerics tests + fixes + misc fixes
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Fake quant
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Fixes
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* FakeQuantWithMinMaxArgs
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* CheckNumerics fix
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Fix libnd4j ALL_INTS and ALL_FLOATS declaration (uint and bfloat types)
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Small fix
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Javadoc
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Exception tweak
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* fix
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Fix for out of scope stack allocated var use
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Ignores
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Ignore for known failing test (already logged issue)
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Merge upstream to fork (#25)
* Add thousand-separator commas to TotalParams (#7915)
* Add thousand-separator commas to TotalParams
The number of parameters can be quite large, and it would help the reading of the summary printout to have the TotalParams column & values at the bottom have thousand-separator-commas in them.
* Add thousand-separator commas to MultiLayerNetwork
Corresponding change to MultiLayerNetwork
Signed-off-by: Jxtps Jxtps <jxtps435@gmail.com>
* Update contributing and issue/PR templates (#7934)
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Fix link to AdaDelta paper (#7942)
Fix link to AdaDelta paper hosted on matthewzeiler.com
Signed-off-by: Jxtps
* Fixes, and ignores for known/logged failing issues (#7943)
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* SameDiff + DL4J/SameDiff: Multiple fixes (#28)
* #7919 HDF5 attribute buffer length fix
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* #7909 Arbiter constructor exception ux improvements
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* #7925 RNN output layer length checks
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* #7939 Add listener for validating inputs are not incorrectly modified
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* #7939 Integrate NonInplaceValidationListener into tests
* #7844 DL4J SameDiff fixes for variable minibatch size
* DL4J SameDiff fixes - ensure gradient for input placeholder is available
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Tweaks to ExternalErrorsFunction - use placeholders, make more robust
* Another fix
* More fixes
* More SameDiff/DL4J fixes
* Scope out scalar array creation in BaseScalarOp
* Remove debug code
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* [WIP] Final dev branch merge (#29)
* SameDiff: convertDataType and gradient check util improvements (#12)
* GradCheck util improvements
* StopGradient constructor + test
* SameDiff: Add datatype conversion
* Javadoc and add DataType.isNumerical()
* Small fix
* Fix SameDiff TF import test cases intermediate naming (workaround for bad default)
* TFGraphTestAllHelper: check intermediates in execution order
* Add missing debug listener
* [WIP] lstmBlock fix + other changes (#13)
- fixes lstmBlock issue
- changes NDArray method reshape(), permute(), transpose() by making them return instance instead of pointer
- CheckNumerics op
- fixes for ReduceBool IsInfOrNan & IsFinite
* Small test fix
* CheckNumerics op wrapper
* Compatibility of deserialization (#18)
Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>
* SameDiff: add activation gradient checking support for debugging (#19)
* SameDiff gradient checker: first pass on activation gradient checks
* Fixes + tests for activation gradient checking
* Javadoc
* [WIP] Some nd4j data type corrections (#20)
* Adjust data type
* Set correct Data type.
* Size of proper data type.
* fix averaged cpu load (#22)
* [WIP] Multiple dataset iterators (#27)
* Splitting dataset into arbitrary number
* Fixes
* Multiple split of iterator
* Test
* Test
* Some fixes
* signature change
* one more tweak
Signed-off-by: raver119 <raver119@gmail.com>
* one more test for sequential use of DataSetIteratorSplitter
Signed-off-by: raver119 <raver119@gmail.com>
* Fixes
* Fixes
* one more test for Alexander
Signed-off-by: raver119 <raver119@gmail.com>
* Some fixes
* Some fixes
* one more test for Alexander
Signed-off-by: raver119 <raver119@gmail.com>
* minor test fix
Signed-off-by: raver119 <raver119@gmail.com>
* Some fixes
* Some fixes
* couple of assertions tweaked
Signed-off-by: raver119 <raver119@gmail.com>
* MDS splitter test :/
Signed-off-by: raver119 <raver119@gmail.com>
* Minor refactoring
* Multi dataset
* Some fixes
* More tests
* Small number of test fixes/improvements (failures on CI) (#31)
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* [WIP] More CUDA stuff (#26)
* initial commit
Signed-off-by: raver119 <raver119@gmail.com>
* LRN BP CUDA
Signed-off-by: raver119 <raver119@gmail.com>
* less memory
Signed-off-by: raver119 <raver119@gmail.com>
* Fixed bug with crop_and_resize op helper.
* get rid of unnecessary index-calculation dunction
Signed-off-by: Yurii <yurii@skymind.io>
* Fixed sort with nth_element cuda-based helper.
* Refactored nth_element.
* Refactored nth_element op and tests.
* Modified usage of dim array with sortTad routine.
* Refactored main routine of helper for non_max_image_suppression op.
* non_max_image_suppression op helper with cuda kernel implementation. Initial revision.
* fix vol2col cuda kernel
* meh
Signed-off-by: raver119 <raver119@gmail.com>
* topK concept
Signed-off-by: raver119 <raver119@gmail.com>
* unsorted topK with scanWitdh of 1
Signed-off-by: raver119 <raver119@gmail.com>
* correct vol2col tests
* sorted/unsorted topK
Signed-off-by: raver119 <raver119@gmail.com>
* implementation and fixing col2im/col2vol
* Corrected usage flags with input/output with reverse op.
* dup is const now
Signed-off-by: raver119 <raver119@gmail.com>
* percentile op
Signed-off-by: raver119 <raver119@gmail.com>
* group tests for mapool2d
Signed-off-by: Yurii <yurii@skymind.io>
* special test for george
Signed-off-by: raver119 <raver119@gmail.com>
* less threads for sortTad
Signed-off-by: raver119 <raver119@gmail.com>
* provide conv2d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* remove auther in sort tad kernel code
Signed-off-by: Yurii <yurii@skymind.io>
* provide depthwise_conv2d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* - max_pooling_with_argmax
- null check for special use
Signed-off-by: raver119 <raver119@gmail.com>
* dts cuda
Signed-off-by: raver119 <raver119@gmail.com>
* provide sconv2d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* std cuda
Signed-off-by: raver119 <raver119@gmail.com>
* Refactored non_max_suppression op to conform TF implementation.
* Improved suppression helper.
* provide pooling3d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* minor lstm rearrangements
Signed-off-by: raver119 <raver119@gmail.com>
* more of minor lstm rearrangements
Signed-off-by: raver119 <raver119@gmail.com>
* (bi)dynamic_rnn
Signed-off-by: raver119 <raver119@gmail.com>
* templates init order
Signed-off-by: raver119 <raver119@gmail.com>
* Refactored non_max_suppression op.
* Added cuda kernel for non_max_suppression.
* CPU sort by key/value
Signed-off-by: raver119 <raver119@gmail.com>
* CPU sort TAD by key/value
Signed-off-by: raver119 <raver119@gmail.com>
* CPU sort TAD by key/value tests
Signed-off-by: raver119 <raver119@gmail.com>
* Eliminate compiler error with cuda implementation.
* - repaired gradCheck in cuda
- provide conv2d_bp for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* missed signature
Signed-off-by: raver119 <raver119@gmail.com>
* provide depthwise_conv2d_bp for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* Implementation of lup helper with cuda kernel. Initial commit.
* further work on backprops for convolutions
Signed-off-by: Yurii <yurii@skymind.io>
* CUDA linear sort by key/val
Signed-off-by: raver119 <raver119@gmail.com>
* CUDA tad sort by key/val
Signed-off-by: raver119 <raver119@gmail.com>
* start providing of backprop for pooling2d/3d
Signed-off-by: Yurii <yurii@skymind.io>
* Added atomicAdd for bool datatype.
* dynamic partition concept
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic partition concept
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic partition scalar CUDA
Signed-off-by: raver119 <raver119@gmail.com>
* important comment
Signed-off-by: raver119 <raver119@gmail.com>
* fix pooling2d/3d backprop helpers
Signed-off-by: Yurii <yurii@skymind.io>
* Added non-linear test with dynamic_partition.
* Improved test for dynamic_partition.
* dynamic_partition TAD concept
Signed-off-by: raver119 <raver119@gmail.com>
* - dynamic_partition TAD CUDA impl
- dynamic_partition TAD CPU fix
Signed-off-by: raver119 <raver119@gmail.com>
* - rewrite cpu code for usampling2d/3d
- write cuda code for usampling2d/3d
Signed-off-by: Yurii <yurii@skymind.io>
* dynamic_stitch CUDA vector case
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic_stitch CUDA TAD case concept
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic_stitch CUDA TAD case impl
Signed-off-by: raver119 <raver119@gmail.com>
* Added tests for dynamic_stitch 3D-4D cases.
* minor tests tweaks
Signed-off-by: raver119 <raver119@gmail.com>
* Fixed type check for dynamic stitch.
* min/max bp
Signed-off-by: raver119 <raver119@gmail.com>
* rewrite code for upsampling2d/3d cpu
Signed-off-by: Yurii <yurii@skymind.io>
* reduce min/max/norm_max bp
Signed-off-by: raver119 <raver119@gmail.com>
* lup implementation. Additional enhancements.
* provide code for upsamling2d/3d backprop
Signed-off-by: Yurii <yurii@skymind.io>
* weightedCrossEntropyWithLogits
Signed-off-by: raver119 <raver119@gmail.com>
* Fixed template math atomicMul for 64bit ints.
* Refactored dynamic_partition_bp op.
* inverseBroadcast fix
Signed-off-by: raver119 <raver119@gmail.com>
* DynamicPartitionBP test datatype fixed.
* - nd4j_atomicMul Windows fix
- cpu/NDArrayLambda.hpp excluded from CUDA
Signed-off-by: raver119 <raver119@gmail.com>
2019-06-27 17:37:04 +02:00
|
|
|
}
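
// test_subarray_followed_by_reshape_1: after linspace(1.), row 2 of x holds
// {7, 8, 9}; a subarray view of that row reshaped to {1, 3} must match e.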
TEST_F(NDArrayTest2, test_subarray_followed_by_reshape_1) {

    NDArray x('c', {5, 1, 3}, sd::DataType::FLOAT32);
    NDArray e('c', {1, 3}, {7.f, 8.f, 9.f}, sd::DataType::FLOAT32);
x.linspace(1.);

    auto s = x({2,3, 0,0, 0,0});
    // s.printIndexedBuffer("s");

    auto r = s.reshape(x.ordering(), {1, 3});
    // r.printIndexedBuffer("r");

    ASSERT_EQ(e, r);
}
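
// test_numpy_import_1: deserializes a .npy file from the test resources and
// compares it against a linspace-filled array of the same shape.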
TEST_F(NDArrayTest2, test_numpy_import_1) {
    std::string fname("./resources/arr_3,4_float32.npy");
    auto exp = NDArrayFactory::create<float>('c', {3, 4});
    exp.linspace(0);

    auto array = NDArrayFactory::fromNpyFile(fname.c_str());

    ASSERT_EQ(exp, array);
}