From a35926c6e9d2f3d265fe1c71a24c02a4a1212031 Mon Sep 17 00:00:00 2001
From: Yurii Shyrma
Date: Sat, 31 Aug 2019 20:57:39 +0300
Subject: [PATCH] - add parameter alpha to elu and lrelu_bp (#213)

* - add parameter alpha to elu and lrelu_bp

Signed-off-by: Yurii

* - forgot to correct header activations.h

Signed-off-by: Yurii
---
 libnd4j/include/loops/legacy_ops.h            |  4 +-
 .../declarable/generic/activations/elu.cpp    | 20 +++++---
 .../declarable/generic/activations/lrelu.cpp  | 16 +++---
 .../ops/declarable/headers/activations.h      | 10 ++--
 .../declarable/helpers/cpu/legacy_helper.cpp  | 26 ++++++----
 .../declarable/helpers/cuda/legacy/relu.cu    | 26 ++++++----
 .../ops/declarable/helpers/legacy_helpers.h   |  4 +-
 libnd4j/include/ops/ops.h                     | 16 +++---
 libnd4j/include/templatemath.h                | 20 ++++----
 .../layers_tests/DeclarableOpsTests3.cpp      | 51 ++++++-------------
 .../layers_tests/DeclarableOpsTests5.cpp      |  2 +-
 11 files changed, 97 insertions(+), 98 deletions(-)

diff --git a/libnd4j/include/loops/legacy_ops.h b/libnd4j/include/loops/legacy_ops.h
index b0d891287..c298dde3a 100644
--- a/libnd4j/include/loops/legacy_ops.h
+++ b/libnd4j/include/loops/legacy_ops.h
@@ -116,7 +116,6 @@
 
 #define TRANSFORM_STRICT_OPS \
-        (3, ELUDerivative), \
         (4, TanhDerivative), \
         (5, HardTanhDerivative), \
         (6, SigmoidDerivative), \
@@ -148,7 +147,6 @@
         (32, ATan), \
         (33, HardTanh), \
         (34, SoftSign), \
-        (35, ELU), \
         (36, HardSigmoid), \
         (37, RationalTanh) ,\
         (38, RectifiedTanh) ,\
@@ -211,6 +209,8 @@
         (4, ReverseDivide),\
         (5, ReverseSubtract),\
         (6, MaxPairwise),\
+        (7, ELU), \
+        (8, ELUDerivative), \
         (13, MinPairwise),\
         (14, CopyPws),\
         (15, Mod),\
diff --git a/libnd4j/include/ops/declarable/generic/activations/elu.cpp b/libnd4j/include/ops/declarable/generic/activations/elu.cpp
index 03c0cf834..03670ddab 100644
--- a/libnd4j/include/ops/declarable/generic/activations/elu.cpp
+++ b/libnd4j/include/ops/declarable/generic/activations/elu.cpp
@@ -25,12 +25,14 @@
 #include <ops/declarable/helpers/legacy_helpers.h>
 
 namespace nd4j {
     namespace ops {
-        CONFIGURABLE_OP_IMPL(elu, 1, 1, true, 0, 0) {
+        CONFIGURABLE_OP_IMPL(elu, 1, 1, true, -2, 0) {
+
             auto input = INPUT_VARIABLE(0);
             auto output = OUTPUT_VARIABLE(0);
 
-            input->applyTransform(nd4j::transform::ELU, output, nullptr);
-            STORE_RESULT(output);
+            const auto alpha = block.numT() > 0 ? T_ARG(0) : 1.f;
+
+            input->applyScalar(nd4j::scalar::ELU, alpha, output);
 
             return Status::OK();
         }
@@ -41,14 +43,18 @@ namespace nd4j {
                 ->setAllowedOutputTypes(0, {ALL_FLOATS});
         }
 
-        CONFIGURABLE_OP_IMPL(elu_bp, 2, 1, true, 0, 0) {
+        CONFIGURABLE_OP_IMPL(elu_bp, 2, 1, true, -2, 0) {
+
             auto input = INPUT_VARIABLE(0);
             auto epsilon = INPUT_VARIABLE(1);
-            auto z = OUTPUT_VARIABLE(0);
+            auto output = OUTPUT_VARIABLE(0);
+
+            const auto alpha = block.numT() > 0 ? T_ARG(0) : 1.f;
+
+            // input->applyPairwiseTransform(pairwise::ELUDerivativeE, epsilon, output);
+            helpers::eluDerivative(block.launchContext(), input, epsilon, output, alpha);
 
-            //input->applyPairwiseTransform(pairwise::ELUDerivativeE, epsilon, z, nullptr);
-            helpers::eluDerivative(block.launchContext(), input, epsilon, z);
             return Status::OK();
         }
diff --git a/libnd4j/include/ops/declarable/generic/activations/lrelu.cpp b/libnd4j/include/ops/declarable/generic/activations/lrelu.cpp
index 68a460b56..ef65c4822 100644
--- a/libnd4j/include/ops/declarable/generic/activations/lrelu.cpp
+++ b/libnd4j/include/ops/declarable/generic/activations/lrelu.cpp
@@ -25,15 +25,15 @@
 #include <ops/declarable/helpers/legacy_helpers.h>
 
 namespace nd4j {
     namespace ops {
-        CONFIGURABLE_OP_IMPL(lrelu, 1, 1, true, 1, 0) {
+        CONFIGURABLE_OP_IMPL(lrelu, 1, 1, true, -2, 0) {
             auto input = INPUT_VARIABLE(0);
             auto output = OUTPUT_VARIABLE(0);
 
-            float t = block.numT() > 0 ? T_ARG(0) : 0.0f;
+            float alpha = block.numT() > 0 ? T_ARG(0) : 0.01f;
 
-            input->applyScalar(nd4j::scalar::LeakyRELU, t, output);
+            input->applyScalar(nd4j::scalar::LeakyRELU, alpha, output);
             STORE_RESULT(output);
-
+
             return Status::OK();
         }
 
@@ -42,15 +42,17 @@ namespace nd4j {
                 ->setAllowedInputTypes(0, DataType::ANY)
                 ->setAllowedOutputTypes(0, {ALL_FLOATS});
         }
-
-        CONFIGURABLE_OP_IMPL(lrelu_bp, 2, 1, true, 0, 0) {
+
+        CONFIGURABLE_OP_IMPL(lrelu_bp, 2, 1, true, -2, 0) {
             auto input = INPUT_VARIABLE(0);
             auto epsilon = INPUT_VARIABLE(1);
             auto z = OUTPUT_VARIABLE(0);
 
+            float alpha = block.numT() > 0 ? T_ARG(0) : 0.01f;
+
             //input->applyPairwiseTransform(pairwise::LRELUDerivativeE, epsilon, z, nullptr);
-            helpers::leakyReluDerivative(block.launchContext(), input, epsilon, z);
+            helpers::leakyReluDerivative(block.launchContext(), input, epsilon, z, alpha);
 
             return Status::OK();
         }
diff --git a/libnd4j/include/ops/declarable/headers/activations.h b/libnd4j/include/ops/declarable/headers/activations.h
index ecc55351a..9d0b22198 100644
--- a/libnd4j/include/ops/declarable/headers/activations.h
+++ b/libnd4j/include/ops/declarable/headers/activations.h
@@ -82,8 +82,8 @@ namespace nd4j {
     * Math is: x < 0 ? alpha * x : x;
     */
    #if NOT_EXCLUDED(OP_lrelu)
-    DECLARE_CONFIGURABLE_OP(lrelu, 1, 1, true, 0, 0);
-    DECLARE_CONFIGURABLE_OP(lrelu_bp, 2, 1, true, 0, 0);
+    DECLARE_CONFIGURABLE_OP(lrelu, 1, 1, true, -2, 0);
+    DECLARE_CONFIGURABLE_OP(lrelu_bp, 2, 1, true, -2, 0);
    #endif
 
    /**
@@ -91,8 +91,8 @@ namespace nd4j {
     * Math is: x >= 0 ? x : exp(x) - 1;
     */
    #if NOT_EXCLUDED(OP_elu)
-    DECLARE_CONFIGURABLE_OP(elu, 1, 1, true, 0, 0);
-    DECLARE_CONFIGURABLE_OP(elu_bp, 2, 1, true, 0, 0);
+    DECLARE_CONFIGURABLE_OP(elu, 1, 1, true, -2, 0);
+    DECLARE_CONFIGURABLE_OP(elu_bp, 2, 1, true, -2, 0);
    #endif
 
    /**
@@ -157,7 +157,7 @@ namespace nd4j {
    /**
     * This is Concatenated RELU implementation.
     * What happens inside: RELU(Concat((x, -x, {-1})))
-    *
+    *
     * PLEASE NOTE: Concatenation will double amount of features available in input
     */
    #if NOT_EXCLUDED(OP_crelu)
diff --git a/libnd4j/include/ops/declarable/helpers/cpu/legacy_helper.cpp b/libnd4j/include/ops/declarable/helpers/cpu/legacy_helper.cpp
index d673e64bd..09cb2df2e 100644
--- a/libnd4j/include/ops/declarable/helpers/cpu/legacy_helper.cpp
+++ b/libnd4j/include/ops/declarable/helpers/cpu/legacy_helper.cpp
@@ -81,29 +81,35 @@ namespace helpers {
     }
 
     template <typename T>
-    static void leakyReluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output) {
-        auto functor = LAMBDA_TT(x, y){
-            return x >= (T)0.f ? y : T(0.f);
+    static void leakyReluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output, const float alpha) {
+
+        const T alphaT = static_cast<T>(alpha);
+
+        auto functor = LAMBDA_TT(x, y, alphaT) {
+            return x < 0 ? alphaT * y : y;
         };
 
         input->applyPairwiseLambda(epsilon, functor, output);
     }
 
-    void leakyReluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput) {
-        BUILD_SINGLE_SELECTOR(theFirst->dataType(), leakyReluDerivative_, (theFirst, theSecond, theOutput), FLOAT_TYPES);
+    void leakyReluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput, const float alpha) {
+        BUILD_SINGLE_SELECTOR(theFirst->dataType(), leakyReluDerivative_, (theFirst, theSecond, theOutput, alpha), FLOAT_TYPES);
     }
 
     template <typename T>
-    static void eluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output) {
-        auto functor = LAMBDA_TT(x, y){
-            return y * nd4j::math::nd4j_eluderivative<T, T>(x);
+    static void eluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output, const float alpha) {
+
+        const T alphaT = static_cast<T>(alpha);
+
+        auto functor = LAMBDA_TT(x, y, alphaT){
+            return y * nd4j::math::nd4j_eluderivative<T, T>(x, alphaT);
         };
 
         input->applyPairwiseLambda(epsilon, functor, output);
     }
 
-    void eluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput) {
-        BUILD_SINGLE_SELECTOR(theFirst->dataType(), eluDerivative_, (theFirst, theSecond, theOutput), FLOAT_TYPES);
+    void eluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput, const float alpha) {
+        BUILD_SINGLE_SELECTOR(theFirst->dataType(), eluDerivative_, (theFirst, theSecond, theOutput, alpha), FLOAT_TYPES);
     }
 
     template <typename T>
diff --git a/libnd4j/include/ops/declarable/helpers/cuda/legacy/relu.cu b/libnd4j/include/ops/declarable/helpers/cuda/legacy/relu.cu
index a0f30a116..c2dd4919d 100644
--- a/libnd4j/include/ops/declarable/helpers/cuda/legacy/relu.cu
+++ b/libnd4j/include/ops/declarable/helpers/cuda/legacy/relu.cu
@@ -66,29 +66,35 @@ namespace nd4j {
     }
 
     template <typename T>
-    linkage void leakyReluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output) {
-        auto functor = LAMBDA_TT(x, y){
-            return x >= (T)0.f ? y : T(0.f);
+    linkage void leakyReluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output, const float alpha) {
+
+        const T alphaT = static_cast<T>(alpha);
+
+        auto functor = LAMBDA_TT(x, y, alphaT) {
+            return x < 0 ? alphaT * y : y;
         };
 
         input->applyPairwiseLambda(epsilon, functor, output);
     }
 
-    void leakyReluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput) {
-        BUILD_SINGLE_SELECTOR(theFirst->dataType(), leakyReluDerivative_, (theFirst, theSecond, theOutput), FLOAT_TYPES);
+    void leakyReluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput, const float alpha) {
+        BUILD_SINGLE_SELECTOR(theFirst->dataType(), leakyReluDerivative_, (theFirst, theSecond, theOutput, alpha), FLOAT_TYPES);
     }
 
     template <typename T>
-    linkage void eluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output) {
-        auto functor = LAMBDA_TT(x, y){
-            return y * nd4j::math::nd4j_eluderivative<T, T>(x);
+    linkage void eluDerivative_(NDArray* input, NDArray* epsilon, NDArray* output, const float alpha) {
+
+        const T alphaT = static_cast<T>(alpha);
+
+        auto functor = LAMBDA_TT(x, y, alphaT){
+            return y * nd4j::math::nd4j_eluderivative<T, T>(x, alphaT);
         };
 
         input->applyPairwiseLambda(epsilon, functor, output);
     }
 
-    void eluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput) {
-        BUILD_SINGLE_SELECTOR(theFirst->dataType(), eluDerivative_, (theFirst, theSecond, theOutput), FLOAT_TYPES);
+    void eluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput, const float alpha) {
+        BUILD_SINGLE_SELECTOR(theFirst->dataType(), eluDerivative_, (theFirst, theSecond, theOutput, alpha), FLOAT_TYPES);
     }
 
     template <typename T>
diff --git a/libnd4j/include/ops/declarable/helpers/legacy_helpers.h b/libnd4j/include/ops/declarable/helpers/legacy_helpers.h
index 476c743ea..dfe338864 100644
--- a/libnd4j/include/ops/declarable/helpers/legacy_helpers.h
+++ b/libnd4j/include/ops/declarable/helpers/legacy_helpers.h
@@ -46,8 +46,8 @@ namespace helpers {
     void reluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond);
     void reluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
     void relu6Derivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
-    void leakyReluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
-    void eluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
+    void leakyReluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput, const float alpha);
+    void eluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput, const float alpha);
     void seluDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
     void cubeDerivative(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
     void reduceNorm1(nd4j::LaunchContext * context, NDArray* theFirst, NDArray* theSecond, NDArray* theOutput);
diff --git a/libnd4j/include/ops/ops.h b/libnd4j/include/ops/ops.h
index e4fef2c3c..a80e274ca 100644
--- a/libnd4j/include/ops/ops.h
+++ b/libnd4j/include/ops/ops.h
@@ -2271,26 +2271,26 @@ namespace simdOps {
         }
     };
 
-    template <typename X>
+    template <typename X, typename Y, typename Z>
     class ELU {
     public:
         no_op_exec_special_same
         no_op_exec_special_same_cuda
 
-        op_def static X op(X d1, X *params) {
-            return nd4j::math::nd4j_elu<X, X>(d1);
+        op_def static Z op(X d1, Y d2, Z *params) {
+            return nd4j::math::nd4j_elu<X, Z>(d1, static_cast<X>(d2));
         }
     };
 
-    template <typename X>
+    template <typename X, typename Y, typename Z>
     class ELUDerivative {
     public:
         no_op_exec_special_same
         no_op_exec_special_same_cuda
 
-        op_def static X op(X d1, X *params) {
-            return nd4j::math::nd4j_eluderivative<X, X>(d1);
+        op_def static Z op(X d1, Y d2, Z *params) {
+            return nd4j::math::nd4j_eluderivative<X, Z>(d1, static_cast<X>(d2));
         }
     };
 
@@ -3716,7 +3716,7 @@ namespace simdOps {
             return reduction;
         }
 
-        op_def static Z op(X d1, X d2, Z *extraParamsRef) {
+        op_def static Z op(X d1, X d2, Z *extraParamsRef) {
            double eps = nd4j::math::nd4j_abs<Z>(extraParamsRef[2]);
            return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps));
         }
@@ -4540,4 +4540,4 @@ namespace simdOps {
 }
 
 #endif
-
+
diff --git a/libnd4j/include/templatemath.h b/libnd4j/include/templatemath.h
index bfa098cee..68b59f2d4 100644
--- a/libnd4j/include/templatemath.h
+++ b/libnd4j/include/templatemath.h
@@ -130,13 +130,12 @@ namespace nd4j {
     }
 
     template <typename T, typename Z>
-    math_def inline Z nd4j_elu(T val) {
-        if (val >= (T) 0.f) return val;
-        else return nd4j_exp<T, Z>(val) - (Z) 1.0f;
-        //return val >= 0.0 ? val : (nd4j_exp<T, Z>(val) - 1.0);
+    math_def inline Z nd4j_elu(T val, T alpha) {
+        if (val >= (T) 0.f)
+            return val;
+        return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
     }
 
-
     template <typename T, typename Z>
     math_def inline Z nd4j_leakyrelu(T val,T alpha) {
         if (val < (T) 0.0f)
@@ -145,13 +144,14 @@ namespace nd4j {
         return val;
     }
-
     template <typename T, typename Z>
-    math_def inline Z nd4j_eluderivative(T val) {
-        if (val >= (T) 0.0f) return (Z) 1.0f;
-        else return nd4j_exp<T, Z>(val);
+    math_def inline Z nd4j_eluderivative(T val, T alpha) {
+        if (val >= static_cast<T>(0.0f))
+            return static_cast<Z>(1.0f);
+        return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
         //return val >= 0.0 ? 1.0 : nd4j_exp<T, Z>(val);
     }
 
+
     template <typename T, typename Z>
     math_def inline Z nd4j_sin(T val);
 
@@ -283,7 +283,7 @@ namespace nd4j {
 #ifdef NATIVE_HALFS
             if (value < (float16) 0.f) {
                  return float16(__hneg(value.data));
-            } else
+            } else
                 return value;
 #else
             return (float16) fabsf((float) value);
diff --git a/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests3.cpp b/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests3.cpp
index 3b4ff6cd0..1ec9650f9 100644
--- a/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests3.cpp
+++ b/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests3.cpp
@@ -2794,53 +2794,42 @@ TEST_F(DeclarableOpsTests3, svd_test11) {
 TEST_F(DeclarableOpsTests3, elu_test1) {
 
     auto x = NDArrayFactory::create<double>('c', {3,3}, {0.1, .2, .3, -.4,-.5,-.6, .7, .8, .9});
-//    auto expS = NDArrayFactory::create<double>('c', {3});
-//    auto expU = NDArrayFactory::create<double>('c', {3,3});
-    auto exp = NDArrayFactory::create<double>('c', {3,3}, {.1, .2, .3, -0.32968, -0.393469, -0.451188, .7, .8, .9});
+    auto exp = NDArrayFactory::create<double>('c', {3,3}, {.1, .2, .3, 0.5*-0.32968, 0.5*-0.393469, 0.5*-0.451188, .7, .8, .9});
 
     nd4j::ops::elu op;
-    auto results = op.execute({&x}, {}, {});
+    auto results = op.execute({&x}, {0.5}, {});
 
     ASSERT_EQ(ND4J_STATUS_OK, results->status());
 
     auto s = results->at(0);
-//    auto u = results->at(1);
-//    auto v = results->at(2);
-//    s->printIndexedBuffer("ELU");
     ASSERT_TRUE(exp.equalsTo(s));
 
     delete results;
 }
 
 ///////////////////////////////////////////////////////////////////
 TEST_F(DeclarableOpsTests3, elu_bp_test1) {
 
-    auto x = NDArrayFactory::create<double>('c', {3, 3}, {0.1, .2, .3, -.4, -.5, -.6, .7, .8, .9});
-    auto eps = NDArrayFactory::create<double>('c', {3,3});
-    eps.assign(2.);
-//    auto expU = NDArrayFactory::create<double>('c', {3,3});
-    auto exp = NDArrayFactory::create<double>('c', {3, 3}, {2, 2, 2, 1.34064, 1.213061, 1.097623, 2, 2, 2});
+    auto x = NDArrayFactory::create<double>('c', {3, 3}, {0.1, .2, .3, -.4, -.5, -.6, .7, .8, .9});
+    auto eps = NDArrayFactory::create<double>('c', {3,3});
+    eps.assign(2.);
+    auto exp = NDArrayFactory::create<double>('c', {3, 3}, {2, 2, 2, 0.5*1.34064, 0.5*1.213061, 0.5*1.097623, 2, 2, 2});
 
-    nd4j::ops::elu_bp op;
-    auto results = op.execute({ &x, &eps }, {}, {});
+    nd4j::ops::elu_bp op;
+    auto results = op.execute({ &x, &eps }, {0.5}, {});
 
-    ASSERT_EQ(ND4J_STATUS_OK, results->status());
+    ASSERT_EQ(ND4J_STATUS_OK, results->status());
 
-    auto s = results->at(0);
-//    auto u = results->at(1);
-//    auto v = results->at(2);
-//    s->printIndexedBuffer("ELU_BP");
-    ASSERT_TRUE(exp.equalsTo(s));
+    auto s = results->at(0);
+    ASSERT_TRUE(exp.equalsTo(s));
 
-    delete results;
+    delete results;
 }
 
 ///////////////////////////////////////////////////////////////////
 TEST_F(DeclarableOpsTests3, lrelu_test1) {
 
     auto x = NDArrayFactory::create<double>('c', {3,3}, {1, 2, 3, -4,-5,-6, 7, 8, 9});
-//    auto expS = NDArrayFactory::create<double>('c', {3});
-//    auto expU = NDArrayFactory::create<double>('c', {3,3});
     auto exp = NDArrayFactory::create<double>('c', {3,3}, {1, 2, 3, -0.8, -1., -1.2, 7, 8, 9});
 
     nd4j::ops::lrelu op;
@@ -2849,20 +2838,16 @@ TEST_F(DeclarableOpsTests3, lrelu_test1) {
     ASSERT_EQ(ND4J_STATUS_OK, results->status());
 
     auto s = results->at(0);
-//    auto u = results->at(1);
-//    auto v = results->at(2);
-//    s->printIndexedBuffer("LRELU");
     ASSERT_TRUE(exp.equalsTo(s));
 
     delete results;
 }
 
 TEST_F(DeclarableOpsTests3, lrelu_test2) {
 
     auto x = NDArrayFactory::create<double>('c', {3,3}, {1, 2, 3, -4,-5,-6, 7, 8, 9});
-//    auto expS = NDArrayFactory::create<double>('c', {3});
     auto eps = NDArrayFactory::create<double>('c', {3,3}, {2,2,2,2,2,2,2, 2,2});
-    auto exp = NDArrayFactory::create<double>('c', {3,3}, {2, 2, 2, 0, 0, 0, 2, 2, 2});
+    auto exp = NDArrayFactory::create<double>('c', {3,3}, {2, 2, 2, 0.4, 0.4, 0.4, 2, 2, 2});
 
     nd4j::ops::lrelu_bp op;
     auto results = op.execute({&x, &eps}, {0.2}, {});
@@ -2870,9 +2855,6 @@ TEST_F(DeclarableOpsTests3, lrelu_test2) {
     ASSERT_EQ(ND4J_STATUS_OK, results->status());
 
     auto s = results->at(0);
-//    auto u = results->at(1);
-//    auto v = results->at(2);
-//    s->printIndexedBuffer("LRELU_BP");
     ASSERT_TRUE(exp.equalsTo(s));
 
     delete results;
@@ -2882,8 +2864,6 @@ TEST_F(DeclarableOpsTests3, lrelu_test2) {
 ///////////////////////////////////////////////////////////////////
 TEST_F(DeclarableOpsTests3, selu_test1) {
 
     auto x = NDArrayFactory::create<double>('c', {3,3}, {1, 2, 3, -4,-5,-6, 7, 8, 9});
-//    auto expS = NDArrayFactory::create<double>('c', {3});
-//    auto expU = NDArrayFactory::create<double>('c', {3,3});
     auto exp = NDArrayFactory::create<double>('c', {3,3}, {1.050701, 2.101402, 3.152103, -1.725899, -1.746253, -1.753742, 7.354907, 8.405608, 9.456309});
 
     nd4j::ops::selu op;
     auto results = op.execute({&x}, {}, {});
 
     ASSERT_EQ(ND4J_STATUS_OK, results->status());
 
     auto s = results->at(0);
-//    s->printIndexedBuffer("SELU");
     ASSERT_TRUE(exp.equalsTo(s));
 
     delete results;
diff --git a/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests5.cpp b/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests5.cpp
index 3af53bad0..86acca29c 100644
--- a/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests5.cpp
+++ b/libnd4j/tests_cpu/layers_tests/DeclarableOpsTests5.cpp
@@ -2761,7 +2761,7 @@ TEST_F(DeclarableOpsTests5, ELU_1) {
     auto exp = NDArrayFactory::create<double>('c', {2, 2, 2}, { -0.63212055, 2. , 1.5, -0.753403, 1., 2., 2., 1.});
     auto res = NDArrayFactory::create<double>('c', {2, 2, 2});
 
-    input.applyTransform(transform::ELU, &res);
+    input.applyScalar(nd4j::scalar::ELU, 1.f, &res);
 
     ASSERT_TRUE(res.equalsTo(&exp));
 }
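
Note: below is a minimal usage sketch of the reworked ops, mirroring the updated tests above. It is not part of the patch; the shapes and values are illustrative, and the two includes are assumed from the test suite's conventions rather than taken from this diff.

    #include <NDArrayFactory.h>
    #include <ops/declarable/CustomOperations.h>

    // After this patch, alpha arrives as the first T-argument of the op:
    //   elu / elu_bp    fall back to alpha = 1.0  when no T-arg is given,
    //   lrelu / lrelu_bp fall back to alpha = 0.01.
    void eluAlphaSketch() {
        auto x   = NDArrayFactory::create<double>('c', {3}, {0.5, -0.5, -1.0});
        auto eps = NDArrayFactory::create<double>('c', {3}, {2.0, 2.0, 2.0});

        // forward: elu(x) = x >= 0 ? x : alpha * (exp(x) - 1)
        nd4j::ops::elu elu;
        auto fwd = elu.execute({&x}, {0.5}, {});

        // backward: grad = eps * (x >= 0 ? 1 : alpha * exp(x))
        nd4j::ops::elu_bp eluBp;
        auto bwd = eluBp.execute({&x, &eps}, {0.5}, {});

        // leaky relu backward: grad = x < 0 ? alpha * eps : eps
        nd4j::ops::lrelu_bp lreluBp;
        auto lbwd = lreluBp.execute({&x, &eps}, {0.2}, {});

        // execute() returns heap-allocated result sets, freed as in the tests
        delete fwd;
        delete bwd;
        delete lbwd;
    }

One behavioral consequence worth noting: before this change lrelu_bp zeroed the gradient for negative inputs (x >= 0 ? y : 0), whereas with the patch it scales epsilon by alpha (x < 0 ? alpha * y : y), which matches the forward definition x < 0 ? alpha * x : x.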