[WIP] few fixes for tests (#177)

* nd4j-aeron profiles

Signed-off-by: raver119 <raver119@gmail.com>

* nd4j-aeron profiles

Signed-off-by: raver119 <raver119@gmail.com>

* skip one long test

Signed-off-by: raver119 <raver119@gmail.com>

* skip one long test

Signed-off-by: raver119 <raver119@gmail.com>

* kryo profile

Signed-off-by: raver119 <raver119@gmail.com>

* few more profiles

Signed-off-by: raver119 <raver119@gmail.com>

* few more profiles

Signed-off-by: raver119 <raver119@gmail.com>

* few more profiles

Signed-off-by: raver119 <raver119@gmail.com>
master
raver119 2020-01-22 16:12:30 +03:00 committed by GitHub
parent a25bb6a11c
commit 25db3a44f1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 523 additions and 13 deletions

View File

@ -726,6 +726,39 @@ public class Nd4jCuda extends org.nd4j.nativeblas.Nd4jCudaHelper {
// #endif //DEV_TESTS_ERRORREFERENCE_H // #endif //DEV_TESTS_ERRORREFERENCE_H
// Parsed from execution/Engine.h
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
// #ifndef SD_ENGINE_H
// #define SD_ENGINE_H
/** enum samediff::Engine */
public static final int
ENGINE_CPU = 0,
ENGINE_CUDA = 1;
// #endif //SD_ENGINE_H
// Parsed from memory/MemoryType.h // Parsed from memory/MemoryType.h
// //
@ -4147,6 +4180,7 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
* these methods suited for FlatBuffers use * these methods suited for FlatBuffers use
*/ */
public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeAsVector(); public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeAsVector();
public native @StdVector IntPointer getShapeAsVectorInt();
public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeInfoAsVector(); public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeInfoAsVector();
public native @Cast("int64_t*") @StdVector LongPointer getShapeInfoAsFlatVector(); public native @Cast("int64_t*") @StdVector LongPointer getShapeInfoAsFlatVector();
public native @Cast("int64_t*") @StdVector LongPointer getShapeAsFlatVector(); public native @Cast("int64_t*") @StdVector LongPointer getShapeAsFlatVector();
@ -6187,6 +6221,7 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
// #include <graph/VariableSpace.h> // #include <graph/VariableSpace.h>
// #include <graph/ContextPrototype.h> // #include <graph/ContextPrototype.h>
// #include <memory/Workspace.h> // #include <memory/Workspace.h>
// #include <execution/Engine.h>
// CUDA-specific includes // CUDA-specific includes
// #ifdef __CUDACC__ // #ifdef __CUDACC__
@ -6237,12 +6272,13 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
// this method returns workspace for object allocations // this method returns workspace for object allocations
public native Workspace oWorkspace(); public native Workspace oWorkspace();
public native void setVariableSpace(VariableSpace variableSpace); public native void setVariableSpace(VariableSpace variableSpace);
public native RandomBuffer getRNG(); public native RandomBuffer getRNG();
public native void setRNG(RandomBuffer rng); public native void setRNG(RandomBuffer rng);
public native void setTargetEngine(@Cast("samediff::Engine") int engine);
public native VariableSpace getVariableSpace(); public native VariableSpace getVariableSpace();
public native LaunchContext launchContext(); public native LaunchContext launchContext();
@ -6395,6 +6431,11 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
// #include <dll.h> // #include <dll.h>
// #include <RandomGenerator.h> // #include <RandomGenerator.h>
// #include <ops/declarable/OpDescriptor.h> // #include <ops/declarable/OpDescriptor.h>
// #include <execution/Engine.h>
// #ifndef __STANDALONE_BUILD__
// #include <config.h>
// #endif
@Namespace("nd4j::graph") @NoOffset public static class ContextPrototype extends Pointer { @Namespace("nd4j::graph") @NoOffset public static class ContextPrototype extends Pointer {
static { Loader.load(); } static { Loader.load(); }
@ -6440,6 +6481,8 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
public native @Cast("bool*") @StdVector BooleanPointer getBArguments(); public native @Cast("bool*") @StdVector BooleanPointer getBArguments();
public native @StdVector IntPointer getAxis(); public native @StdVector IntPointer getAxis();
public native @Cast("samediff::Engine") int engine();
public native @Cast("size_t") long numT(); public native @Cast("size_t") long numT();
public native @Cast("size_t") long numI(); public native @Cast("size_t") long numI();
public native @Cast("size_t") long numB(); public native @Cast("size_t") long numB();
@ -9004,6 +9047,7 @@ public static final int PREALLOC_SIZE = 33554432;
// #define SD_PLATFORMHELPER_H // #define SD_PLATFORMHELPER_H
// #include <ShapeUtils.h> // #include <ShapeUtils.h>
// #include <execution/Engine.h>
// #include <graph/Context.h> // #include <graph/Context.h>
// #include <string> // #include <string>
// #include <pointercast.h> // #include <pointercast.h>
@ -9019,6 +9063,8 @@ public static final int PREALLOC_SIZE = 33554432;
public native @StdString BytePointer name(); public native @StdString BytePointer name();
public native @Cast("samediff::Engine") int engine();
public native @Cast("Nd4jLong") long hash(); public native @Cast("Nd4jLong") long hash();
/** /**
@ -9632,6 +9678,7 @@ public static final int PREALLOC_SIZE = 33554432;
// #include <mutex> // #include <mutex>
// #include <ops/declarable/DeclarableOp.h> // #include <ops/declarable/DeclarableOp.h>
// #include <ops/declarable/PlatformHelper.h> // #include <ops/declarable/PlatformHelper.h>
// #include <execution/Engine.h>
// handlers part // handlers part
// #include <cstdlib> // #include <cstdlib>
@ -9669,13 +9716,13 @@ public static final int PREALLOC_SIZE = 33554432;
public native void registerHelper(PlatformHelper op); public native void registerHelper(PlatformHelper op);
public native @Cast("bool") boolean hasHelper(@Cast("Nd4jLong") long hash); public native @Cast("bool") boolean hasHelper(@Cast("Nd4jLong") long hash, @Cast("samediff::Engine") int engine);
public native DeclarableOp getOperation(@Cast("char*") String name); public native DeclarableOp getOperation(@Cast("char*") String name);
public native DeclarableOp getOperation(@Cast("char*") BytePointer name); public native DeclarableOp getOperation(@Cast("char*") BytePointer name);
public native DeclarableOp getOperation(@Cast("Nd4jLong") long hash); public native DeclarableOp getOperation(@Cast("Nd4jLong") long hash);
public native PlatformHelper getPlatformHelper(@Cast("Nd4jLong") long hash); public native PlatformHelper getPlatformHelper(@Cast("Nd4jLong") long hash, @Cast("samediff::Engine") int engine);
public native @Cast("Nd4jLong*") @StdVector LongPointer getAllHashes(); public native @Cast("Nd4jLong*") @StdVector LongPointer getAllHashes();
@ -9801,6 +9848,7 @@ public static final int PREALLOC_SIZE = 33554432;
// #include <cuda_runtime_api.h> // #include <cuda_runtime_api.h>
// #include <cuda_runtime.h> // #include <cuda_runtime.h>
// #include <cuda_device_runtime_api.h> // #include <cuda_device_runtime_api.h>
// #include "config.h"
// #endif // #endif
// used for MKLDNN etc // used for MKLDNN etc

View File

@ -747,6 +747,39 @@ public class Nd4jCpu extends org.nd4j.nativeblas.Nd4jCpuHelper {
// #endif //DEV_TESTS_ERRORREFERENCE_H // #endif //DEV_TESTS_ERRORREFERENCE_H
// Parsed from execution/Engine.h
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
// #ifndef SD_ENGINE_H
// #define SD_ENGINE_H
/** enum samediff::Engine */
public static final int
ENGINE_CPU = 0,
ENGINE_CUDA = 1;
// #endif //SD_ENGINE_H
// Parsed from Environment.h // Parsed from Environment.h
/******************************************************************************* /*******************************************************************************
@ -4150,6 +4183,7 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
* these methods suited for FlatBuffers use * these methods suited for FlatBuffers use
*/ */
public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeAsVector(); public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeAsVector();
public native @StdVector IntPointer getShapeAsVectorInt();
public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeInfoAsVector(); public native @Cast("Nd4jLong*") @StdVector LongPointer getShapeInfoAsVector();
public native @Cast("int64_t*") @StdVector LongPointer getShapeInfoAsFlatVector(); public native @Cast("int64_t*") @StdVector LongPointer getShapeInfoAsFlatVector();
public native @Cast("int64_t*") @StdVector LongPointer getShapeAsFlatVector(); public native @Cast("int64_t*") @StdVector LongPointer getShapeAsFlatVector();
@ -6190,6 +6224,7 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
// #include <graph/VariableSpace.h> // #include <graph/VariableSpace.h>
// #include <graph/ContextPrototype.h> // #include <graph/ContextPrototype.h>
// #include <memory/Workspace.h> // #include <memory/Workspace.h>
// #include <execution/Engine.h>
// CUDA-specific includes // CUDA-specific includes
// #ifdef __CUDACC__ // #ifdef __CUDACC__
@ -6240,12 +6275,13 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
// this method returns workspace for object allocations // this method returns workspace for object allocations
public native Workspace oWorkspace(); public native Workspace oWorkspace();
public native void setVariableSpace(VariableSpace variableSpace); public native void setVariableSpace(VariableSpace variableSpace);
public native RandomBuffer getRNG(); public native RandomBuffer getRNG();
public native void setRNG(RandomBuffer rng); public native void setRNG(RandomBuffer rng);
public native void setTargetEngine(@Cast("samediff::Engine") int engine);
public native VariableSpace getVariableSpace(); public native VariableSpace getVariableSpace();
public native LaunchContext launchContext(); public native LaunchContext launchContext();
@ -6398,6 +6434,11 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
// #include <dll.h> // #include <dll.h>
// #include <RandomGenerator.h> // #include <RandomGenerator.h>
// #include <ops/declarable/OpDescriptor.h> // #include <ops/declarable/OpDescriptor.h>
// #include <execution/Engine.h>
// #ifndef __STANDALONE_BUILD__
// #include <config.h>
// #endif
@Namespace("nd4j::graph") @NoOffset public static class ContextPrototype extends Pointer { @Namespace("nd4j::graph") @NoOffset public static class ContextPrototype extends Pointer {
static { Loader.load(); } static { Loader.load(); }
@ -6443,6 +6484,8 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
public native @Cast("bool*") @StdVector BooleanPointer getBArguments(); public native @Cast("bool*") @StdVector BooleanPointer getBArguments();
public native @StdVector IntPointer getAxis(); public native @StdVector IntPointer getAxis();
public native @Cast("samediff::Engine") int engine();
public native @Cast("size_t") long numT(); public native @Cast("size_t") long numT();
public native @Cast("size_t") long numI(); public native @Cast("size_t") long numI();
public native @Cast("size_t") long numB(); public native @Cast("size_t") long numB();
@ -11319,6 +11362,7 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
// #define SD_PLATFORMHELPER_H // #define SD_PLATFORMHELPER_H
// #include <ShapeUtils.h> // #include <ShapeUtils.h>
// #include <execution/Engine.h>
// #include <graph/Context.h> // #include <graph/Context.h>
// #include <string> // #include <string>
// #include <pointercast.h> // #include <pointercast.h>
@ -11334,6 +11378,8 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
public native @StdString BytePointer name(); public native @StdString BytePointer name();
public native @Cast("samediff::Engine") int engine();
public native @Cast("Nd4jLong") long hash(); public native @Cast("Nd4jLong") long hash();
/** /**
@ -11843,6 +11889,7 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
// #include <mutex> // #include <mutex>
// #include <ops/declarable/DeclarableOp.h> // #include <ops/declarable/DeclarableOp.h>
// #include <ops/declarable/PlatformHelper.h> // #include <ops/declarable/PlatformHelper.h>
// #include <execution/Engine.h>
// handlers part // handlers part
// #include <cstdlib> // #include <cstdlib>
@ -11880,13 +11927,13 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
public native void registerHelper(PlatformHelper op); public native void registerHelper(PlatformHelper op);
public native @Cast("bool") boolean hasHelper(@Cast("Nd4jLong") long hash); public native @Cast("bool") boolean hasHelper(@Cast("Nd4jLong") long hash, @Cast("samediff::Engine") int engine);
public native DeclarableOp getOperation(@Cast("char*") String name); public native DeclarableOp getOperation(@Cast("char*") String name);
public native DeclarableOp getOperation(@Cast("char*") BytePointer name); public native DeclarableOp getOperation(@Cast("char*") BytePointer name);
public native DeclarableOp getOperation(@Cast("Nd4jLong") long hash); public native DeclarableOp getOperation(@Cast("Nd4jLong") long hash);
public native PlatformHelper getPlatformHelper(@Cast("Nd4jLong") long hash); public native PlatformHelper getPlatformHelper(@Cast("Nd4jLong") long hash, @Cast("samediff::Engine") int engine);
public native @Cast("Nd4jLong*") @StdVector LongPointer getAllHashes(); public native @Cast("Nd4jLong*") @StdVector LongPointer getAllHashes();
@ -14099,6 +14146,21 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
public Pow() { super((Pointer)null); allocate(); } public Pow() { super((Pointer)null); allocate(); }
private native void allocate(); private native void allocate();
} }
@Namespace("nd4j::ops") public static class Pow_bp extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Pow_bp(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public Pow_bp(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public Pow_bp position(long position) {
return (Pow_bp)super.position(position);
}
public Pow_bp() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif // #endif
/** /**
@ -16746,7 +16808,7 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
/******************************************************************************* /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc. * Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K. * Copyright (c) 2019-2020 Konduit K.K.
* *
* This program and the accompanying materials are made available under the * This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at * terms of the Apache License, Version 2.0 which is available at
@ -17899,6 +17961,34 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
} }
// #endif // #endif
/**
* This op calculates lgamma function lgamma(x) = log(Gamma(x))
*
* Input arrays:
* 0: x - input matrix
*
* Output array:
* 0: log of Gamma(x)
*
*/
// #if NOT_EXCLUDED(OP_lgamma)
@Namespace("nd4j::ops") public static class lgamma extends DeclarableOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public lgamma(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public lgamma(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public lgamma position(long position) {
return (lgamma)super.position(position);
}
public lgamma() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
/** /**
* This op calculates digamma function psi(x) = derivative of log(Gamma(x)) * This op calculates digamma function psi(x) = derivative of log(Gamma(x))
* *
@ -18931,6 +19021,39 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
} }
// #endif // #endif
/**
 * triangular_solve op. - reverse Gaussian method for solving systems of linear equations.
*
* input params:
* 0 - the tensor with dimension (x * y * z * ::: * M * M) - left parts of equations
* 1 - the tensor with dimension (x * y * z * ::: * M * K) - right parts of equations
*
* boolean args:
* 0 - lower - default is true (optional) - left part is lower triangular matrix
 * 1 - adjoint - default is false (optional) - indicates whether the input matrix or its adjoint (hermitian adjoint) should be used
*
* return value:
* tensor with dimension (x * y * z * ::: * M * K) with solutions
*
*/
// #if NOT_EXCLUDED(OP_triangular_solve)
@Namespace("nd4j::ops") public static class triangular_solve extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public triangular_solve(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public triangular_solve(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public triangular_solve position(long position) {
return (triangular_solve)super.position(position);
}
public triangular_solve() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
/** /**
 * lu op. - make LUP decomposition of given batch of 2D square matrices
* *
@ -20394,6 +20517,41 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
} }
// #endif // #endif
/**
* This op make area interpolated resize (as OpenCV INTER_AREA algorithm) for given tensor
*
* input array:
* 0 - images - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - size - 1D-Tensor with 2 values (newWidth, newHeight) (if missing a pair of integer args should be provided).
*
 * int args: - provided only when size tensor is missing
* 0 - new height
* 1 - new width
* boolean args:
* 0 - align_corners - optional (default is false)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
// #if NOT_EXCLUDED(OP_resize_area)
@Namespace("nd4j::ops") public static class resize_area extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public resize_area(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public resize_area(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public resize_area position(long position) {
return (resize_area)super.position(position);
}
public resize_area() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
/** /**
* This op make interpolated resize for given tensor with given algorithm. * This op make interpolated resize for given tensor with given algorithm.
* Supported algorithms are bilinear, bicubic, nearest_neighbor. * Supported algorithms are bilinear, bicubic, nearest_neighbor.
@ -21162,6 +21320,36 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block); public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
} }
// #endif // #endif
/*
* multinomial (categorical) random generator draws samples from a multinomial distribution
*
* Input array:
* 0 - 2D ndarray with unnormalized log-probabilities with shape [batch_size (N), num_classes (K)]
* 1 - array with one int value of samples number, number of independent samples to draw for each experiment 1,N.
* Int arguments:
* 0 - optional argument, corresponds to dimension with batch_size
* 1 - optional argument, integer type to use for the output. Default int64.
*
* Output array:
* 0 - 2D ndarray with the drawn samples of shape [batch_size, num_samples]
*/
// #if NOT_EXCLUDED(OP_random_multinomial)
@Namespace("nd4j::ops") public static class random_multinomial extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public random_multinomial(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public random_multinomial(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public random_multinomial position(long position) {
return (random_multinomial)super.position(position);
}
public random_multinomial() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
// #if NOT_EXCLUDED(OP_random_normal) // #if NOT_EXCLUDED(OP_random_normal)
@Namespace("nd4j::ops") public static class random_normal extends DeclarableCustomOp { @Namespace("nd4j::ops") public static class random_normal extends DeclarableCustomOp {

View File

@ -18,6 +18,7 @@ package org.nd4j.aeron.ipc;
import org.agrona.concurrent.UnsafeBuffer; import org.agrona.concurrent.UnsafeBuffer;
import org.apache.commons.lang3.time.StopWatch; import org.apache.commons.lang3.time.StopWatch;
import org.junit.Ignore;
import org.junit.Test; import org.junit.Test;
import org.nd4j.BaseND4JTest; import org.nd4j.BaseND4JTest;
import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.api.ndarray.INDArray;
@ -57,7 +58,10 @@ public class AeronNDArraySerdeTest extends BaseND4JTest {
@Test @Test
@Ignore // timeout, skip step ignored
public void testToAndFromCompressedLarge() { public void testToAndFromCompressedLarge() {
skipUnlessIntegrationTests();
INDArray arr = Nd4j.zeros((int) 1e7); INDArray arr = Nd4j.zeros((int) 1e7);
INDArray compress = Nd4j.getCompressor().compress(arr, "GZIP"); INDArray compress = Nd4j.getCompressor().compress(arr, "GZIP");
assertTrue(compress.isCompressed()); assertTrue(compress.isCompressed());

View File

@ -67,15 +67,105 @@
<profiles> <profiles>
<profile> <profile>
<id>testresources</id> <id>testresources</id>
<!-- Put nd4j-native in profile so that CUDA-only builds succeed --> </profile>
<profile>
<id>nd4j-tests-cpu</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.nd4j</groupId> <groupId>org.nd4j</groupId>
<artifactId>nd4j-native</artifactId> <artifactId>nd4j-native</artifactId>
<version>${project.version}</version> <version>${project.version}</version>
<scope>test</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<environmentVariables>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${user.dir}:${libnd4jhome}/blasbuild/cpu/blas/</LD_LIBRARY_PATH>
</environmentVariables>
<testSourceDirectory>src/test/java</testSourceDirectory>
<includes>
<include>*.java</include>
<include>**/*.java</include>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
</includes>
<junitArtifactName>junit:junit</junitArtifactName>
<systemPropertyVariables>
<org.nd4j.linalg.defaultbackend>org.nd4j.linalg.cpu.nativecpu.CpuBackend</org.nd4j.linalg.defaultbackend>
<org.nd4j.linalg.tests.backendstorun>org.nd4j.linalg.cpu.nativecpu.CpuBackend</org.nd4j.linalg.tests.backendstorun>
</systemPropertyVariables>
<!--
Maximum heap size was set to 8g, as a minimum required value for tests run.
Depending on a build machine, default value is not always enough.
For testing large zoo models, this may not be enough (so comment it out).
-->
<argLine>-Ddtype=float -Xmx8g</argLine>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>nd4j-tests-cuda</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.nd4j</groupId>
<artifactId>nd4j-cuda-10.2</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.maven.surefire</groupId>
<artifactId>surefire-junit47</artifactId>
<version>2.19.1</version>
</dependency>
</dependencies>
<configuration>
<environmentVariables>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${user.dir}:${libnd4jhome}/blasbuild/cuda/blas/</LD_LIBRARY_PATH>
</environmentVariables>
<testSourceDirectory>src/test/java</testSourceDirectory>
<includes>
<include>*.java</include>
<include>**/*.java</include>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
</includes>
<junitArtifactName>junit:junit</junitArtifactName>
<systemPropertyVariables>
<org.nd4j.linalg.defaultbackend>org.nd4j.linalg.jcublas.JCublasBackend</org.nd4j.linalg.defaultbackend>
<org.nd4j.linalg.tests.backendstorun>org.nd4j.linalg.jcublas.JCublasBackend</org.nd4j.linalg.tests.backendstorun>
</systemPropertyVariables>
<!--
Maximum heap size was set to 6g, as a minimum required value for tests run.
Depending on a build machine, default value is not always enough.
-->
<argLine>-Ddtype=float -Xmx6g</argLine>
</configuration>
</plugin>
</plugins>
</build>
</profile> </profile>
</profiles> </profiles>
</project> </project>

View File

@ -58,15 +58,105 @@
<profiles> <profiles>
<profile> <profile>
<id>testresources</id> <id>testresources</id>
<!-- Put nd4j-native in profile so that CUDA-only builds succeed --> </profile>
<profile>
<id>nd4j-tests-cpu</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.nd4j</groupId> <groupId>org.nd4j</groupId>
<artifactId>nd4j-native</artifactId> <artifactId>nd4j-native</artifactId>
<version>${project.version}</version> <version>${project.version}</version>
<scope>test</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<environmentVariables>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${user.dir}:${libnd4jhome}/blasbuild/cpu/blas/</LD_LIBRARY_PATH>
</environmentVariables>
<testSourceDirectory>src/test/java</testSourceDirectory>
<includes>
<include>*.java</include>
<include>**/*.java</include>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
</includes>
<junitArtifactName>junit:junit</junitArtifactName>
<systemPropertyVariables>
<org.nd4j.linalg.defaultbackend>org.nd4j.linalg.cpu.nativecpu.CpuBackend</org.nd4j.linalg.defaultbackend>
<org.nd4j.linalg.tests.backendstorun>org.nd4j.linalg.cpu.nativecpu.CpuBackend</org.nd4j.linalg.tests.backendstorun>
</systemPropertyVariables>
<!--
Maximum heap size was set to 8g, as a minimum required value for tests run.
Depending on a build machine, default value is not always enough.
For testing large zoo models, this may not be enough (so comment it out).
-->
<argLine>-Ddtype=float -Xmx8g</argLine>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>nd4j-tests-cuda</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.nd4j</groupId>
<artifactId>nd4j-cuda-10.2</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.maven.surefire</groupId>
<artifactId>surefire-junit47</artifactId>
<version>2.19.1</version>
</dependency>
</dependencies>
<configuration>
<environmentVariables>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${user.dir}:${libnd4jhome}/blasbuild/cuda/blas/</LD_LIBRARY_PATH>
</environmentVariables>
<testSourceDirectory>src/test/java</testSourceDirectory>
<includes>
<include>*.java</include>
<include>**/*.java</include>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
</includes>
<junitArtifactName>junit:junit</junitArtifactName>
<systemPropertyVariables>
<org.nd4j.linalg.defaultbackend>org.nd4j.linalg.jcublas.JCublasBackend</org.nd4j.linalg.defaultbackend>
<org.nd4j.linalg.tests.backendstorun>org.nd4j.linalg.jcublas.JCublasBackend</org.nd4j.linalg.tests.backendstorun>
</systemPropertyVariables>
<!--
Maximum heap size was set to 6g, as a minimum required value for tests run.
Depending on a build machine, default value is not always enough.
-->
<argLine>-Ddtype=float -Xmx6g</argLine>
</configuration>
</plugin>
</plugins>
</build>
</profile> </profile>
</profiles> </profiles>

View File

@ -126,15 +126,105 @@
<profiles> <profiles>
<profile> <profile>
<id>testresources</id> <id>testresources</id>
<!-- Put nd4j-native in profile so that CUDA-only builds succeed --> </profile>
<profile>
<id>nd4j-tests-cpu</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies> <dependencies>
<dependency> <dependency>
<groupId>org.nd4j</groupId> <groupId>org.nd4j</groupId>
<artifactId>nd4j-native</artifactId> <artifactId>nd4j-native</artifactId>
<version>${project.version}</version> <version>${project.version}</version>
<scope>test</scope>
</dependency> </dependency>
</dependencies> </dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<environmentVariables>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${user.dir}:${libnd4jhome}/blasbuild/cpu/blas/</LD_LIBRARY_PATH>
</environmentVariables>
<testSourceDirectory>src/test/java</testSourceDirectory>
<includes>
<include>*.java</include>
<include>**/*.java</include>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
</includes>
<junitArtifactName>junit:junit</junitArtifactName>
<systemPropertyVariables>
<org.nd4j.linalg.defaultbackend>org.nd4j.linalg.cpu.nativecpu.CpuBackend</org.nd4j.linalg.defaultbackend>
<org.nd4j.linalg.tests.backendstorun>org.nd4j.linalg.cpu.nativecpu.CpuBackend</org.nd4j.linalg.tests.backendstorun>
</systemPropertyVariables>
<!--
Maximum heap size was set to 8g, as a minimum required value for tests run.
Depending on a build machine, default value is not always enough.
For testing large zoo models, this may not be enough (so comment it out).
-->
<argLine>-Ddtype=float -Xmx8g</argLine>
</configuration>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>nd4j-tests-cuda</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.nd4j</groupId>
<artifactId>nd4j-cuda-10.2</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.maven.surefire</groupId>
<artifactId>surefire-junit47</artifactId>
<version>2.19.1</version>
</dependency>
</dependencies>
<configuration>
<environmentVariables>
<LD_LIBRARY_PATH>${env.LD_LIBRARY_PATH}:${user.dir}:${libnd4jhome}/blasbuild/cuda/blas/</LD_LIBRARY_PATH>
</environmentVariables>
<testSourceDirectory>src/test/java</testSourceDirectory>
<includes>
<include>*.java</include>
<include>**/*.java</include>
<include>**/Test*.java</include>
<include>**/*Test.java</include>
<include>**/*TestCase.java</include>
</includes>
<junitArtifactName>junit:junit</junitArtifactName>
<systemPropertyVariables>
<org.nd4j.linalg.defaultbackend>org.nd4j.linalg.jcublas.JCublasBackend</org.nd4j.linalg.defaultbackend>
<org.nd4j.linalg.tests.backendstorun>org.nd4j.linalg.jcublas.JCublasBackend</org.nd4j.linalg.tests.backendstorun>
</systemPropertyVariables>
<!--
Maximum heap size was set to 6g, as a minimum required value for tests run.
Depending on a build machine, default value is not always enough.
-->
<argLine>-Ddtype=float -Xmx6g</argLine>
</configuration>
</plugin>
</plugins>
</build>
</profile> </profile>
</profiles> </profiles>
</project> </project>