cavis/libnd4j/include/loops/cuda/random.cu

/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <op_boilerplate.h>
#include <loops/random.h>
#include <dll.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/DebugHelper.h>
#include <specials_cuda.h>
using namespace randomOps;
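// This translation unit implements the CUDA side of the legacy random ops.
// Three kernel flavors exist: "single" (output only), "double" (one input plus
// output) and "triple" (two inputs plus output). The generic wrappers below
// simply forward to RandomFunction<T>::execTransformCuda for the selected op.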
template <typename T, typename OpClass>
static inline __device__ void randomSingleGeneric(
        Nd4jPointer state,
        void *z, Nd4jLong *zShapeBuffer,
        void *extraArguments) {

    functions::random::RandomFunction<T>::template execTransformCuda<OpClass>(
            state,
            z, zShapeBuffer,
            extraArguments);
}

template <typename T, typename OpClass>
static inline __device__ void randomDoubleGeneric(
        Nd4jPointer state,
        void *x, Nd4jLong *xShapeBuffer,
        void *z, Nd4jLong *zShapeBuffer,
        void *extraArguments) {

    functions::random::RandomFunction<T>::template execTransformCuda<OpClass>(
            state,
            x, xShapeBuffer,
            z, zShapeBuffer,
            extraArguments);
}

template <typename T, typename OpClass>
static inline __device__ void randomTripleGeneric(
        Nd4jPointer state,
        void *x, Nd4jLong *xShapeBuffer,
        void *y, Nd4jLong *yShapeBuffer,
        void *z, Nd4jLong *zShapeBuffer,
        void *extraArguments) {

    functions::random::RandomFunction<T>::template execTransformCuda<OpClass>(
            state,
            x, xShapeBuffer,
            y, yShapeBuffer,
            z, zShapeBuffer,
            extraArguments);
}
#ifndef __CLION_IDE__
// here we generate kernels for target operations
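// Each DISPATCH_KERNEL_SIMPLE invocation below instantiates one __global__
// kernel per entry of RANDOM_OPS for the given element type, so every
// (operation, type) pair ends up with its own concrete kernel.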
DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, double, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, float16, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomSingle_, randomSingleGeneric, bfloat16, INPUT(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, float, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, double, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, float16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomDouble_, randomDoubleGeneric, bfloat16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, float, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, double, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, float16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
DISPATCH_KERNEL_SIMPLE(randomTriple_, randomTripleGeneric, bfloat16, INPUT(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments), PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))
#endif
namespace functions {
namespace random {
template<typename T>
template<typename OpClass>
void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<T*>(vx);
    auto y = reinterpret_cast<T*>(vy);
    auto z = reinterpret_cast<T*>(vz);
    auto extraArguments = reinterpret_cast<T*>(vextraArguments);

    if (OpClass::requiresSpecial) {
        OpClass::specialOpCuda(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments);
        return;
    } else {
        __shared__ Nd4jLong length;
        __shared__ int xEWS;
        __shared__ int yEWS;
        __shared__ int zEWS;
        __shared__ char xOrder;
        __shared__ char yOrder;
        __shared__ char zOrder;

        __shared__ nd4j::graph::RandomGenerator *buffer;
        __shared__ unsigned char *cB;
        __shared__ unsigned char *dB;
        nd4j::graph::RandomGenerator *devBuffer;

        if (threadIdx.x == 0) {
            length = shape::length(zShapeBuffer);
            xEWS = shape::elementWiseStride(xShapeBuffer);
            yEWS = shape::elementWiseStride(yShapeBuffer);
            zEWS = shape::elementWiseStride(zShapeBuffer);
            xOrder = shape::order(xShapeBuffer);
            yOrder = shape::order(yShapeBuffer);
            zOrder = shape::order(zShapeBuffer);

            extern __shared__ unsigned char shmem[];
            buffer = (nd4j::graph::RandomGenerator *) shmem;
            cB = shmem;
            devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state);
            dB = reinterpret_cast<unsigned char *> (state);
        }
        __syncthreads();
        // cooperative copy instead of memcpy: each thread copies a slice of the
        // RandomGenerator state from global into shared memory, then a barrier
        // makes the staged state visible to the whole block
        for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e += blockDim.x)
            cB[e] = dB[e];

        __syncthreads();

        int tid = blockIdx.x * blockDim.x + threadIdx.x;
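        // fast path: when every buffer has a positive element-wise stride and all
        // orderings match, offsets reduce to a simple index * stride; otherwise
        // fall back to full shape-based offset computation per element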
        if (xEWS >= 1 && yEWS >= 1 && zEWS >= 1 && xOrder == yOrder && xOrder == zOrder) {
            for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) {
                z[e * zEWS] = OpClass::op(x[e * xEWS], y[e * yEWS], e, length, buffer, extraArguments);
            }
        } else {
            for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) {
                auto xOffset2 = shape::getIndexOffset(i, xShapeBuffer, length);
                auto yOffset2 = shape::getIndexOffset(i, yShapeBuffer, length);
                auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length);

                z[zOffset2] = OpClass::op(x[xOffset2], y[yOffset2], i, length, buffer, extraArguments);
            }
        }
    }
}
template<typename T>
template<typename OpClass>
void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<T*>(vx);
    auto z = reinterpret_cast<T*>(vz);
    auto extraArguments = reinterpret_cast<T*>(vextraArguments);

    __shared__ Nd4jLong length;
    __shared__ int xEWS;
    __shared__ int zEWS;
    __shared__ char xOrder;
    __shared__ char zOrder;

    __shared__ nd4j::graph::RandomGenerator *buffer;
    __shared__ unsigned char *cB;
    __shared__ unsigned char *dB;
    __shared__ nd4j::graph::RandomGenerator *devBuffer;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        buffer = (nd4j::graph::RandomGenerator *) shmem;
        cB = shmem;
        devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state);
        dB = reinterpret_cast<unsigned char *> (state);

        length = shape::length(zShapeBuffer);
        xEWS = shape::elementWiseStride(xShapeBuffer);
        zEWS = shape::elementWiseStride(zShapeBuffer);
        xOrder = shape::order(xShapeBuffer);
        zOrder = shape::order(zShapeBuffer);
    }
    __syncthreads();

    // cooperative copy of the RandomGenerator state into shared memory (see above)
    for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e += blockDim.x)
        cB[e] = dB[e];

    __syncthreads();

    if (xEWS >= 1 && zEWS >= 1 && xOrder == zOrder) {
        for (Nd4jLong e = blockIdx.x * blockDim.x + threadIdx.x; e < length; e += blockDim.x * gridDim.x) {
            z[e * zEWS] = OpClass::op(x[e * xEWS], e, length, buffer, extraArguments);
        }
    } else {
        for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x) {
            auto xOffset2 = shape::getIndexOffset(i, xShapeBuffer, length);
            auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length);

            z[zOffset2] = OpClass::op(x[xOffset2], i, length, buffer, extraArguments);
        }
    }
}
template<typename T>
template<typename OpClass>
void _CUDA_D RandomFunction<T>::execTransformCuda(Nd4jPointer state, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto z = reinterpret_cast<T*>(vz);
    auto extraArguments = reinterpret_cast<T*>(vextraArguments);

    __shared__ Nd4jLong length;
    __shared__ Nd4jLong ews;
    __shared__ nd4j::graph::RandomGenerator *buffer;
    __shared__ unsigned char *cB;
    __shared__ unsigned char *dB;
    __shared__ nd4j::graph::RandomGenerator *devBuffer;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        buffer = (nd4j::graph::RandomGenerator *) shmem;
        cB = shmem;
        devBuffer = reinterpret_cast<nd4j::graph::RandomGenerator *> (state);
        dB = reinterpret_cast<unsigned char *> (state);

        length = shape::length(zShapeBuffer);
        ews = shape::elementWiseStride(zShapeBuffer);
    }
    __syncthreads();

    // cooperative copy of the RandomGenerator state into shared memory (see above)
    for (int e = threadIdx.x; e < sizeof(nd4j::graph::RandomGenerator); e += blockDim.x)
        cB[e] = dB[e];

    __syncthreads();

    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (ews > 0) {
        for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) {
            z[i * ews] = OpClass::op(i, length, buffer, extraArguments);
        }
    } else {
        for (Nd4jLong i = tid; i < length; i += blockDim.x * gridDim.x) {
            auto zOffset2 = shape::getIndexOffset(i, zShapeBuffer, length);
            z[zOffset2] = OpClass::op(i, length, buffer, extraArguments);
        }
    }
}
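// note on launch configuration: all three kernels above stage the RandomGenerator
// in dynamic shared memory, so a launch must reserve at least
// sizeof(nd4j::graph::RandomGenerator) bytes of shared memory per block
// (in this codebase that size is conventionally carried in launchDims.z).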
template <>
_CUDA_H void RandomFunction<float>::executeCudaSingle(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto z = reinterpret_cast<float*>(vz);
    auto extraArguments = reinterpret_cast<float*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomSingle, float, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<float16>::executeCudaSingle(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto z = reinterpret_cast<float16*>(vz);
    auto extraArguments = reinterpret_cast<float16*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomSingle, float16, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<bfloat16>::executeCudaSingle(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto z = reinterpret_cast<bfloat16*>(vz);
    auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomSingle, bfloat16, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<double>::executeCudaSingle(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto z = reinterpret_cast<double*>(vz);
    auto extraArguments = reinterpret_cast<double*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomSingle, double, PARAMS(stateHost, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void RandomFunction<float>::executeCudaDouble(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<float*>(vx);
    auto z = reinterpret_cast<float*>(vz);
    auto extraArguments = reinterpret_cast<float*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomDouble, float, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<float16>::executeCudaDouble(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<float16*>(vx);
    auto z = reinterpret_cast<float16*>(vz);
    auto extraArguments = reinterpret_cast<float16*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomDouble, float16, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<bfloat16>::executeCudaDouble(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<bfloat16*>(vx);
    auto z = reinterpret_cast<bfloat16*>(vz);
    auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomDouble, bfloat16, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<double>::executeCudaDouble(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<double*>(vx);
    auto z = reinterpret_cast<double*>(vz);
    auto extraArguments = reinterpret_cast<double*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomDouble, double, PARAMS(stateHost, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}
template <>
_CUDA_H void RandomFunction<float>::executeCudaTriple(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<float*>(vx);
    auto y = reinterpret_cast<float*>(vy);
    auto z = reinterpret_cast<float*>(vz);
    auto extraArguments = reinterpret_cast<float*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomTriple, float, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<float16>::executeCudaTriple(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<float16*>(vx);
    auto y = reinterpret_cast<float16*>(vy);
    auto z = reinterpret_cast<float16*>(vz);
    auto extraArguments = reinterpret_cast<float16*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomTriple, float16, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<bfloat16>::executeCudaTriple(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<bfloat16*>(vx);
    auto y = reinterpret_cast<bfloat16*>(vy);
    auto z = reinterpret_cast<bfloat16*>(vz);
    auto extraArguments = reinterpret_cast<bfloat16*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomTriple, bfloat16, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}

template <>
_CUDA_H void RandomFunction<double>::executeCudaTriple(dim3& launchDims, cudaStream_t* stream, int opNum, Nd4jPointer stateHost, void *vx, Nd4jLong *xShapeBuffer, void *vy, Nd4jLong *yShapeBuffer, void *vz, Nd4jLong *zShapeBuffer, void *vextraArguments) {
    auto x = reinterpret_cast<double*>(vx);
    auto y = reinterpret_cast<double*>(vy);
    auto z = reinterpret_cast<double*>(vz);
    auto extraArguments = reinterpret_cast<double*>(vextraArguments);

    // this macro builds a bunch of IF/ELSE selectors for the kernel launch
    DISPATCH_SIMPLE(randomTriple, double, PARAMS(stateHost, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    DEBUG_KERNEL(stream, opNum);
}
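// the execTransform overloads below are empty in the CUDA build: actual work is
// routed through the executeCuda* methods above, and these host-side entry points
// presumably exist only to satisfy the shared RandomFunction interface.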
template<typename T>
template<typename OpClass>
void RandomFunction<T>::execTransform(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) {
}
template<typename T>
template<typename OpClass>
void RandomFunction<T>::execTransform(Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) {
}
template<typename T>
template<typename OpClass>
void RandomFunction<T>::execTransform(Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) {
}
template<typename T>
void RandomFunction<T>::execTransform(int opNum, Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) {
}
template<typename T>
void RandomFunction<T>::execTransform(int opNum, Nd4jPointer state, void *x, Nd4jLong *xShapeBuffer, void *y, Nd4jLong *yShapeBuffer, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) {
}
template<typename T>
void RandomFunction<T>::execTransform(int opNum, Nd4jPointer state, void *z, Nd4jLong *zShapeBuffer, void *extraArguments) {
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT RandomFunction, , FLOAT_TYPES);
}
}
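
/*
   Minimal host-side usage sketch (illustrative only: the launch configuration,
   stream handling and state pointer below are assumptions, not part of this file).
   By libnd4j convention launchDims is (blocks, threadsPerBlock, sharedMemBytes):

       dim3 launchDims(512, 512, sizeof(nd4j::graph::RandomGenerator));
       cudaStream_t stream;
       cudaStreamCreate(&stream);

       // state must point to an nd4j::graph::RandomGenerator that is readable
       // from the device; z/zShapeInfo describe the output buffer and its shape
       functions::random::RandomFunction<float>::executeCudaSingle(
               launchDims, &stream, opNum, state, z, zShapeInfo, extraArgs);

       cudaStreamSynchronize(stream);
*/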