/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//

#include <ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>

namespace nd4j {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
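// padCuda applies one of three padding modes to the whole output in a single kernel:
//   mode 0 - CONSTANT : elements outside the input window are filled with padVal
//   mode 1 - REFLECT  : out-of-range coordinates are mirrored around the input edges
//   otherwise - SYMMETRIC : edge elements are repeated once before mirroring (see shift1/shift2 below)
// Each thread owns a scratch row of `rank` coordinates inside the dynamically allocated
// shared-memory buffer and uses it to convert linear output indices to ND coordinates.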
// x - input, y - paddings, z - output
template<typename X, typename Y>
__global__ static void padCuda(const int mode,
                               const void *vx, const Nd4jLong *xShapeInfo,
                               const void *vy, const Nd4jLong *yShapeInfo,
                               void *vz, const Nd4jLong *zShapeInfo,
                               const void *vPadVal) {

    const X padVal = *reinterpret_cast<const X*>(vPadVal);

    const auto x = reinterpret_cast<const X*>(vx);
    const auto y = reinterpret_cast<const Y*>(vy);
          auto z = reinterpret_cast<X*>(vz);

    __shared__ int rank, rankMinusOne;
    __shared__ Nd4jLong zLen, yLen, totalThreads, *coords, *xShape, *zShape, *xStride, *zStride, shift1, shift2, yStride0;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        coords       = reinterpret_cast<Nd4jLong*>(shmem);
        zLen         = shape::length(zShapeInfo);
        xShape       = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo));
        zShape       = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo));
        xStride      = shape::stride(const_cast<Nd4jLong*>(xShapeInfo));
        zStride      = shape::stride(const_cast<Nd4jLong*>(zShapeInfo));
        yStride0     = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0];
        rank         = shape::rank(xShapeInfo);
        yLen         = 2 * rank;                 // paddings hold {left, right} per dimension
        rankMinusOne = rank - 1;
        totalThreads = gridDim.x * blockDim.x;
        shift1       = mode == 1 ? 0 : 1;        // REFLECT : SYMMETRIC
        shift2       = mode == 1 ? 2 : 1;        // REFLECT : SYMMETRIC
    }
    __syncthreads();

    auto xzCoord = coords + threadIdx.x * rank;  // we use xzCoord storage both for x and z arrays

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
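    // Grid-stride loops below: each thread converts linear output index i to ND coordinates,
    // then either copies the matching input element or synthesizes the padded value.
    // CONSTANT writes padVal for coordinates outside the input window; REFLECT/SYMMETRIC
    // remap out-of-range coordinates back into the input range using shift1/shift2.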
    if(mode == 0) { // CONSTANT case

        for (Nd4jLong i = tid; i < zLen; i += totalThreads) {

            shape::index2coords(rank, zShape, i, zLen, xzCoord);
            const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank);

            bool within = true;
            for(int j = rankMinusOne; j >= 0; --j) {
                if(xShape[j] == zShape[j]) continue;
                const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)];
                if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) { within = false; break; }
                else                                                    { xzCoord[j] = xzCoord[j] - left; }
            }

            if(within)
                z[zOffset] = x[shape::getOffset(0, xShape, xStride, xzCoord, rank)];
            else
                z[zOffset] = padVal;
        }
    }
    else { // REFLECT and SYMMETRIC cases

        for (Nd4jLong i = tid; i < zLen; i += totalThreads) {

            shape::index2coords(rank, zShape, i, zLen, xzCoord);
            const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank);

            for(int j = rankMinusOne; j >= 0; --j) {

                if(xShape[j] == zShape[j]) continue;
                xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)];   // now ready to fill middle (within input dimension range)
                if(xzCoord[j] < 0)               xzCoord[j] = -xzCoord[j] - shift1;                   // fill from left
                else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2;    // fill from right
            }

            const auto xOffset = shape::getOffset(0, xShape, xStride, xzCoord, rank);
            z[zOffset] = x[xOffset];
        }
    }
}

///////////////////////////////////////////////////////////////////
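// Host-side launcher; a concrete instantiation is selected per (X = data type, Y = paddings
// integer type) pair by the BUILD_DOUBLE_SELECTOR call in pad() below.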
template<typename X, typename Y>
static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                            const int mode,
                            const void *vx, const Nd4jLong *xShapeInfo,
                            const void *vy, const Nd4jLong *yShapeInfo,
                            void *vz, const Nd4jLong *zShapeInfo,
                            const void* padVal) {

    padCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal);
}

///////////////////////////////////////////////////////////////////
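// The shared-memory request reserves one row of `rank` coordinates per thread:
// 8 bytes (sizeof(Nd4jLong)) * threadsPerBlock * output.rankOf(), plus a small 128-byte pad.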
void pad(nd4j::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) {

    PointersManager manager(context, "pad");

    NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue});

    const int threadsPerBlock = MAX_NUM_THREADS / 4;
    const int blocksPerGrid   = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    const int sharedMem       = 8 * threadsPerBlock * output.rankOf() + 128;

    const auto xType = input.dataType();
    const auto yType = paddings.dataType();

    BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INDEXING_TYPES);

    NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue});
    manager.synchronize();
}


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
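// Fast path for rank <= 1 inputs: the whole mirror pad reduces to a 1D index remap, so a
// dedicated linear kernel is used instead of the general ND version below.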
template <typename T>
static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) {

    __shared__ T const* x;
    __shared__ T* z;
    if (threadIdx.x == 0) {
        x = reinterpret_cast<T const*>(vx);
        z = reinterpret_cast<T*>(vz);
    }
    __syncthreads();

    auto start = blockIdx.x * blockDim.x + threadIdx.x;
    auto step  = blockDim.x * gridDim.x;

    for(int i = start; i < zLen; i += step) {
        auto zIndex = shape::getIndexOffset(i, zShape, zLen);
        auto xIndex = shape::getIndexOffset(len - i, xShape, xLen);     // right side (default mapping)

        if (i < leftSide)                                               // left side
            xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape, xLen);

        else if(i >= leftSide && i < leftSide + xLen)                   // middle
            xIndex = shape::getIndexOffset(i - leftSide, xShape, xLen);

        z[zIndex] = x[xIndex];
    }
}
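// General-rank mirror pad: paddings has shape [rank, 2] ({left, right} per dimension) and every
// output coordinate is remapped axis by axis. For each axis the value
//   len = 2 * (inLen - 1) + leftSide + reflBorder
// serves as the mirror boundary for the right side: coordinates in [leftSide + inLen, len) reflect
// back as len - coord, and anything beyond that wraps by subtracting len. Per-thread coordinate
// scratch lives in the dynamically allocated shared-memory buffer (xIdx).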
template <typename F, typename I>
static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) {

    __shared__ F const* x;
    __shared__ I const* pads;
    __shared__ F* z;
    __shared__ Nd4jLong zRank, rank;
    __shared__ Nd4jLong* xShapeOf, *xStrideOf, *padsShapeOf, *padsStrideOf;
    __shared__ Nd4jLong* zShapeOf, *zStrideOf;
    __shared__ Nd4jLong* xIdx;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        xIdx = reinterpret_cast<Nd4jLong*>(shmem);
        rank = shape::rank(xShape);

        x    = reinterpret_cast<F const*>(vx);
        pads = reinterpret_cast<I const*>(paddings);
        z    = reinterpret_cast<F*>(vz);

        xShapeOf     = shape::shapeOf(xShape);
        xStrideOf    = shape::stride(xShape);
        zShapeOf     = shape::shapeOf(zShape);
        zRank        = shape::rank(zShape);
        zStrideOf    = shape::stride(zShape);
        padsShapeOf  = shape::shapeOf(paddingShape);
        padsStrideOf = shape::stride(paddingShape);
    }
    __syncthreads();

    auto start = threadIdx.x + blockIdx.x * blockDim.x;
    auto step  = blockDim.x * gridDim.x;

    for(Nd4jLong i = start; i < outLen; i += step) {

        auto xzCoord = xIdx + threadIdx.x * rank;

        shape::index2coords(rank, zShapeOf, i, xzCoord);
        auto outOffset = shape::getOffset(0, zShapeOf, zStrideOf, xzCoord, rank);

        for(int j = 0; j < rank; j++) {

            const Nd4jLong inLen = shape::sizeAt(xShape, j);
            Nd4jLong coords[2] = {j, 0};
            auto padOffset = shape::getOffset(0, padsShapeOf, padsStrideOf, coords, 2);   // paddings array already has rank 2
            const auto leftSide = pads[padOffset];
            const auto leftSideCorrected = leftSide - reflBorder;
            const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;

            if(xzCoord[j] < leftSide)                                        // left side
                xzCoord[j] = leftSideCorrected - xzCoord[j];

            else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle
                xzCoord[j] = xzCoord[j] - leftSide;

            else if (len > xzCoord[j])                                       // right side
                xzCoord[j] = len - xzCoord[j];

            else
                xzCoord[j] = xzCoord[j] - len;
        }

        auto inOffset = shape::getOffset(0, xShapeOf, xStrideOf, xzCoord, rank);
        z[outOffset] = x[inOffset];
    }
}
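// Host-side dispatcher: mode 0 selects REFLECT, any other value SYMMETRIC (reflBorder = 1).
// Launch dimensions are fixed constants; both kernels use grid-stride loops, so any output
// length is covered regardless of the grid size.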
template<typename F, typename I>
static void mirrorPad_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {

    // mode: 0 - REFLECT, else - SYMMETRIC
    const int reflBorder = mode ? 1 : 0;
    const int rank = input.rankOf();
    const Nd4jLong outLen = output.lengthOf();
    auto stream = context->getCudaStream();

    NDArray::prepareSpecialUse({&output}, {&input, &paddings});

    if(rank <= 1) {

        const Nd4jLong inLen = input.lengthOf();
        const auto leftSide = paddings.e<Nd4jLong>(0);
        const auto leftSideCorrected = leftSide - reflBorder;
        const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;

        mirrorPadLinearKernel<F><<<256, 512, 256, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen);
        nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed");
    }
    else {
        mirrorPadKernel<F, I><<<256, 256, 8192, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder);
        nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed");
    }
    NDArray::registerSpecialUse({&output}, {&input, &paddings});
}
void mirrorPad(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
    BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INDEXING_TYPES);
}


}   // namespace helpers
}   // namespace ops
}   // namespace nd4j