/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
//

// Included headers reconstructed from the usual libnd4j broadcast loops; exact paths may differ between versions.
#include <system/op_boilerplate.h>
#include <loops/broadcasting.h>
#include <loops/legacy_ops.h>
#include <types/types.h>
#include <system/Environment.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include <stdexcept>
#include <helpers/StringUtils.h>
#include <ops/specials_cuda.h>

using namespace simdOps;

//////////////////////////////////////////////////////////////////////////
// kernel wrapper: TAD-based broadcast along the given dimensions
template <typename X, typename Y, typename Z, typename OpClass>
static __global__ void broadcastSimple(void const* x, Nd4jLong const* xShapeInfo,
                                       void const* y, Nd4jLong const* yShapeInfo,
                                       void *z, Nd4jLong const* zShapeInfo,
                                       int *dimension, int dimensionLength,
                                       Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                       Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

    functions::broadcast::Broadcast<X, Y, Z>::template transformCuda<OpClass>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
}

//////////////////////////////////////////////////////////////////////////
// kernel wrapper: flat (coordinate-based) broadcast, no TAD buffers required
template <typename X, typename Y, typename Z, typename OpClass>
static __global__ void broadcastSimple(const void *x, const Nd4jLong *xShapeInfo,
                                       const void *y, const Nd4jLong *yShapeInfo,
                                       void *z, const Nd4jLong *zShapeInfo) {

    functions::broadcast::Broadcast<X, Y, Z>::template transformCuda<OpClass>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo);
}

//////////////////////////////////////////////////////////////////////////
// kernel wrapper: "inverse" broadcast, where y (not x) is the array split into TADs
template <typename X, typename Y, typename Z, typename OpClass>
static __global__ void broadcastInverseSimple(void const* x, Nd4jLong const* xShapeInfo,
                                              void const* y, Nd4jLong const* yShapeInfo,
                                              void *z, Nd4jLong const* zShapeInfo,
                                              int *dimension, int dimensionLength,
                                              Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                              Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

    functions::broadcast::Broadcast<X, Y, Z>::template transformInverseCuda<OpClass>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
}

namespace functions {
namespace broadcast {

    static Nd4jLong __device__ __noinline__ getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo) {
        return shape::getIndexOffset(index, shapeInfo);
    }

    static Nd4jLong __device__ __noinline__ length(const Nd4jLong *shapeInfo) {
        return shape::length(shapeInfo);
    }

    template <typename X, typename Y, typename Z>
    template <typename OpType>
    __host__ void Broadcast<X, Y, Z>::intermediateBroadcast(dim3 launchDims, cudaStream_t *stream,
                                                            void const* x, Nd4jLong const* xShapeInfo,
                                                            void const* y, Nd4jLong const* yShapeInfo,
                                                            void* z, Nd4jLong const* zShapeInfo,
                                                            int *dimension, int dimensionLength,
                                                            Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                                            Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

        broadcastSimple<X, Y, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
    }

    template <typename X, typename Y, typename Z>
    template <typename OpType>
    __host__ void Broadcast<X, Y, Z>::intermediateBroadcast(dim3 launchDims, cudaStream_t *stream,
                                                            const void *x, const Nd4jLong *xShapeInfo,
                                                            const void *y, const Nd4jLong *yShapeInfo,
                                                            void *z, const Nd4jLong *zShapeInfo) {

        broadcastSimple<X, Y, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo);
    }
    template <typename X, typename Y, typename Z>
    __host__ void Broadcast<X, Y, Z>::execBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum,
                                                    void const* x, Nd4jLong const* xShapeInfo,
                                                    void const* y, Nd4jLong const* yShapeInfo,
                                                    void *z, Nd4jLong const* zShapeInfo,
                                                    int *dimension, int dimensionLength,
                                                    Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                                    Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

        DISPATCH_BY_OPNUM_TTT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS))

        DEBUG_KERNEL(stream, opNum);
    }

    template <typename X, typename Y, typename Z>
    __host__ void Broadcast<X, Y, Z>::execBroadcast(dim3 launchDims, cudaStream_t *stream, const int opNum,
                                                    const void *x, const Nd4jLong *xShapeInfo,
                                                    const void *y, const Nd4jLong *yShapeInfo,
                                                    void *z, const Nd4jLong *zShapeInfo) {

        DISPATCH_BY_OPNUM_TTT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo), OPS_A(BROADCAST_OPS))

        DEBUG_KERNEL(stream, opNum);
    }

    template <typename X, typename Y, typename Z>
    template <typename OpType>
    __host__ void Broadcast<X, Y, Z>::intermediateInverseBroadcast(dim3 launchDims, cudaStream_t *stream,
                                                                   void const* x, Nd4jLong const* xShapeInfo,
                                                                   void const* y, Nd4jLong const* yShapeInfo,
                                                                   void *z, Nd4jLong const* zShapeInfo,
                                                                   int *dimension, int dimensionLength,
                                                                   Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                                                   Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

        broadcastInverseSimple<X, Y, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
    }

    template <typename X, typename Y, typename Z>
    __host__ void Broadcast<X, Y, Z>::execInverseBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum,
                                                           void const* x, Nd4jLong const* xShapeInfo,
                                                           void const* y, Nd4jLong const* yShapeInfo,
                                                           void *z, Nd4jLong const* zShapeInfo,
                                                           int *dimension, int dimensionLength,
                                                           Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                                           Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

        DISPATCH_BY_OPNUM_TTT(intermediateInverseBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS))

        DEBUG_KERNEL(stream, opNum);
    }

    template <typename X, typename Y, typename Z>
    template <typename OpType>
    __device__ void Broadcast<X, Y, Z>::transformInverseCuda(void const* vx, Nd4jLong const* xShapeInfo,
                                                             void const* vy, Nd4jLong const* yShapeInfo,
                                                             void* vz, Nd4jLong const* zShapeInfo,
                                                             int *dimension, int dimensionLength,
                                                             Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                                             Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

        if (tadOnlyShapeInfoZ == nullptr) {
            tadOnlyShapeInfoZ = tadOnlyShapeInfo;
            tadOffsetsZ = tadOffsets;
        }

        auto x = reinterpret_cast<X const*>(vx);
        auto y = reinterpret_cast<Y const*>(vy);
        auto z = reinterpret_cast<Z*>(vz);

        // decompose into several sub-TADs after moving all dimensions
        // (in sorted order) to the back
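        // In this "inverse" variant y is the larger array: y (and z) is split into TADs along the
        // given dimensions, while x has the length of a single TAD and is re-applied against every
        // TAD of y. For example (illustration only), with x of shape [3], y of shape [2, 3] and
        // dimension = {1}: tadLength = 3, numTads = 2, and each row of y is combined element-wise
        // with x via OpType::op(x[i], rowOfY[i]).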
        // permuted version of the x shape info for setting up the TAD problem
        __shared__ Nd4jLong tadLength;
        __shared__ Nd4jLong tadEWS;
        __shared__ int numTads;
        __shared__ Nd4jLong xEWS;
        __shared__ Nd4jLong zEWS;

        if (threadIdx.x == 0) {
            tadLength = length(tadOnlyShapeInfo);
            tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
            numTads = length(yShapeInfo) / tadLength;
            xEWS = shape::elementWiseStride(xShapeInfo);
            zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
        }
        __syncthreads();

        auto xOrder = shape::order(xShapeInfo);
        auto yOrder = shape::order(tadOnlyShapeInfo);
        auto zOrder = shape::order(tadOnlyShapeInfoZ);

        for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
            auto rY = y + tadOffsets[r];
            auto rZ = z + tadOffsetsZ[r];

            if (tadEWS > 0 && zEWS > 0 && xEWS > 0 && dimensionLength == 1 && xOrder == yOrder && xOrder == zOrder) {
                // fast path: positive element-wise strides and matching orders
                for (int i = threadIdx.x; i < tadLength; i += blockDim.x)
                    rZ[i * zEWS] = OpType::op(x[i * xEWS], rY[i * tadEWS]);
            }
            else {
                // it is expected that the x array, the y TADs and the z TADs all have the same length
                for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    auto xOffset = getIndexOffset(i, xShapeInfo);
                    auto yOffset = getIndexOffset(i, tadOnlyShapeInfo);
                    auto zOffset = getIndexOffset(i, tadOnlyShapeInfoZ);
                    rZ[zOffset] = OpType::op(x[xOffset], rY[yOffset]);
                }
            }
        }
    }

    template <typename X, typename Y, typename Z>
    template <typename OpType>
    __device__ void Broadcast<X, Y, Z>::transformCuda(void const* vx, Nd4jLong const* xShapeInfo,
                                                      void const* vy, Nd4jLong const* yShapeInfo,
                                                      void *vz, Nd4jLong const* zShapeInfo,
                                                      int *dimension, int dimensionLength,
                                                      Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
                                                      Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

        if (tadOnlyShapeInfoZ == nullptr) {
            tadOnlyShapeInfoZ = tadOnlyShapeInfo;
            tadOffsetsZ = tadOffsets;
        }

        auto x = reinterpret_cast<X const*>(vx);
        auto y = reinterpret_cast<Y const*>(vy);
        auto z = reinterpret_cast<Z*>(vz);

        // decompose into several sub-TADs after moving all dimensions
        // (in sorted order) to the back
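        // Here x is the larger array: x (and z) is split into TADs along the given dimensions, and
        // every TAD of x is combined element-wise with the full y array. For example (illustration
        // only), with x of shape [2, 3], y of shape [3] and dimension = {1}: tadLength = 3,
        // numTads = 2, and each row of x is combined with y via OpType::op(rowOfX[i], y[i]).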
        // permuted version of the x shape info for setting up the TAD problem
        __shared__ Nd4jLong tadLength;
        __shared__ Nd4jLong tadEWS;
        __shared__ int numTads;
        __shared__ Nd4jLong yEWS;
        __shared__ Nd4jLong zEWS;

        if (threadIdx.x == 0) {
            tadLength = length(tadOnlyShapeInfo);
            tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
            numTads = length(xShapeInfo) / tadLength;
            yEWS = shape::elementWiseStride(yShapeInfo);
            zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
        }
        __syncthreads();

        auto xOrder = shape::order(tadOnlyShapeInfo);
        auto yOrder = shape::order(yShapeInfo);
        auto zOrder = shape::order(tadOnlyShapeInfoZ);

        for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
            auto rX = x + tadOffsets[r];
            auto rZ = z + tadOffsetsZ[r];

            if (tadEWS > 0 && zEWS > 0 && yEWS > 0 && xOrder == yOrder && xOrder == zOrder) {
                // fast path: positive element-wise strides and matching orders
                for (int i = threadIdx.x; i < tadLength; i += blockDim.x)
                    rZ[i * zEWS] = OpType::op(rX[i * tadEWS], y[i * yEWS]);
            }
            else {
                // it is expected that the x TADs, the z TADs and the y array all have the same length
                for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    auto xOffset = getIndexOffset(i, tadOnlyShapeInfo);
                    auto yOffset = getIndexOffset(i, yShapeInfo);
                    auto zOffset = getIndexOffset(i, tadOnlyShapeInfoZ);
                    rZ[zOffset] = OpType::op(rX[xOffset], y[yOffset]);
                }
            }
        }
    }

////////////////////////////////////////////////////////////////////////
    template <typename X, typename Y, typename Z>
    template <typename OpType>
    __device__ void Broadcast<X, Y, Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo,
                                                      const void *vy, const Nd4jLong *yShapeInfo,
                                                      void *vz, const Nd4jLong *zShapeInfo) {

        const X* x = reinterpret_cast<const X*>(vx);
        const Y* y = reinterpret_cast<const Y*>(vy);
              Z* z = reinterpret_cast<Z*>(vz);

        __shared__ Nd4jLong zLen;
        __shared__ int rank;
        __shared__ bool xzSameOffsets, yzSameOffsets;

        if (threadIdx.x == 0) {
            zLen = shape::length(zShapeInfo);
            rank = shape::rank(zShapeInfo);
            xzSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
            yzSameOffsets = shape::haveSameShapeAndStrides(yShapeInfo, zShapeInfo);
        }
        __syncthreads();

        const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

        int xCoords[MAX_RANK], yCoords[MAX_RANK], zCoords[MAX_RANK];

        for (Nd4jLong i = tid; i < zLen; i += blockDim.x * gridDim.x) {

            shape::index2coords(i, zShapeInfo, zCoords);

            // broadcasting rule: a size-1 dimension in x or y always maps to coordinate 0
            for (uint j = 0; j < rank; ++j) {
                xCoords[j] = shape::sizeAt(xShapeInfo, j) == 1 ? 0 : zCoords[j];
                yCoords[j] = shape::sizeAt(yShapeInfo, j) == 1 ? 0 : zCoords[j];
            }

            const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
            const auto xOffset = xzSameOffsets ? zOffset : shape::getOffset(xShapeInfo, xCoords);
            const auto yOffset = yzSameOffsets ? zOffset : shape::getOffset(yShapeInfo, yCoords);

            z[zOffset] = OpType::op(x[xOffset], y[yOffset]);
        }
    }

/*
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_0);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_1);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_2);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_3);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_4);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_5);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_6);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_7);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_8);
    BUILD_PAIRWISE_TEMPLATE(template class ND4J_EXPORT Broadcast, , PAIRWISE_TYPES_9);
*/
}
}
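
/*
 * Illustration only (not part of the library): a minimal, self-contained CUDA sketch of the same
 * coordinate-clamping broadcast rule used by the flat transformCuda above, written without the
 * shape-info machinery. Everything below (simpleBroadcastAdd, BCAST_MAX_RANK, the dense C-order
 * layout assumption) is hypothetical and exists purely to illustrate the indexing; it is not a
 * libnd4j API. Kept behind #if 0 so it never participates in the build.
 */
#if 0
#include <cuda_runtime.h>

constexpr int BCAST_MAX_RANK = 4;   // small illustrative cap; the real code uses MAX_RANK

// z = x + y, where x and y have the same rank as z and any size-1 dimension of x or y is
// broadcast (its coordinate is clamped to 0); all buffers are assumed dense and C-ordered.
__global__ void simpleBroadcastAdd(const float* x, const int* xShape,
                                   const float* y, const int* yShape,
                                   float* z, const int* zShape,
                                   int rank, long long zLen) {

    for (long long i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += (long long)blockDim.x * gridDim.x) {

        // linear index -> coordinates of the output element (C order)
        int zc[BCAST_MAX_RANK];
        long long rem = i;
        for (int d = rank - 1; d >= 0; --d) {
            zc[d] = (int)(rem % zShape[d]);
            rem /= zShape[d];
        }

        // map output coordinates back to x and y offsets, clamping broadcast dimensions to 0
        long long xOff = 0, yOff = 0, xStride = 1, yStride = 1;
        for (int d = rank - 1; d >= 0; --d) {
            xOff += (long long)(xShape[d] == 1 ? 0 : zc[d]) * xStride;
            yOff += (long long)(yShape[d] == 1 ? 0 : zc[d]) * yStride;
            xStride *= xShape[d];
            yStride *= yShape[d];
        }

        z[i] = x[xOff] + y[yOff];
    }
}
#endif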