/* ******************************************************************************
 *
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 08.11.2018
// @author raver119@gmail.com
//

#include "../scalar_int.h"
#include <system/op_boilerplate.h>
#include <types/types.h>
#include "../legacy_ops.h"

using namespace simdOps;

////////////////////////////////////////////////////////////////////////
// kernel wrapper: applies one scalar per TAD along the given dimension(s)
template <typename X, typename OpType>
__global__ void scalarAlongDimension(void const* x, Nd4jLong const* xShapeInfo,
                                     void *extraParams,
                                     void *z, Nd4jLong const* zShapeInfo,
                                     void const* scalars,
                                     int *dimension, int dimensionLength,
                                     Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
                                     Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

    functions::scalar::ScalarIntTransform<X>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}

////////////////////////////////////////////////////////////////////////
// kernel wrapper: applies a single scalar to the whole input buffer
template <typename X, typename OpType>
__global__ void scalarSimpleShaped(void const* x, void const* y, Nd4jLong const* xShapeInfo,
                                   void *params,
                                   void *z, Nd4jLong const* zShapeInfo,
                                   int *allocationBuffer) {

    functions::scalar::ScalarIntTransform<X>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer);
}


// *********************************************************************//
// *********************************************************************//
namespace functions {
namespace scalar    {

////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ScalarIntTransform<X>::transformCuda(void const* vscalar,
                                                     void const* vy, Nd4jLong const* yShapeInfo,
                                                     void *vparams,
                                                     void *vz, Nd4jLong const* zShapeInfo,
                                                     int *allocationBuffer) {

    auto scalar = reinterpret_cast<const X*>(vscalar)[0];
    auto y      = reinterpret_cast<const X*>(vy);
    auto params = reinterpret_cast<X*>(vparams);
    auto z      = reinterpret_cast<X*>(vz);

    auto yRank   = shape::rank(yShapeInfo);
    auto yEWS    = shape::elementWiseStride(yShapeInfo);
    auto yShape  = shape::shapeOf(yShapeInfo);
    auto yStride = shape::stride(yShapeInfo);

    auto zRank   = shape::rank(zShapeInfo);
    auto zEWS    = shape::elementWiseStride(zShapeInfo);
    auto zShape  = shape::shapeOf(zShapeInfo);
    auto zStride = shape::stride(zShapeInfo);

    int totalThreads = gridDim.x * blockDim.x;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    __shared__ int len;
    if (threadIdx.x == 0)
        len = shape::length(yShapeInfo);
    __syncthreads();

    // fast path: both buffers expose a usable element-wise stride and share the same ordering
    if (yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) {
        transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer);
    }
    else {
        // generic path: resolve per-element offsets from the shape info
        for (Nd4jLong i = tid; i < len; i += totalThreads)
            z[shape::getIndexOffset(i, zShapeInfo)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo)], scalar, params);
    }
}

////////////////////////////////////////////////////////////////////////
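// EWS-based fast path (see overload below): the scalar is read once, then
// OpType::op(y[i], scalar, params) is applied in a grid-stride loop, so any
// grid/block configuration covers the whole buffer regardless of its length.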
template <typename X>
template <typename OpType>
__device__ void ScalarIntTransform<X>::transformCuda(Nd4jLong len,
                                                     void const* vx, void const* vy, Nd4jLong yEWS,
                                                     void *vparams,
                                                     void *vz, Nd4jLong zEWS,
                                                     int *allocationBuffer) {

    auto x      = reinterpret_cast<const X*>(vx)[0];
    auto y      = reinterpret_cast<const X*>(vy);
    auto z      = reinterpret_cast<X*>(vz);
    auto params = reinterpret_cast<X*>(vparams);

    int totalThreads = gridDim.x * blockDim.x;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    Nd4jLong i = tid;
    if (yEWS == 1 && zEWS == 1) {
        // contiguous buffers: plain grid-stride loop
        for (; i < len; i += totalThreads)
            z[i] = OpType::op(y[i], x, params);
    }
    else {
        // strided buffers: scale indices by the element-wise strides
        for (; i < len; i += totalThreads)
            z[i * zEWS] = OpType::op(y[i * yEWS], x, params);
    }
}

////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
__device__ void ScalarIntTransform<X>::transformCuda(void const* vx, Nd4jLong const* xShapeInfo,
                                                     void *vextraParams,
                                                     void *vz, Nd4jLong const* zShapeInfo,
                                                     void const* vscalars,
                                                     int *dimension, int dimensionLength,
                                                     Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
                                                     Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

    auto x           = reinterpret_cast<const X*>(vx);
    auto scalars     = reinterpret_cast<const X*>(vscalars);
    auto z           = reinterpret_cast<X*>(vz);
    auto extraParams = reinterpret_cast<X*>(vextraParams);

    if (tadShapeInfoZ == nullptr) {
        tadShapeInfoZ = tadShapeInfo;
        tadOffsetsZ   = tadOffsets;
    }

    // tad preparation
    auto tadEws    = shape::elementWiseStride(tadShapeInfo);
    auto zEws      = shape::elementWiseStride(tadShapeInfoZ);
    auto tadLength = shape::length(tadShapeInfo);    // shape::tadLength(xShapeInfo, dimension, dimensionLength);
    auto numTads   = shape::length(xShapeInfo) / tadLength;

    if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) {

        // main loop, rolling over tads
        for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
            X *oZ   = z + tadOffsetsZ[r];
            auto oX = x + tadOffsets[r];
            auto s  = scalars[r];

            for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
                oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams);
        }
    } else {

        // main loop, rolling over tads
        for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
            X *oZ   = z + tadOffsetsZ[r];
            auto oX = x + tadOffsets[r];
            auto s  = scalars[r];

            for (int f = threadIdx.x; f < tadLength; f += blockDim.x)
                oZ[shape::getIndexOffset(f, tadShapeInfoZ)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo)], s, extraParams);
        }
    }
}

////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
_CUDA_H void ScalarIntTransform<X>::intermediateAlongDimension(dim3& launchDims, cudaStream_t *stream,
                                                               void const* x, Nd4jLong const* xShapeInfo,
                                                               void *z, Nd4jLong const* zShapeInfo,
                                                               void const* scalars,
                                                               void *extraParams,
                                                               int *dimension, int dimensionLength,
                                                               Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
                                                               Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

    scalarAlongDimension<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
}

////////////////////////////////////////////////////////////////////////
template <typename X>
template <typename OpType>
void _CUDA_H ScalarIntTransform<X>::intermediateShaped(dim3& launchDims, cudaStream_t *stream,
                                                       void const* vx, Nd4jLong const* xShapeInfo,
                                                       void *vz, Nd4jLong const* zShapeInfo,
                                                       void const* vscalar,
                                                       void *vextraParams,
                                                       int *allocPointer) {

    scalarSimpleShaped<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
}

////////////////////////////////////////////////////////////////////////
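// Host-side entry point: dispatches on opNum across SCALAR_INT_OPS via
// DISPATCH_BY_OPNUM_T, which selects the matching simdOps functor, instantiates
// intermediateShaped for it, and launches scalarSimpleShaped on the given stream.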
template <typename X>
void ScalarIntTransform<X>::executeCudaShaped(dim3& launchDims, cudaStream_t *stream,
                                              int opNum,
                                              void const* vx, Nd4jLong const* xShapeInfo,
                                              void *vz, Nd4jLong const* zShapeInfo,
                                              void const* vscalar,
                                              void* vextraParams) {

    if (sd::Environment::getInstance().isDebugAndVerbose())
        printf("H14 opNum:[%i]\n", opNum);

    DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_INT_OPS);
}

////////////////////////////////////////////////////////////////////////
template <typename X>
void ScalarIntTransform<X>::executeCudaAlongDimension(dim3& launchDims, cudaStream_t *stream,
                                                      int opNum,
                                                      void const* vx, Nd4jLong const* xShapeInfo,
                                                      void *vz, Nd4jLong const* zShapeInfo,
                                                      void const* vscalars,
                                                      void *vextraParams,
                                                      int *dimension, int dimensionLength,
                                                      Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
                                                      Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {

    DISPATCH_BY_OPNUM_T(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_INT_OPS);
}

BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ScalarIntTransform, , INTEGER_TYPES);

}
}
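////////////////////////////////////////////////////////////////////////
// Usage sketch (illustration only, not compiled here): a hypothetical host-side
// caller with device buffers dX/dZ, their shape-info buffers, and a device
// pointer holding the scalar operand. The launch dimensions shown are
// placeholders, not the values the library actually computes.
//
//   dim3 launchDims(512, 256, 1024);
//   functions::scalar::ScalarIntTransform<int>::executeCudaShaped(
//           launchDims, stream, opNum,
//           dX, dXShapeInfo,     // input buffer + shape info (device)
//           dZ, dZShapeInfo,     // output buffer + shape info (device)
//           dScalar,             // device pointer to the scalar value
//           nullptr);            // extra params (unused by most integer ops)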