/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//

#include <types/types.h>
#include <op_boilerplate.h>
#include <loops/reduce_float.h>
#include <loops/legacy_ops.h>
#include <OmpLaunchHelper.h>
#include <chrono>
#include <helpers/Loops.h>
#include <helpers/ConstantTadHelper.h>

using namespace simdOps;

namespace functions {
    namespace reduce {

        template <typename X, typename Z>
        template <typename OpType>
        void _CUDA_H ReduceFloatFunction<X, Z>::execScalar(void *vx, Nd4jLong *xShapeInfo,
                                                           void *vextraParams,
                                                           void *vz, Nd4jLong *zShapeInfo) {
            auto x = reinterpret_cast<X *>(vx);
            auto z = reinterpret_cast<Z *>(vz);
            auto extraParams = reinterpret_cast<Z *>(vextraParams);

            const Nd4jLong length = shape::length(xShapeInfo);
            auto xEws = shape::elementWiseStride(xShapeInfo);

            // empty input: Mean yields NaN (or zero for types without NaN), other ops yield their neutral value
            if (shape::isEmpty(xShapeInfo)) {
                if (std::is_same<OpType, simdOps::Mean<X, Z>>::value) {
                    z[0] = nd4j::DataTypeUtils::nanOrZero<Z>();
                } else {
                    z[0] = OpType::startingValue(x);
                }
                return;
            }

            if (nd4j::ArrayOptions::arrayType(xShapeInfo) == nd4j::ArrayType::EMPTY) {
                if (nd4j::ArrayOptions::arrayType(zShapeInfo) == nd4j::ArrayType::EMPTY)
                    return;

                const auto startingVal = OpType::startingValue(x);

                for (uint i = 0; i < length; i++)
                    z[i] = startingVal;
                return;
            }

            if (xEws > 0) {
                // input has a constant element-wise stride: take the flat fast path
                z[0] = execScalar<OpType>(x, xEws, length, extraParams);
            }
            else {
                // general case: resolve each element's offset through the shape info
                auto startingValue = OpType::startingValue(x);
                uint xShapeInfoCast[MAX_RANK];
                const bool canCastX = nd4j::DataTypeUtils::castShapeInfo(xShapeInfo, xShapeInfoCast);
                int maxThreads = nd4j::math::nd4j_min<int>(64, nd4j::Environment::getInstance()->maxThreads());
                Z intermediate[64];

                PRAGMA_OMP_SIMD
                for (auto e = 0; e < maxThreads; e++)
                    intermediate[e] = OpType::startingValue(x);

                // each thread accumulates into its own slot of intermediate[]
                auto func = PRAGMA_THREADS_FOR {
                    for (auto i = start; i < stop; i++)
                        intermediate[thread_id] = OpType::update(intermediate[thread_id], OpType::op(x[shape::indexOffset(i, xShapeInfo, xShapeInfoCast, canCastX)], extraParams), extraParams);
                };

                maxThreads = samediff::Threads::parallel_for(func, 0, length, 1, maxThreads);

                // merge results
                for (int e = 1; e < maxThreads; e++)
                    intermediate[0] = OpType::update(intermediate[0], intermediate[e], extraParams);

                // write out results
                z[0] = OpType::postProcess(intermediate[0], length, extraParams);
            }
        }

        template <typename X, typename Z>
        template <typename OpType>
        Z _CUDA_H ReduceFloatFunction<X, Z>::execScalar(void *vx, Nd4jLong *xShapeInfo, void *vextraParams) {
            auto x = reinterpret_cast<X *>(vx);
            auto extraParams = reinterpret_cast<Z *>(vextraParams);

            const Nd4jLong length = shape::length(xShapeInfo);
            int xEws = shape::elementWiseStride(xShapeInfo);

            if (xEws > 0) {
                return execScalar<OpType>(x, xEws, length, extraParams);
            }
            else {
                auto startingValue = OpType::startingValue(x);
                uint xShapeInfoCast[MAX_RANK];
                bool canCastX = nd4j::DataTypeUtils::castShapeInfo(xShapeInfo, xShapeInfoCast);

                for (auto i = 0; i < length; i++)
                    startingValue = OpType::update(startingValue, OpType::op(x[shape::indexOffset(i, xShapeInfo, xShapeInfoCast, canCastX)], extraParams), extraParams);

                return OpType::postProcess(startingValue, length, extraParams);
            }
        }
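        // The dispatchers below map a runtime opNum onto a concrete OpType instantiation.
        // DISPATCH_BY_OPNUM_TT / RETURNING_DISPATCH_BY_OPNUM_TT (op_boilerplate.h) expand
        // to a switch over the REDUCE_FLOAT_OPS list, so every listed op gets its own
        // compiled specialization of execScalar / exec.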
        template <typename X, typename Y>
        Y ReduceFloatFunction<X, Y>::execScalar(const int opNum,
                                                void *x, Nd4jLong *xShapeInfo,
                                                void *extraParams) {
            RETURNING_DISPATCH_BY_OPNUM_TT(execScalar, PARAMS(x, xShapeInfo, extraParams), REDUCE_FLOAT_OPS);
        }

        template <typename X, typename Y>
        void ReduceFloatFunction<X, Y>::execScalar(const int opNum,
                                                   void *x, Nd4jLong *xShapeInfo,
                                                   void *extraParams,
                                                   void *z, Nd4jLong *zShapeInfo) {
            DISPATCH_BY_OPNUM_TT(execScalar, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo), REDUCE_FLOAT_OPS);
        }

        template <typename X, typename Y>
        void ReduceFloatFunction<X, Y>::exec(const int opNum,
                                             void *x, Nd4jLong *xShapeInfo,
                                             void *extraParams,
                                             void *z, Nd4jLong *zShapeInfo,
                                             int *dimension, int dimensionLength,
                                             Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset,
                                             int64_t start, int64_t stop) {
            DISPATCH_BY_OPNUM_TT(exec, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffset, start, stop), REDUCE_FLOAT_OPS);
        }

        template <typename X, typename Z>
        template <typename OpType>
        void _CUDA_H ReduceFloatFunction<X, Z>::exec(void *vx, Nd4jLong *xShapeInfo,
                                                     void *vextraParams,
                                                     void *vresult, Nd4jLong *zShapeInfo,
                                                     int *dimension, int dimensionLength,
                                                     Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset,
                                                     int64_t start, int64_t stop) {
            auto x = reinterpret_cast<X *>(vx);
            auto z = reinterpret_cast<Z *>(vresult);
            auto extraParams = reinterpret_cast<Z *>(vextraParams);

            auto resultLength = shape::length(zShapeInfo);

            if (nd4j::ArrayOptions::arrayType(xShapeInfo) == nd4j::ArrayType::EMPTY) {
                if (nd4j::ArrayOptions::arrayType(zShapeInfo) == nd4j::ArrayType::EMPTY)
                    return;

                const auto startingVal = std::is_same<OpType, simdOps::Mean<X, Z>>::value
                                         ? nd4j::DataTypeUtils::nanOrZero<Z>()
                                         : static_cast<Z>(OpType::startingValue(x));

                for (uint i = 0; i < resultLength; i++)
                    z[i] = startingVal;
                return;
            }

            // pre squeezed: this is for keeping the pointer to the original
            // shape information for tad offset;
            // the squeezed information doesn't render the right strides for
            // tad offset
            // || tad.wholeThing
            if (resultLength == 1 || dimension == nullptr || dimensionLength == shape::rank(xShapeInfo)) {
                z[0] = execScalar<OpType>(x, xShapeInfo, extraParams);
                return;
            }

            if (OpType::requiresSpecialAccumulation) {
                OpType::execSpecial(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffset);
                return;
            }

            auto tadOnlyShapeInfo = tadShapeInfo;
            auto tadOffsets = tadOffset;

            // no TAD info supplied by the caller: build it for the requested dimensions
            if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) {
                if (dimensionLength < 0)
                    return;

                auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
                tadOnlyShapeInfo = tadPack.primaryShapeInfo();
                tadOffsets = tadPack.primaryOffsets();
            }

#ifdef INLINE_LOOPS
            nd4j::ReductionLoops<X, Z, Z>::template loopReduce<OpType>(x, xShapeInfo, z, zShapeInfo, tadOnlyShapeInfo, tadOffsets, extraParams, start, stop);
#else
            nd4j::ReductionFloatLoops<X, Z>::template innerloopReduce<OpType>(x, xShapeInfo, z, zShapeInfo, tadOnlyShapeInfo, tadOffsets, extraParams, start, stop);
#endif
        }

        template <typename X, typename Z>
        template <typename OpType>
        void _CUDA_H ReduceFloatFunction<X, Z>::exec(void *x, Nd4jLong *xShapeInfo,
                                                     void *extraParams,
                                                     void *vresult, Nd4jLong *resultShapeInfo) {
            // FIXME: wtf???
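            // note: this overload ignores resultShapeInfo and always reduces the whole
            // input to a single scalar in z[0] - presumably the reason for the FIXME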
            auto z = reinterpret_cast<Z *>(vresult);
            z[0] = execScalar<OpType>(x, xShapeInfo, extraParams);
        }

        template <typename X, typename Z>
        template <typename OpType>
        Z _CUDA_H ReduceFloatFunction<X, Z>::execScalar(void *vx, Nd4jLong xEws, Nd4jLong length, void *vextraParams) {
            auto x = reinterpret_cast<X *>(vx);
            auto extraParams = reinterpret_cast<Z *>(vextraParams);

            int maxThreads = nd4j::math::nd4j_min<int>(64, nd4j::Environment::getInstance()->maxThreads());
            Z intermediate[64];

            PRAGMA_OMP_SIMD
            for (auto e = 0; e < maxThreads; e++)
                intermediate[e] = OpType::startingValue(x);

            // dedicated branch for unit stride keeps the hot loop trivially vectorizable
            auto func = PRAGMA_THREADS_FOR {
                if (xEws == 1) {
                    for (auto i = start; i < stop; i++)
                        intermediate[thread_id] = OpType::update(intermediate[thread_id], OpType::op(x[i], extraParams), extraParams);
                } else {
                    for (auto i = start; i < stop; i++)
                        intermediate[thread_id] = OpType::update(intermediate[thread_id], OpType::op(x[i * xEws], extraParams), extraParams);
                }
            };

            maxThreads = samediff::Threads::parallel_for(func, 0, length, 1, maxThreads);

            // merge results
            for (int e = 1; e < maxThreads; e++)
                intermediate[0] = OpType::update(intermediate[0], intermediate[e], extraParams);

            // return result
            return OpType::postProcess(intermediate[0], length, extraParams);
        }


        BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT ReduceFloatFunction, , LIBND4J_TYPES, FLOAT_TYPES);
    }
}
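// Usage sketch (illustrative, not part of this file): a host-side caller holding a
// float buffer plus libnd4j shape-info buffers can reduce it to a scalar through the
// opNum dispatcher defined above. opNum selects an entry of REDUCE_FLOAT_OPS (e.g.
// Mean); xShapeInfo/zShapeInfo are assumed to come from the usual shape-buffer
// machinery (ShapeBuilders / NDArray), which is omitted here.
//
//   functions::reduce::ReduceFloatFunction<float, float>::execScalar(
//           opNum, x, xShapeInfo, nullptr /* extraParams */, z, zShapeInfo);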