* fix double consumption of rng on cpu
  Signed-off-by: raver119 <raver119@gmail.com>

* Shyrma docs (#222)
  * - documenting and profiling matrix_set_diag cuda kernel
    Signed-off-by: Yurii <yurii@skymind.io>
  * - correct formula of pnorm pooling in cuda 2d/3d kernels
    - remove helper matrix_diag which duplicates work of helper matrix_set_diag
    Signed-off-by: Yurii <yurii@skymind.io>

* cublasHandle sharing + lock
  Signed-off-by: raver119 <raver119@gmail.com>

* cublasHandle sharing + lock
  Signed-off-by: raver119 <raver119@gmail.com>

* Documentation for serialization/deserialization in NLP (#221)
  * refactoring
    Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>
  * Javadocs
    Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>
  * Javadoc fixed
    Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>
  * Cleanup
    Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>

* dedicated lock for getCudaCublasHandle
  Signed-off-by: raver119 <raver119@gmail.com>

* Small fixes (#223)
  Signed-off-by: AlexDBlack <blacka101@gmail.com>

* ELU DL4J fixes (#224)
  Signed-off-by: AlexDBlack <blacka101@gmail.com>

* javadoc (#225)
  Signed-off-by: Robert Altena <Rob@Ra-ai.com>

* Small test compilation fix (#226)
  Signed-off-by: AlexDBlack <blacka101@gmail.com>

* #8182 remove spark version suffix (#227)
  Signed-off-by: AlexDBlack <blacka101@gmail.com>

* [WIP] Thread safety (#229)
  * sync after cublas*gemm
    Signed-off-by: raver119 <raver119@gmail.com>
  * mutex for CublasHelper
    Signed-off-by: raver119 <raver119@gmail.com>
  * don't store cublasHandle in LaunchContext, it's per-device anyway
    Signed-off-by: raver119 <raver119@gmail.com>
  * some printout
    Signed-off-by: raver119 <raver119@gmail.com>
  * check for field instead
    Signed-off-by: raver119 <raver119@gmail.com>
  * pew-pew
    Signed-off-by: raver119 <raver119@gmail.com>
  * don't release ContextBuffers until device changed
    Signed-off-by: raver119 <raver119@gmail.com>
  * small tweak
    Signed-off-by: raver119 <raver119@gmail.com>
  * some logging in sgemm
    Signed-off-by: raver119 <raver119@gmail.com>
  * stream sync
    Signed-off-by: raver119 <raver119@gmail.com>
  * some more logging
    Signed-off-by: raver119 <raver119@gmail.com>
  * some more error checks
    Signed-off-by: raver119 <raver119@gmail.com>
  * one fancy test
    Signed-off-by: raver119 <raver119@gmail.com>
  * one fancy test
    Signed-off-by: raver119 <raver119@gmail.com>
  * minor AffinityManager fix
    Signed-off-by: raver119 <raver119@gmail.com>
  * cudaEvent error logging improvement
    Signed-off-by: raver119 <raver119@gmail.com>
  * ConstantHelper thread safety
    Signed-off-by: raver119 <raver119@gmail.com>
  * - minor corrections in ConstantTadHelper
    Signed-off-by: Yurii <yurii@skymind.io>
  * ConstantShapeHelper thread safety
    Signed-off-by: raver119 <raver119@gmail.com>
  * ConstantTadHelper.cu updated
    Signed-off-by: raver119 <raver119@gmail.com>
  * logging off
    Signed-off-by: raver119 <raver119@gmail.com>
  * logging off
    Signed-off-by: raver119 <raver119@gmail.com>
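Several commits above ("cublasHandle sharing + lock", "dedicated lock for getCudaCublasHandle", "don't store cublasHandle in LaunchContext, it's per-device anyway") circle one pattern: cache a single cuBLAS handle per device and hand it out under a dedicated mutex. The sketch below only illustrates that pattern; HandleCache, handleForDevice, and HandleT are hypothetical names, with HandleT standing in for cublasHandle_t so the sketch compiles without CUDA. It is not the actual CublasHelper code.

#include <map>
#include <memory>
#include <mutex>

struct HandleT {};  // opaque stand-in for cublasHandle_t

class HandleCache {
public:
    // returns the shared handle for a device, creating it exactly once;
    // the dedicated lock makes concurrent first calls from many threads safe
    std::shared_ptr<HandleT> handleForDevice(int deviceId) {
        std::lock_guard<std::mutex> guard(_lock);
        auto it = _handles.find(deviceId);
        if (it == _handles.end())
            it = _handles.emplace(deviceId, createHandle(deviceId)).first;
        return it->second;
    }

private:
    // hypothetical factory; real code would call cublasCreate for the given device
    static std::shared_ptr<HandleT> createHandle(int /*deviceId*/) {
        return std::make_shared<HandleT>();
    }

    std::mutex _lock;
    std::map<int, std::shared_ptr<HandleT>> _handles;
};

Keying the cache by device rather than by context mirrors the "it's per-device anyway" observation: threads on one device share a handle (so gemm calls still need the stream sync the log mentions), while threads on different devices never contend for each other's handle.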
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com)
//

#include "ResultSet.h"
#include <ops/declarable/helpers/matrixSetDiag.h>

namespace nd4j {
namespace ops {
namespace helpers {

//////////////////////////////////////////////////////////////////////////
template<typename T>
void matrixSetDiag_(const NDArray& input, const NDArray& diagonal, NDArray& output, const bool zeroPad) {

    // input and output are the same array (x == z) when zeroPad = true
    // xRank = zRank, xRank = yRank + 1
    // xLen = zLen
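    // illustrative example: for x with shape [2,3,3] and y with shape [2,3], z keeps
    // shape [2,3,3] and every innermost 3x3 matrix is filled as
    //     z[b,i,j] = y[b,i]                       if i == j
    //     z[b,i,j] = zeroPad ? 0 : x[b,i,j]       otherwise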

    const T* x = input.bufferAsT<T>();
    const T* y = diagonal.bufferAsT<T>();
          T* z = output.bufferAsT<T>();

    const Nd4jLong* xShapeInfo = input.getShapeInfo();
    const Nd4jLong* yShapeInfo = diagonal.getShapeInfo();
    const Nd4jLong* zShapeInfo = output.getShapeInfo();

    const bool areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);    // shapes are definitely the same, but strides might not be

    const int  xRank = input.rankOf();
    const auto xLen  = input.lengthOf();

    std::vector<Nd4jLong> coords(xRank);    // we use the same coordinates storage for both input and output since their ranks are the same
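    // each iteration below recomputes coords from the linear index i, so iterations are
    // independent and the loop parallelizes safely with a firstprivate copy of coords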

    PRAGMA_OMP_PARALLEL_FOR_ARGS(firstprivate(coords))
    for (Nd4jLong i = 0; i < xLen; ++i) {

        shape::index2coords(xRank, xShapeInfo + 1, i, xLen, coords.data());

        const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + xRank + 1, coords.data(), xRank);
        const auto zOffset = areSameOffsets ? xOffset : shape::getOffset(0, zShapeInfo + 1, zShapeInfo + xRank + 1, coords.data(), xRank);

        // the last two coordinates are equal only on the diagonal of the innermost matrix
        if (coords[xRank - 2] == coords[xRank - 1])
            z[zOffset] = y[shape::getOffset(0, yShapeInfo + 1, yShapeInfo + xRank, coords.data(), xRank - 1)];
        else
            z[zOffset] = zeroPad ? static_cast<T>(0) : x[xOffset];
    }
}

//////////////////////////////////////////////////////////////////////////
void matrixSetDiag(nd4j::LaunchContext* context, const NDArray& input, const NDArray& diagonal, NDArray& output, const bool zeroPad) {
    // dispatch on the runtime data type: BUILD_SINGLE_SELECTOR expands to a switch that
    // invokes the matching matrixSetDiag_<T> instantiation for each type in LIBND4J_TYPES
    BUILD_SINGLE_SELECTOR(input.dataType(), matrixSetDiag_, (input, diagonal, output, zeroPad), LIBND4J_TYPES);
}

}    // namespace helpers
}    // namespace ops
}    // namespace nd4j
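
For context, a minimal sketch of how this helper might be driven from calling code. The NDArrayFactory::create calls, assign, and LaunchContext::defaultContext() reflect the libnd4j API of this era as best-effort assumptions, not lines taken from this repository:

#include <NDArrayFactory.h>
#include <execution/LaunchContext.h>
#include <ops/declarable/helpers/matrixSetDiag.h>

int main() {
    // a batch of two 3x3 matrices plus one length-3 diagonal per matrix
    auto input    = nd4j::NDArrayFactory::create<float>('c', {2, 3, 3});
    auto diagonal = nd4j::NDArrayFactory::create<float>('c', {2, 3});
    auto output   = nd4j::NDArrayFactory::create<float>('c', {2, 3, 3});

    input.assign(1.f);
    diagonal.assign(5.f);

    // zeroPad = false: off-diagonal elements are copied from input,
    // diagonal elements are overwritten with values from diagonal
    nd4j::ops::helpers::matrixSetDiag(nd4j::LaunchContext::defaultContext(), input, diagonal, output, false);

    // zeroPad = true: off-diagonal elements are zeroed instead of copied
    nd4j::ops::helpers::matrixSetDiag(nd4j::LaunchContext::defaultContext(), input, diagonal, output, true);
    return 0;
}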