Shyrma mmul (#58)

* get rid of some copy procedures in mmulHelper ops. Signed-off-by: Yurii <iuriish@yahoo.com>
* further work on embedding the cuda api for batched gemm (cublasGemmBatchedEx) in our mmulHelper class. Signed-off-by: Yurii <iuriish@yahoo.com>
* further work on the cuda batched gemm api. Signed-off-by: Yurii <iuriish@yahoo.com>
* write own cuda kernel performing batched gemm. Signed-off-by: Yurii <iuriish@yahoo.com>
* missing include in MmulHelper. Signed-off-by: raver119 <raver119@gmail.com>
* keep the previous correct kernels for mmulNxN in the code, since the new one may fail for some reason in the future. Signed-off-by: Yurii <iuriish@yahoo.com>
* disable old tensordot. Signed-off-by: raver119 <raver119@gmail.com>
* rewrite cuda kernels for usualGemm and usualGemv. Signed-off-by: Yurii <iuriish@yahoo.com>
* profiling mmul helpers. Signed-off-by: Yurii <iuriish@yahoo.com>
* prints to check shapes were added. Signed-off-by: Yurii <iuriish@yahoo.com>
* correct type of output array C in mmulNxN. Signed-off-by: Yurii <iuriish@yahoo.com>
* take into account possible nans in C array. Signed-off-by: Yurii <iuriish@yahoo.com>
* slightly change numThreads message. Signed-off-by: raver119 <raver119@gmail.com>
* make corrections in accordance with the notes given in PR review. Signed-off-by: Yurii <iuriish@yahoo.com>

parent da1944e8e1
commit 66b84b38cf
@@ -1286,6 +1286,11 @@ namespace nd4j {
         */
         Nd4jLong sizeAt(const int dim) const;

+        /**
+        * returns stride of "dim" dimension
+        */
+        Nd4jLong strideAt(const int dim) const;
+
         /**
         * returns order of array
         */
@@ -1439,9 +1439,21 @@ Nd4jLong NDArray::sizeAt(const int dim) const {
         throw std::runtime_error("Bad size index requested");

    if (dim >= 0)
-        return this->_shapeInfo[1+dim];
+        return shape::shapeOf(_shapeInfo)[dim];
    else
-        return this->_shapeInfo[1+(this->rankOf() + dim)];
+        return shape::shapeOf(_shapeInfo)[this->rankOf() + dim];
 }

+//////////////////////////////////////////////////////////////////////////
+Nd4jLong NDArray::strideAt(const int dim) const {
+
+    if (dim >= this->rankOf() || dim < -this->rankOf())
+        throw std::runtime_error("NDArray::strideAt: Bad size index requested");
+
+    if (dim >= 0)
+        return shape::stride(_shapeInfo)[dim];
+    else
+        return shape::stride(_shapeInfo)[this->rankOf() + dim];
+}
+
 //////////////////////////////////////////////////////////////////////////
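For reference, the axis conventions used by sizeAt and strideAt above: a negative dim counts back from the last axis (it resolves as rank + dim), and strideAt returns the element stride taken straight from the shapeInfo. A minimal usage sketch (the shape and stride values assume a contiguous 'c'-order array; the helper function name is hypothetical):

#include <cassert>

// Hypothetical check, assuming x has shape {2,3,4} in contiguous 'c' order,
// i.e. strides {12,4,1}.
void checkAxisConventions(nd4j::NDArray& x) {
    assert(x.sizeAt(-1)   == 4);    // same as x.sizeAt(2)
    assert(x.strideAt(0)  == 12);   // 3*4 elements between slices along axis 0
    assert(x.strideAt(-1) == 1);    // innermost axis is contiguous
}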
@@ -1,5 +1,6 @@
 /*******************************************************************************
  * Copyright (c) 2015-2018 Skymind, Inc.
+ * Copyright (c) 2019 Konduit K.K.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Apache License, Version 2.0 which is available at
@@ -53,7 +54,7 @@ namespace nd4j {

 #ifndef __JAVACPP_HACK__
         /**
         *  modif - (can be empty) vector containing a subsequence of permutation/reshaping arrays (in any order), user must take care of correctness of such arrays by himself
         */
         static void tensorDot(const nd4j::NDArray* a, const nd4j::NDArray* b, nd4j::NDArray* c, const std::vector<std::vector<Nd4jLong>>& modifA, const std::vector<std::vector<Nd4jLong>>& modifB, const std::vector<std::vector<Nd4jLong>>& modifC);
         static nd4j::NDArray* tensorDot(const nd4j::NDArray* a, const nd4j::NDArray* b, const std::vector<std::vector<Nd4jLong>>& modifA, const std::vector<std::vector<Nd4jLong>>& modifB);
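Since the modif argument is only documented tersely above, here is how it is exercised by tensordot_test_6 later in this commit: each modif is a sequence of permute and reshape arrays applied to the corresponding operand before the multiplication. The variables (a, b, cR, iC, bS, oH, oW, kH, kW, mC) come from that test:

// permute a by {1,0,4,5,2,3}, then reshape it to [iC, bS*oH*oW, kW*kH], etc.
MmulHelper::tensorDot(&a, &b, &cR,
                      {{1,0,4,5,2,3}, {iC, bS*oH*oW, kW*kH}},   // modifA
                      {{2,0,1,3},     {iC, kH*kW,    mC}},      // modifB
                      {{3,0,1,2,4},   {iC, bS*oH*oW, mC}});     // modifC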
@@ -1,5 +1,6 @@
 /*******************************************************************************
  * Copyright (c) 2015-2018 Skymind, Inc.
+ * Copyright (c) 2019 Konduit K.K.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Apache License, Version 2.0 which is available at
@@ -21,6 +21,7 @@
 #include "../MmulHelper.h"
 #include <NDArrayFactory.h>
 #include <helpers/BlasHelper.h>
+#include <helpers/ShapeUtils.h>
 #include <exceptions/datatype_exception.h>
 #include <execution/Threads.h>

@@ -28,110 +29,124 @@
 namespace nd4j {

 //////////////////////////////////////////////////////////////////////////////
-// MXK x KxN = MxN
+// MXK x KxN = MxN -> actual sequence of axes doesn't matter
 template <typename T1, typename T2, typename T3>
-static void usualGemm(const char cOrder, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
-
-    T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
-    T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
-    T3* C = reinterpret_cast<T3*>(vC);
-    T3 alphaZ(alpha), betaZ(beta);
-
-    const bool flagC = cOrder == 'f';
-    const bool flagA = (flagC && transA) || (!flagC && !transA);
-    const bool flagB = (flagC && transB) || (!flagC && !transB);
-
-    // PRAGMA_OMP_PARALLEL_FOR_ARGS(OMP_IF(M*N > Environment::getInstance()->elementwiseThreshold()) schedule(guided))
-    // for(uint row = 0; row < M; ++row) {
-
-    //     T3* c = flagC ? (C + row) : (C + row * ldc);
-
-    //     for(uint col = 0; col < N; ++col)
-    //         c[flagC ? col * ldc : col] = 0;
-
-    //     for(uint i = 0; i < K; ++i) {
-
-    //         T3* b = flagB ? (B + i * ldb) : (B + i);
-    //         T3* a = flagA ? (A + row * lda + i) : (A + row + i * lda);
-
-    //         if(flagC) {
-    //             PRAGMA_OMP_SIMD
-    //             for(uint col = 0; col < N; ++col) {
-    //                 if(betaZ)
-    //                     c[col * ldc] += a * b[flagB ? col : col * ldb] + betaZ * c[col * ldc];
-    //                 else
-    //                     c[col * ldc] += a * b[flagB ? col : col * ldb];
-    //             }
-    //         }
-    //         else {
-    //             PRAGMA_OMP_SIMD
-    //             for(uint col = 0; col < N; ++col) {
-    //                 if(betaZ)
-    //                     c[col] += a * b[flagB ? col : col * ldb] + betaZ * c[col];
-    //                 else
-    //                     c[col] += a * b[flagB ? col : col * ldb];
-    //             }
-    //         }
-    //     }
-    // }
-
-    auto func = PRAGMA_THREADS_FOR_2D { ;
-        for (auto row = start_x; row < stop_x; row += inc_x) {
-            for (auto col = start_y; col < stop_y; col += inc_y) {
-                T3 *c = flagC ? (C + row + col * ldc) : (C + row * ldc + col);
-                T3 val = 0;
-
-                PRAGMA_OMP_SIMD
-                for (uint i = 0; i < K; ++i) {
-                    T3 a = flagA ? *(A + row * lda + i) : *(A + row + i * lda);
-                    T3 b = flagB ? *(B + col + i * ldb) : *(B + col * ldb + i);
-                    val += alphaZ * a * b;
-                }
-
-                if (betaZ)
-                    *c = val + betaZ * *c;
-                else
-                    *c = val;
-            }
-        }
-    };
-
-    samediff::Threads::parallel_for(func, 0, M, 1, 0, N, 1);
-}
-
-//////////////////////////////////////////////////////////////////////////////
-// MXN x N = M
-template <typename T1, typename T2, typename T3>
-static void usualGemv(const char aOrder, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
-
-    T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
-    T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
-    T3* Y = reinterpret_cast<T3*>(vY);
-    T3 alphaZ(alpha), betaZ(beta);
-
-    const bool flagA = aOrder == 'f';
-
-    auto func = PRAGMA_THREADS_FOR {
-        for (auto row = start; row < stop; row += increment) {
-
-            T3 *y = Y + row * incy;
-            T3 val = 0;
-
-            PRAGMA_OMP_SIMD
-            for (int i = 0; i < N; ++i) {
-                T3 a = flagA ? *(A + row + i * lda) : *(A + row * lda + i);
-                T3 x = *(X + i * incx);
-                val += alphaZ * a * x;
-            }
-
-            if (betaZ)
-                *y = val + betaZ * *y;
-            else
-                *y = val;
-        }
-    };
-
-    samediff::Threads::parallel_for(func, 0, M);
-}
+static void usualGemm(const NDArray* vA, const NDArray* vB, NDArray* vC,
+                      const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis,
+                      const double alpha, const double beta) {
+
+    const T1* A = vA->bufferAsT<T1>();
+    const T2* B = vB->bufferAsT<T2>();
+    T3* C = vC->bufferAsT<T3>();
+
+    const T3 alphaZ = alpha;
+    const T3 betaZ  = beta;
+
+    const bool betaPersent = beta;
+
+    const Nd4jLong* aShapeInfo = vA->getShapeInfo();
+    const Nd4jLong* bShapeInfo = vB->getShapeInfo();
+    const Nd4jLong* cShapeInfo = vC->getShapeInfo();
+
+    const int aRank = vA->rankOf();
+    const int bRank = vB->rankOf();
+    const int cRank = vC->rankOf();
+
+    const Nd4jLong cLen = vC->lengthOf();
+
+    const int K = vA->sizeAt(aKaxis);
+
+    auto func = PRAGMA_THREADS_FOR {
+
+        std::vector<Nd4jLong> aCoords(2), bCoords(2), cCoords(2);
+
+        for (auto i = start; i < stop; ++i) {
+
+            // evaluate C coordinates
+            shape::index2coords(i, cShapeInfo, cCoords.data());
+
+            // evaluate A coordinates
+            aCoords[aMaxis] = cCoords[cMaxis];
+            aCoords[aKaxis] = 0;
+
+            // evaluate B coordinates
+            bCoords[bKaxis] = 0;
+            bCoords[bNaxis] = cCoords[cNaxis];
+
+            auto aOffset = shape::getOffset(aShapeInfo, aCoords.data());
+            auto bOffset = shape::getOffset(bShapeInfo, bCoords.data());
+
+            T3 val = A[aOffset] * B[bOffset];                    // first iteration
+
+            for (uint j = 1; j < K; ++j) {                       // rest iterations
+                aOffset += shape::stride(aShapeInfo)[aKaxis];
+                bOffset += shape::stride(bShapeInfo)[bKaxis];
+                val = val + A[aOffset] * B[bOffset];
+            }
+
+            auto cOffset = shape::getOffset(cShapeInfo, cCoords.data());
+
+            if(betaPersent)
+                C[cOffset] = alphaZ * val + betaZ * C[cOffset];
+            else
+                C[cOffset] = alphaZ * val;
+        }
+    };
+
+    samediff::Threads::parallel_tad(func, 0, cLen);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// MXN x N = M -> actual sequence of {M,N} axes doesn't matter
+template <typename T1, typename T2, typename T3>
+static void usualGemv(const NDArray* vA, const NDArray* vX, NDArray* vY, const int incx, const int incy, const int aMaxis, const double alpha, const double beta) {
+
+    const T1* A = vA->bufferAsT<T1>();
+    const T2* X = vX->bufferAsT<T2>();
+    T3* Y = vY->bufferAsT<T3>();
+
+    const T3 alphaZ = alpha;
+    const T3 betaZ  = beta;
+
+    const bool betaPersent = beta;
+
+    const Nd4jLong* aShapeInfo = vA->getShapeInfo();
+    const Nd4jLong* xShapeInfo = vX->getShapeInfo();
+    const Nd4jLong* yShapeInfo = vY->getShapeInfo();
+
+    const int N = vX->lengthOf();
+    const int M = vY->lengthOf();
+
+    const auto aMstride = vA->strideAt(aMaxis);
+    const auto aNstride = vA->strideAt(aMaxis == 0 ? 1 : 0);
+
+    auto func = PRAGMA_THREADS_FOR {
+
+        for (auto i = start; i < stop; ++i) {
+
+            // evaluate offsets
+            auto aOffset = i * aMstride;
+            auto xOffset = 0;
+
+            T3 val = A[aOffset] * X[xOffset];                    // first iteration
+
+            for (uint j = 1; j < N; ++j) {                       // rest iterations
+                aOffset += aNstride;
+                xOffset += incx;
+                val = val + A[aOffset] * X[xOffset];
+            }
+
+            auto yOffset = i * incy;
+
+            if(betaPersent)
+                Y[yOffset] = alphaZ * val + betaZ * Y[yOffset];
+            else
+                Y[yOffset] = alphaZ * val;
+        }
+    };
+
+    samediff::Threads::parallel_tad(func, 0, M);
+}

 //////////////////////////////////////////////////////////////////////////////
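The heart of the rewrite above: instead of order/transpose flags and leading dimensions, the new usualGemm resolves each output element to coordinates and then walks A and B along their K-axis strides, so arbitrary layouts need no copies. A stand-alone sketch of the same idea (plain C++, not libnd4j code; all names here are illustrative):

#include <cstdint>

// For every C[m,n], start at A[m,0] and B[0,n] and advance by the K-axis
// strides; layout (row-major, column-major, transposed views) is absorbed
// entirely by the six stride arguments.
void gemmStrided(const float* A, const float* B, float* C,
                 int64_t M, int64_t N, int64_t K,
                 int64_t aMs, int64_t aKs,    // A strides along M and K
                 int64_t bKs, int64_t bNs,    // B strides along K and N
                 int64_t cMs, int64_t cNs,    // C strides along M and N
                 float alpha, float beta) {
    for (int64_t m = 0; m < M; ++m)
        for (int64_t n = 0; n < N; ++n) {
            int64_t aOff = m * aMs, bOff = n * bNs;
            float val = A[aOff] * B[bOff];              // first iteration
            for (int64_t k = 1; k < K; ++k) {           // rest iterations
                aOff += aKs;
                bOff += bKs;
                val += A[aOff] * B[bOff];
            }
            int64_t cOff = m * cMs + n * cNs;
            C[cOff] = (beta != 0.f) ? alpha * val + beta * C[cOff]
                                    : alpha * val;
        }
}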
@@ -144,12 +159,17 @@ static void usualDot(const Nd4jLong length, const double alpha, const void* vX,
     T3* Z = reinterpret_cast<T3*>(vZ);
     T3 alphaZ(alpha), betaZ(beta);

+    const bool betaPersent = beta;
+
     T3 sum = 0;
     PRAGMA_OMP_PARALLEL_FOR_ARGS(OMP_IF(length > Environment::getInstance()->elementwiseThreshold()) schedule(guided) reduction(OMP_SUMT:sum))
     for(int i = 0; i < length; ++i)
         sum += X[i * incx] * Y[i * incy];

-    *Z = alphaZ * sum + betaZ * *Z;
+    if(betaPersent)
+        *Z = alphaZ * sum + betaZ * *Z;
+    else
+        *Z = alphaZ * sum;
 }

 //////////////////////////////////////////////////////////////////////////////
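The betaPersent guard added above is what the commit message calls taking possible nans in the C array into account: with beta == 0 the old unconditional form still read the output buffer, and IEEE arithmetic propagates garbage from uninitialized memory. A minimal demonstration:

#include <cmath>
#include <cstdio>

int main() {
    double z = std::nan("");                  // pretend Z was never initialized
    double sum = 3.0, alpha = 1.0, beta = 0.0;
    double bad  = alpha * sum + beta * z;     // 0 * NaN == NaN, so this is NaN
    double good = (beta != 0.0) ? alpha * sum + beta * z : alpha * sum;
    std::printf("bad=%f good=%f\n", bad, good);   // bad=nan good=3.000000
    return 0;
}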
@@ -164,16 +184,15 @@ NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, con
     if(A->rankOf() != 2)
         throw std::runtime_error("MmulHelper::mmulMxM: rank of A array is not equal 2 !");
     if(B->rankOf() != 2)
         throw std::runtime_error("MmulHelper::mmulMxM: rank of B array is not equal 2 !");

     const auto M = A->sizeAt(0);
     const auto K = A->sizeAt(1);
     const auto N = B->sizeAt(1);
-    const auto bRows = B->sizeAt(0);

     if(C != nullptr && C->rankOf() != 2)
         throw std::runtime_error("MmulHelper::mmulMxM: rank of C array is not equal 2 !");
-    if(bRows != K)
+    if(B->sizeAt(0) != K)
         throw std::runtime_error("MmulHelper::mmulMxM: B array has wrong number of rows !");
     if(C != nullptr && C->sizeAt(0) != M)
         throw std::runtime_error("MmulHelper::mmulMxM: C array has wrong number of rows !");
@@ -181,61 +200,79 @@ NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, con
         throw std::runtime_error("MmulHelper::mmulMxM: C array has wrong number of columns !");

     if(C == nullptr)
         C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext());

-    NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
-
-    const auto cOrder = C->ordering();
-
-    if(A->ews() != 1)
-        pA = pA->dup(cOrder);
-    if(B->ews() != 1)
-        pB = pB->dup(cOrder);
-    if(C->ews() != 1)
-        pC = pC->dup(cOrder);
-
-    const auto aOrder = pA->ordering();
-    const auto bOrder = pB->ordering();
-
-    const bool transA = aOrder != cOrder;
-    const bool transB = bOrder != cOrder;
-
-    const CBLAS_ORDER blasOrder = cOrder == 'f' ? CblasColMajor : CblasRowMajor;
-    const CBLAS_TRANSPOSE transAblas = transA ? CblasTrans : CblasNoTrans;
-    const CBLAS_TRANSPOSE transBblas = transB ? CblasTrans : CblasNoTrans;
-
-    const int lda = aOrder == 'f' ? M : K;
-    const int ldb = bOrder == 'f' ? K : N;
-    const int ldc = cOrder == 'f' ? M : N;
-
-    const auto aType = pA->dataType();
-    const auto bType = pB->dataType();
-    const auto cType = pC->dataType();
+    const auto aType = A->dataType();
+    const auto bType = B->dataType();
+    const auto cType = C->dataType();

     const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
     const bool hasGemm = BlasHelper::getInstance()->hasGEMM(aType);

-    // we'll use platform-specific gemm here eventually. maybe tomorrow.
-    // TODO: put proper _gemm here
-    if (ABC && hasGemm && aType == DataType::FLOAT32) {
-        BlasHelper::getInstance()->sgemm()(blasOrder, transAblas, transBblas, M, N, K, (float) alpha, reinterpret_cast<float *>(pA->getBuffer()), lda, reinterpret_cast<float *>(pB->getBuffer()), ldb, (float) beta, reinterpret_cast<float *>(pC->getBuffer()), ldc);
-    }
-    else if (ABC && hasGemm && aType == DataType::DOUBLE) {
-        BlasHelper::getInstance()->dgemm()(blasOrder, transAblas, transBblas, M, N, K, (double) alpha, reinterpret_cast<double *>(pA->getBuffer()), lda, reinterpret_cast<double *>(pB->getBuffer()), ldb, (double) beta, reinterpret_cast<double *>(pC->getBuffer()), ldc);
+    const bool typeDouble = hasGemm && ABC && aType == DataType::DOUBLE;
+    const bool typeFloat  = hasGemm && ABC && aType == DataType::FLOAT32;
+
+    if(!typeFloat && !typeDouble) {
+        BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (A, B, C, 0, 1, 0, 1, 0, 1, alpha, beta), NUMERIC_TYPES);
+        // BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (A, B, C, 0, 1, 0, 1, 0, 1, alpha, beta), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
     }
     else {
-        BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (cOrder, transA, transB, M, N, K, alpha, pA->getBuffer(), lda, pB->getBuffer(), ldb, beta, pC->getBuffer(), ldc), NUMERIC_TYPES);
-        //BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (cOrder, transA, transB, M, N, K, alpha, pA->getBuffer(), lda, pB->getBuffer(), ldb, beta, pC->getBuffer(), ldc), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
-    }

-    if(pC != C) {
-        C->assign(pC);
-        delete pC;
+        std::vector<NDArray*> toDelete;
+
+        NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
+
+        bool aMcont = M == 1 || A->strideAt(0) == 1;
+        bool aKcont = K == 1 || A->strideAt(1) == 1;
+        bool bKcont = K == 1 || B->strideAt(0) == 1;
+        bool bNcont = N == 1 || B->strideAt(1) == 1;
+        bool cMcont = M == 1 || C->strideAt(0) == 1;
+        bool cNcont = N == 1 || C->strideAt(1) == 1;
+
+        if(!aMcont && !aKcont) {
+            pA = A->dup('f');
+            toDelete.push_back(pA);
+            aMcont = true;
+        }
+        if(!bKcont && !bNcont) {
+            pB = B->dup('f');
+            toDelete.push_back(pB);
+            bKcont = true;
+        }
+        if(!cMcont && !cNcont) {
+            pC = C->dup('f');
+            toDelete.push_back(pC);
+            cMcont = true;
+        }
+
+        const CBLAS_ORDER blasOrder = cMcont ? CblasColMajor : CblasRowMajor;
+
+        const bool transA = (!aMcont && cMcont) || (aMcont && !cMcont);
+        const bool transB = (!bKcont && cMcont) || (bKcont && !cMcont);
+
+        const CBLAS_TRANSPOSE transAblas = transA ? CblasTrans : CblasNoTrans;
+        const CBLAS_TRANSPOSE transBblas = transB ? CblasTrans : CblasNoTrans;
+
+        const int lda = (aMcont && aKcont) ? M : !aMcont ? pA->strideAt(0) : pA->strideAt(1);
+        const int ldb = (bKcont && bNcont) ? K : !bKcont ? pB->strideAt(0) : pB->strideAt(1);
+        const int ldc = (cMcont && cNcont) ? M : !cMcont ? pC->strideAt(0) : pC->strideAt(1);
+
+        if(typeFloat) {
+            BlasHelper::getInstance()->sgemm()(blasOrder, transAblas, transBblas, M, N, K, (float) alpha, reinterpret_cast<float *>(pA->getBuffer()), lda, reinterpret_cast<float *>(pB->getBuffer()), ldb, (float) beta, reinterpret_cast<float *>(pC->getBuffer()), ldc);
+        }
+        else if(typeDouble) {
+            BlasHelper::getInstance()->dgemm()(blasOrder, transAblas, transBblas, M, N, K, (double) alpha, reinterpret_cast<double *>(pA->getBuffer()), lda, reinterpret_cast<double *>(pB->getBuffer()), ldb, (double) beta, reinterpret_cast<double *>(pC->getBuffer()), ldc);
+        }
+
+        if(pC != C) {
+            C->assign(pC);
+            delete pC;
+        }
+        if(pA != A)
+            delete pA;
+        if(pB != B)
+            delete pB;
     }
-    if(pA != A)
-        delete pA;
-    if(pB != B)
-        delete pB;

     return C;
 }
@@ -243,6 +280,7 @@ NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, con
 ////////////////////////////////////////////////////////////////////////////
 // MXN x N = M
 NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) {

     if (X->dataType() != A->dataType())
         throw datatype_exception::build("mmulMxV expects all data types to be the same", A->dataType(), X->dataType());
@@ -254,56 +292,65 @@ NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray*
     if(A->rankOf() != 2)
         throw std::runtime_error("MmulHelper::mmulMxV: rank of A array is not equal 2 !");
     if(!shape::isCommonVector(X->getShapeInfo(), xLenDim))
         throw std::runtime_error("MmulHelper::mmulMxV: X array must be vector !");

     const auto M = A->sizeAt(0);
     const auto N = A->sizeAt(1);

     if(Y != nullptr && !shape::isCommonVector(Y->getShapeInfo(), yLenDim))
         throw std::runtime_error("MmulHelper::mmulMxV: Y array must be vector !");
     if(X->lengthOf() != N)
         throw std::runtime_error("MmulHelper::mmulMxV: X vector has wrong length !");
     if(Y != nullptr && Y->lengthOf() != M)
         throw std::runtime_error("MmulHelper::mmulMxV: Y array has wrong length !");

     if(Y == nullptr)
         Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext());

-    NDArray *pA(const_cast<NDArray*>(A));
-
-    if(A->ews() != 1)
-        pA = pA->dup();
-
-    CBLAS_ORDER blasOrder;
-    int lda;
-    if (pA->ordering() == 'f') {blasOrder = CblasColMajor; lda = M; }
-    else                       {blasOrder = CblasRowMajor; lda = N; }
-
     const int incx = X->stridesOf()[xLenDim];
     const int incy = Y->stridesOf()[yLenDim];

-    const auto aType = pA->dataType();
+    const auto aType = A->dataType();
     const auto xType = X->dataType();
     const auto yType = Y->dataType();

     const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
     const bool hasGemv = BlasHelper::getInstance()->hasGEMV(aType);

-    // choose appropriate cuda gemm api depending on data types
-    if(AXY && hasGemv && aType == DataType::DOUBLE) {
-        BlasHelper::getInstance()->dgemv()(blasOrder, CblasNoTrans, M, N, alpha, (double*)pA->getBuffer(), lda, (double*)X->getBuffer(), incx, beta, (double*)Y->getBuffer(), incy);
-    }
-    else if(AXY && hasGemv && aType == DataType::FLOAT32) {
-        BlasHelper::getInstance()->sgemv()(blasOrder, CblasNoTrans, M, N, (float)alpha, (float*)pA->getBuffer(), lda, (float*)X->getBuffer(), incx, (float)beta, (float*)Y->getBuffer(), incy);
+    const bool typeDouble = hasGemv && AXY && aType == DataType::DOUBLE;
+    const bool typeFloat  = hasGemv && AXY && aType == DataType::FLOAT32;
+
+    if(!typeDouble && !typeFloat) {
+        BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemv, (A, X, Y, incx, incy, 0, alpha, beta), NUMERIC_TYPES);
+        // BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (A, X, Y, incx, incy, 0, alpha, beta), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
     }
     else {
-        BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemv, (pA->ordering(), M, N, alpha, pA->getBuffer(), lda, X->getBuffer(), incx, beta, Y->getBuffer(), incy), NUMERIC_TYPES);
-        //BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (pA->ordering(), M, N, alpha, pA->getBuffer(), lda, X->getBuffer(), incx, beta, Y->getBuffer(), incy), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
-    }

-    if(pA != A)
-        delete pA;
+        NDArray *pA(const_cast<NDArray*>(A));
+
+        bool aMcont = M == 1 || A->strideAt(0) == 1;
+        bool aNcont = N == 1 || A->strideAt(1) == 1;
+
+        if(!aMcont && !aNcont) {
+            pA = A->dup('f');
+            aMcont = true;
+        }
+        const CBLAS_ORDER blasOrder = aMcont ? CblasColMajor : CblasRowMajor;
+
+        const int lda = (aMcont && aNcont) ? M : !aMcont ? pA->strideAt(0) : pA->strideAt(1);
+
+        // choose appropriate cuda gemm api depending on data types
+        if(typeDouble) {
+            BlasHelper::getInstance()->dgemv()(blasOrder, CblasNoTrans, M, N, alpha, (double*)pA->getBuffer(), lda, (double*)X->getBuffer(), incx, beta, (double*)Y->getBuffer(), incy);
+        }
+        else if(typeFloat) {
+            BlasHelper::getInstance()->sgemv()(blasOrder, CblasNoTrans, M, N, (float)alpha, (float*)pA->getBuffer(), lda, (float*)X->getBuffer(), incx, (float)beta, (float*)Y->getBuffer(), incy);
+        }
+
+        if(pA != A)
+            delete pA;
+    }

     return Y;
 }
@@ -330,22 +377,327 @@ NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, c
     if(Y->lengthOf() != length)
         throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");

     if(Z == nullptr)
         Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());

     const Nd4jLong incx = X->stridesOf()[xLenDim];
     const Nd4jLong incy = Y->stridesOf()[yLenDim];

     const auto xType = X->dataType();
     const auto yType = Y->dataType();
     const auto zType = Z->dataType();

     BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (length, alpha, X->getBuffer(), incx, Y->getBuffer(), incy, beta, Z->getBuffer()), NUMERIC_TYPES);
     //BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (length, alpha, X->getBuffer(), incx, Y->getBuffer(), incy, beta, Z->getBuffer()), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);

     return Z;
 }
+
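For context, usualDot above reduces over strided vectors; incx and incy let it consume non-contiguous inputs such as matrix rows or columns. An equivalent stand-alone form (illustrative only, not library code):

float stridedDot(const float* X, long incx, const float* Y, long incy, long n) {
    float sum = 0.f;
    for (long i = 0; i < n; ++i)
        sum += X[i * incx] * Y[i * incy];   // gather with independent strides
    return sum;
}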
+//////////////////////////////////////////////////////////////////////////////
+// [bS,M,K] x [bS,K,N] = [bS,M,N]
+// [bS,M,K] x [K,N] = [bS,M,N]
+// [M,K] x [bS,K,N] = [bS,M,N]
+// bS could stand for several axes
+template <typename T1, typename T2, typename T3>
+static void batchedGemm(const NDArray* vA, const NDArray* vB, NDArray* vC,
+                        const int* aBatchDims, const int* bBatchDims, const int* cBatchDims,
+                        const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis, const int cNaxis,
+                        const double alpha, const double beta) {
+
+    const T1* A = vA->bufferAsT<T1>();
+    const T2* B = vB->bufferAsT<T2>();
+    T3* C = vC->bufferAsT<T3>();
+
+    const T3 alphaZ = alpha;
+    const T3 betaZ  = beta;
+
+    const bool betaPersent = beta;
+
+    const Nd4jLong* aShapeInfo = vA->getShapeInfo();
+    const Nd4jLong* bShapeInfo = vB->getShapeInfo();
+    const Nd4jLong* cShapeInfo = vC->getShapeInfo();
+
+    const int aRank = vA->rankOf();
+    const int bRank = vB->rankOf();
+    const int cRank = vC->rankOf();
+
+    const Nd4jLong cLen = vC->lengthOf();
+
+    const int K = vA->sizeAt(aKaxis);
+
+    auto func = PRAGMA_THREADS_FOR {
+
+        std::vector<Nd4jLong> aCoords(aRank), bCoords(bRank), cCoords(cRank);
+
+        for (auto i = start; i < stop; ++i) {
+
+            // evaluate C coordinates
+            shape::index2coords(i, cShapeInfo, cCoords.data());
+
+            // calculate index of current batch
+            Nd4jLong batchInd;
+            if(cRank > 2)
+                batchInd = shape::coords2index(cShapeInfo, cCoords.data(), cRank - 2, cBatchDims);
+
+            // evaluate A coordinates
+            if(aRank > 2)
+                shape::index2coords(batchInd, aShapeInfo, aCoords.data(), aRank - 2, aBatchDims);
+            aCoords[aMaxis] = cCoords[cMaxis];
+            aCoords[aKaxis] = 0;
+
+            // evaluate B coordinates
+            if(bRank > 2)
+                shape::index2coords(batchInd, bShapeInfo, bCoords.data(), bRank - 2, bBatchDims);
+            bCoords[bKaxis] = 0;
+            bCoords[bNaxis] = cCoords[cNaxis];
+
+            auto aOffset = shape::getOffset(aShapeInfo, aCoords.data());
+            auto bOffset = shape::getOffset(bShapeInfo, bCoords.data());
+
+            T3 val = A[aOffset] * B[bOffset];                    // first iteration
+
+            for (uint j = 1; j < K; ++j) {                       // rest iterations
+                aOffset += shape::stride(aShapeInfo)[aKaxis];
+                bOffset += shape::stride(bShapeInfo)[bKaxis];
+                val = val + A[aOffset] * B[bOffset];
+            }
+
+            auto cOffset = shape::getOffset(cShapeInfo, cCoords.data());
+
+            if(betaPersent)
+                C[cOffset] = alphaZ * val + betaZ * C[cOffset];
+            else
+                C[cOffset] = alphaZ * val;
+        }
+    };
+
+    samediff::Threads::parallel_tad(func, 0, cLen);
+}
+
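The batch bookkeeping in batchedGemm above works by flattening C's batch coordinates to a single index and unflattening it into each operand's batch axes; a rank-2 operand has no batch axes and simply ignores the index, which is how the [M,K] x [bS,K,N] broadcast case falls out. A worked example (stand-alone, illustrative shapes):

#include <cassert>

int main() {
    // C = [2,5,M,N], so cBatchDims covers the shape {2,5}
    long cCoords[] = {1, 3};                          // current C batch coords
    long batchInd = cCoords[0] * 5 + cCoords[1];      // flatten: 1*5 + 3 = 8
    assert(batchInd == 8);
    // rank-4 A = [2,5,M,K]: unflatten batchInd back into {1,3}
    long aCoords[] = {batchInd / 5, batchInd % 5};
    assert(aCoords[0] == 1 && aCoords[1] == 3);
    // rank-2 B = [K,N]: no batch axes, batchInd is ignored entirely
    return 0;
}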
+//////////////////////////////////////////////////////////////////////////
+// [bS,M,K] x [bS,K,N] = [bS,M,N]
+// [bS,M,K] x [K,N] = [bS,M,N]
+// [M,K] x [bS,K,N] = [bS,M,N]
+// bS could stand for several axes
+NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
+
+    const int aRank = A->rankOf();
+    const int bRank = B->rankOf();
+
+    // input ranks validation
+    if(aRank > bRank && bRank != 2)
+        throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
+    else if(bRank > aRank && aRank != 2)
+        throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
+    else if (aRank == bRank ) {
+        for(int i = 0; i < aRank - 2; ++i)
+            if(A->sizeAt(i) != B->sizeAt(i))
+                throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
+    }
+
+    if(A->sizeAt(-1) != B->sizeAt(-2))
+        throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
+
+    // validation of C array
+    std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
+    cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
+    cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
+
+    if(C != nullptr ) {
+        if(!C->isSameShape(cExpectedShape))
+            throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
+    }
+    else {
+        C = new NDArray(outOrder, cExpectedShape, B->dataType());
+    }
+
+    const int cRank = C->rankOf();
+
+    const int aMaxis(aRank-2), aKaxis(aRank-1), bKaxis(bRank-2), bNaxis(bRank-1), cMaxis(cRank-2), cNaxis(cRank-1);
+
+    std::vector<int> aBatchDims, bBatchDims, cBatchDims;
+
+    if(aRank > 2)
+        aBatchDims = ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis});
+    if(bRank > 2)
+        bBatchDims = ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis});
+    if(cRank > 2)
+        cBatchDims = ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis});
+
+    // BUILD_TRIPLE_SELECTOR(A->dataType(), B->dataType(), C->dataType(), batchedGemm, (A, B, C, aBatchDims.data(), bBatchDims.data(), cBatchDims.data(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
+    BUILD_SINGLE_SELECTOR_THRICE(A->dataType(), batchedGemm, (A, B, C, aBatchDims.data(), bBatchDims.data(), cBatchDims.data(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta), NUMERIC_TYPES);
+
+    return C;
+}
+
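A usage sketch of the new mmulNxN path (the shapes follow the comment block above; argument values are spelled out rather than relying on any defaults):

auto A = NDArrayFactory::create<float>('c', {2, 5, 3, 4});      // [bS1,bS2,M,K]
auto B = NDArrayFactory::create<float>('c', {4, 7});            // [K,N]
auto C = MmulHelper::mmulNxN(&A, &B, nullptr, 1.0, 0.0, 'f');   // -> [2,5,3,7]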
+/*
+//////////////////////////////////////////////////////////////////////////
+NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
+
+    const int aRank = A->rankOf();
+    const int bRank = B->rankOf();
+
+    // input ranks validation
+    if(aRank > bRank && bRank != 2)
+        throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
+    else if(bRank > aRank && aRank != 2)
+        throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
+    else if (aRank == bRank ) {
+        for(int i = 0; i < aRank - 2; ++i)
+            if(A->sizeAt(i) != B->sizeAt(i))
+                throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
+    }
+
+    if(A->sizeAt(-1) != B->sizeAt(-2))
+        throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
+
+    // validation of C array
+    std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
+    cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
+    cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
+
+    if(C != nullptr ) {
+        if(!C->isSameShape(cExpectedShape))
+            throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
+    }
+    else {
+        C = new NDArray(outOrder, cExpectedShape, B->dataType());
+    }
+
+    // multiplication
+    const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1});
+    const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->getShapeInfo(), dimsToExclude);
+    std::vector<Nd4jLong> idxRanges(2 * C->rankOf());
+
+    // #pragma omp parallel for schedule(guided) firstprivate(idxRanges)
+    for(Nd4jLong i = 0; i < numOfSubArrs; ++i) {
+
+        ShapeUtils::evalIdxRangesForSubArr(i, C->getShapeInfo(), dimsToExclude, idxRanges.data());
+        NDArray cSubArr = (*C)(idxRanges);
+
+        if(aRank > bRank) {
+            NDArray aSubArr = (*A)(idxRanges);
+            mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder);
+        }
+        else if(bRank > aRank) {
+            NDArray bSubArr = (*B)(idxRanges);
+            mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder);
+        }
+        else {
+            NDArray aSubArr = (*A)(idxRanges);
+            NDArray bSubArr = (*B)(idxRanges);
+            mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder);
+        }
+    }
+
+    return C;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// MXK x KxN = MxN
+template <typename T1, typename T2, typename T3>
+static void usualGemm(const char cOrder, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
+
+    T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
+    T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
+    T3* C = reinterpret_cast<T3*>(vC);
+    T3 alphaZ(alpha), betaZ(beta);
+
+    const bool flagC = cOrder == 'f';
+    const bool flagA = (flagC && transA) || (!flagC && !transA);
+    const bool flagB = (flagC && transB) || (!flagC && !transB);
+
+    // PRAGMA_OMP_PARALLEL_FOR_ARGS(OMP_IF(M*N > Environment::getInstance()->elementwiseThreshold()) schedule(guided))
+    // for(uint row = 0; row < M; ++row) {
+
+    //     T3* c = flagC ? (C + row) : (C + row * ldc);
+
+    //     for(uint col = 0; col < N; ++col)
+    //         c[flagC ? col * ldc : col] = 0;
+
+    //     for(uint i = 0; i < K; ++i) {
+
+    //         T3* b = flagB ? (B + i * ldb) : (B + i);
+    //         T3* a = flagA ? (A + row * lda + i) : (A + row + i * lda);
+
+    //         if(flagC) {
+    //             for(uint col = 0; col < N; ++col) {
+    //                 if(betaZ)
+    //                     c[col * ldc] += a * b[flagB ? col : col * ldb] + betaZ * c[col * ldc];
+    //                 else
+    //                     c[col * ldc] += a * b[flagB ? col : col * ldb];
+    //             }
+    //         }
+    //         else {
+    //             for(uint col = 0; col < N; ++col) {
+    //                 if(betaZ)
+    //                     c[col] += a * b[flagB ? col : col * ldb] + betaZ * c[col];
+    //                 else
+    //                     c[col] += a * b[flagB ? col : col * ldb];
+    //             }
+    //         }
+    //     }
+    // }
+
+    auto func = PRAGMA_THREADS_FOR_2D { ;
+        for (auto row = start_x; row < stop_x; row += inc_x) {
+            for (auto col = start_y; col < stop_y; col += inc_y) {
+                T3 *c = flagC ? (C + row + col * ldc) : (C + row * ldc + col);
+                T3 val = 0;
+
+                for (uint i = 0; i < K; ++i) {
+                    T3 a = flagA ? *(A + row * lda + i) : *(A + row + i * lda);
+                    T3 b = flagB ? *(B + col + i * ldb) : *(B + col * ldb + i);
+                    val += alphaZ * a * b;
+                }
+
+                if (betaZ)
+                    *c = val + betaZ * *c;
+                else
+                    *c = val;
+            }
+        }
+    };
+
+    samediff::Threads::parallel_tad(func, 0, M, 1, 0, N, 1);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// MXN x N = M
+template <typename T1, typename T2, typename T3>
+static void usualGemv(const char aOrder, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
+
+    T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
+    T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
+    T3* Y = reinterpret_cast<T3*>(vY);
+    T3 alphaZ(alpha), betaZ(beta);
+
+    const bool flagA = aOrder == 'f';
+
+    auto func = PRAGMA_THREADS_FOR {
+        for (auto row = start; row < stop; row += increment) {
+
+            T3 *y = Y + row * incy;
+            T3 val = 0;
+
+            for (int i = 0; i < N; ++i) {
+                T3 a = flagA ? *(A + row + i * lda) : *(A + row * lda + i);
+                T3 x = *(X + i * incx);
+                val += alphaZ * a * x;
+            }
+
+            if (betaZ)
+                *y = val + betaZ * *y;
+            else
+                *y = val;
+        }
+    };
+
+    samediff::Threads::parallel_tad(func, 0, M);
+}
+*/

 //BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const char cOrder, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* A, const int lda, const void* B, const int ldb, const double beta, void* C, const int ldc), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
 //BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const char aOrder, const int M, const int N, const double alpha, const void* A, const int lda, const void* B, const int incx, const double beta, void* C, const int incy), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
 //BUILD_TRIPLE_TEMPLATE(template void usualDot, (const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
File diff suppressed because it is too large
@@ -184,69 +184,6 @@ NDArray* nd4j::MmulHelper::tensorDot(const nd4j::NDArray* a, const nd4j::NDArray
 #endif

-//////////////////////////////////////////////////////////////////////////
-NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta, const char outOrder) {
-
-    const int aRank = A->rankOf();
-    const int bRank = B->rankOf();
-
-    // input ranks validation
-    if(aRank > bRank && bRank != 2)
-        throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
-    else if(bRank > aRank && aRank != 2)
-        throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
-    else if (aRank == bRank ) {
-        for(int i = 0; i < aRank - 2; ++i)
-            if(A->sizeAt(i) != B->sizeAt(i))
-                throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
-    }
-
-    if(A->sizeAt(-1) != B->sizeAt(-2))
-        throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
-
-    // validation of C array
-    std::vector<Nd4jLong> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
-    cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
-    cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
-
-    if(C != nullptr ) {
-        if(!C->isSameShape(cExpectedShape))
-            throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
-    }
-    else {
-        C = new NDArray(outOrder, cExpectedShape, B->dataType());
-    }
-
-    // multiplication
-    const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1});
-    const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->getShapeInfo(), dimsToExclude);
-    std::vector<Nd4jLong> idxRanges(2 * C->rankOf());
-
-    // #pragma omp parallel for schedule(guided) firstprivate(idxRanges)
-    for(Nd4jLong i = 0; i < numOfSubArrs; ++i) {
-
-        ShapeUtils::evalIdxRangesForSubArr(i, C->getShapeInfo(), dimsToExclude, idxRanges.data());
-        NDArray cSubArr = (*C)(idxRanges);
-
-        if(aRank > bRank) {
-            NDArray aSubArr = (*A)(idxRanges);
-            mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder);
-        }
-        else if(bRank > aRank) {
-            NDArray bSubArr = (*B)(idxRanges);
-            mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder);
-        }
-        else {
-            NDArray aSubArr = (*A)(idxRanges);
-            NDArray bSubArr = (*B)(idxRanges);
-            mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder);
-        }
-    }
-
-    return C;
-}
-
 //////////////////////////////////////////////////////////////////////////
 nd4j::NDArray* MmulHelper::mmul(const nd4j::NDArray* A, const nd4j::NDArray* B, nd4j::NDArray* C , const double alpha, const double beta, const char outOrder) {
@@ -901,6 +901,10 @@ namespace shape {
     */
     ND4J_EXPORT _CUDA_HD void index2coords(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong *coords);
     ND4J_EXPORT _CUDA_HD void index2coords(Nd4jLong index, const int rank, const Nd4jLong *shape, Nd4jLong *coords);
+    /**
+    * take into account only dimensions stored in tadDims, tadDims must be sorted in increasing order!
+    */
+    ND4J_EXPORT _CUDA_HD void index2coords(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong *coords, const int dimsSize, const int* tadDims);


@@ -910,6 +914,10 @@ namespace shape {
     */
     ND4J_EXPORT _CUDA_HD Nd4jLong coords2index(const Nd4jLong *shapeInfo, const Nd4jLong *coords);
     ND4J_EXPORT _CUDA_HD Nd4jLong coords2index(const int rank, const Nd4jLong *shape, const Nd4jLong *coords);
+    /**
+    * take into account only dimensions stored in tadDims, tadDims must be sorted in increasing order!
+    */
+    ND4J_EXPORT _CUDA_HD Nd4jLong coords2index(const Nd4jLong *shapeInfo, const Nd4jLong *coords, const int dimsSize, const int* tadDims);

     /**
     * increment n-dimensional array by one iteration by changing coord appropriately
@@ -1762,6 +1770,19 @@ INLINEDEF _CUDA_HD Nd4jLong coords2index(const int rank, const Nd4jLong *shape,
     return index;
 }

+INLINEDEF _CUDA_HD Nd4jLong coords2index(const Nd4jLong *shapeInfo, const Nd4jLong *coords, const int dimsSize, const int* tadDims) {
+
+    Nd4jLong index, shift = 1;;
+
+    index = coords[tadDims[dimsSize - 1]];
+    for(uint i = dimsSize - 1; i >= 1; --i) {
+        shift *= shapeInfo[tadDims[i]];
+        index += shift * coords[i - 1];
+    }
+
+    return index;
+}
+
 template <typename T>
 INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
@@ -3957,9 +3978,13 @@ INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo,
         oldStart = oldStop++;
     }

-    newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo);                 // order
-    newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo);     // ews
-    newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo);                  // type
+    // rest of strides should be unities (if there is remainder in strides space, that is newStart < newRank)
+    for (int i = newStart; i < newRank; ++i)
+        newStrides[i] = 1;
+
+    newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo);                 // order
+    newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo);     // ews
+    newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo);                  // type

     return true;
 }
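The loop added above covers reshapes whose trailing axes are not reached by the stride-matching pass (newStart < newRank), e.g. appending length-1 axes: those strides were previously left unset and must be unities. An illustrative miniature of the fix:

#include <cstdio>

int main() {
    long newStrides[4] = {3, 1, -1, -1};   // first two set by the matching pass
    int  newStart = 2, newRank = 4;        // two trailing length-1 axes remain
    for (int i = newStart; i < newRank; ++i)
        newStrides[i] = 1;                 // the added fix
    std::printf("%ld %ld %ld %ld\n", newStrides[0], newStrides[1],
                newStrides[2], newStrides[3]);   // prints: 3 1 1 1
    return 0;
}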
@@ -4705,6 +4730,16 @@ INLINEDEF void _CUDA_HD index2coords(Nd4jLong index, const int rank, const Nd4jL
     coords[0] = index; // last iteration
 }

+//////////////////////////////////////////////////////////////////////
+INLINEDEF void _CUDA_HD index2coords(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong *coords, const int dimsSize, const int* tadDims) {
+
+    for(uint i = dimsSize - 1; i > 0; --i) {
+        coords[tadDims[i]] = index % shapeInfo[1 + tadDims[i]];
+        index /= shapeInfo[1 + tadDims[i]];
+    }
+    coords[tadDims[0]] = index; // last iteration
+}
+
 //////////////////////////////////////////////////////////////////////
 INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order) {
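Taken together, the two overloads added in this commit flatten and unflatten an index over a chosen subset of dimensions, which is exactly what batchedGemm needs for its batch axes. A stand-alone sketch of the pair (plain arrays instead of shapeInfo, so the 1 + tadDims[i] shape offset is dropped; names are illustrative):

#include <cassert>

long coordsToIndexOver(const long* shape, const long* coords,
                       int dimsSize, const int* tadDims) {
    long index = 0;
    for (int i = 0; i < dimsSize; ++i)          // row-major over tadDims only
        index = index * shape[tadDims[i]] + coords[tadDims[i]];
    return index;
}

void indexToCoordsOver(long index, const long* shape, long* coords,
                       int dimsSize, const int* tadDims) {
    for (int i = dimsSize - 1; i > 0; --i) {
        coords[tadDims[i]] = index % shape[tadDims[i]];
        index /= shape[tadDims[i]];
    }
    coords[tadDims[0]] = index;                 // last iteration
}

int main() {
    const long shape[] = {2, 3, 4};
    const int  dims[]  = {0, 2};                // skip the middle axis
    long coords[3] = {1, 99, 3};                // axis 1 is left untouched
    long idx = coordsToIndexOver(shape, coords, 2, dims);
    assert(idx == 1 * 4 + 3);
    long back[3] = {0, 99, 0};
    indexToCoordsOver(idx, shape, back, 2, dims);
    assert(back[0] == 1 && back[2] == 3 && back[1] == 99);
    return 0;
}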
@@ -1,5 +1,6 @@
 /*******************************************************************************
  * Copyright (c) 2015-2018 Skymind, Inc.
+ * Copyright (c) 2019 Konduit K.K.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Apache License, Version 2.0 which is available at
@@ -1,5 +1,6 @@
 /*******************************************************************************
  * Copyright (c) 2015-2018 Skymind, Inc.
+ * Copyright (c) 2019 Konduit K.K.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Apache License, Version 2.0 which is available at
@ -1,5 +1,6 @@
|
||||||
/*******************************************************************************
|
/*******************************************************************************
|
||||||
* Copyright (c) 2015-2018 Skymind, Inc.
|
* Copyright (c) 2015-2018 Skymind, Inc.
|
||||||
|
* Copyright (c) 2019 Konduit K.K.
|
||||||
*
|
*
|
||||||
* This program and the accompanying materials are made available under the
|
* This program and the accompanying materials are made available under the
|
||||||
* terms of the Apache License, Version 2.0 which is available at
|
* terms of the Apache License, Version 2.0 which is available at
|
||||||
|
@@ -1798,6 +1799,7 @@ TEST_F(HelpersTests1, tensordot_test_6) {
 
     // [iC, bS*oH*oW, kW*kH] x [iC, kH*kW, mC] = [iC, bS*oH*oW, mC]
     MmulHelper::tensorDot(&a, &b, &cR, {{1,0,4,5,2,3}, {iC,bS*oH*oW,kW*kH}}, {{2,0,1,3},{iC,kH*kW,mC}}, {{3,0,1,2,4},{iC, bS*oH*oW, mC}});
+    // c.printBuffer();
 
     ASSERT_TRUE(c.isSameShape(expected));
     ASSERT_TRUE(c.equalsTo(expected));
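The shape comment in this test describes a batched contraction: for each of the iC batches, a (bS*oH*oW x kW*kH) matrix multiplies a (kH*kW x mC) matrix. As a sanity reference for that per-batch contract, here is a tiny CPU batched-GEMM sketch over flat row-major buffers with hypothetical sizes (not the library's tensorDot implementation):

#include <cassert>
#include <vector>

// For each batch b: C[b] = A[b] * B[b], with A[b]: M x K, B[b]: K x N.
void batchedGemm(const std::vector<double>& A, const std::vector<double>& B,
                 std::vector<double>& C, int batches, int M, int K, int N) {
    for (int b = 0; b < batches; ++b)
        for (int m = 0; m < M; ++m)
            for (int n = 0; n < N; ++n) {
                double sum = 0;
                for (int k = 0; k < K; ++k)
                    sum += A[(b * M + m) * K + k] * B[(b * K + k) * N + n];
                C[(b * M + m) * N + n] = sum;
            }
}

int main() {
    const int batches = 2, M = 2, K = 3, N = 2; // hypothetical sizes
    std::vector<double> A(batches * M * K, 1.0), B(batches * K * N, 2.0),
                        C(batches * M * N, 0.0);
    batchedGemm(A, B, C, batches, M, K, N);
    assert(C[0] == 6.0); // 1*2 summed over K = 3
    return 0;
}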
@@ -1891,7 +1891,7 @@ TEST_F(NDArrayTest, TestMMulMultiDim) {
     ASSERT_TRUE(result->isSameShape(&expected));
 
     //result->printShapeInfo("result shape");
-    //result->printBuffer("result buffer");
+    // result->printBuffer("result buffer");
     ASSERT_TRUE(result->equalsTo(&expected));
     delete result;
 }
@@ -61,8 +61,10 @@ public:
 
 TEST_F(PerformanceTests, test_maxpooling2d_1) {
     std::vector<Nd4jLong> valuesX;
-    auto x = NDArrayFactory::create<float>('c', {32, 3, 224, 224});
-    auto z = NDArrayFactory::create<float>('c', {32, 3, 224, 224});
+    // auto x = NDArrayFactory::create<float>('c', {32, 3, 224, 224});
+    // auto z = NDArrayFactory::create<float>('c', {32, 3, 224, 224});
+    auto x = NDArrayFactory::create<float>('c', {8, 3, 64, 64});
+    auto z = NDArrayFactory::create<float>('c', {8, 3, 64, 64});
     x.linspace(1.0f);
     Nd4jLong k = 5;
 
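For scale (plain arithmetic, not stated in the commit): swapping {32, 3, 224, 224} for {8, 3, 64, 64} shrinks each benchmark array by exactly 49x, from 4,816,896 floats (~18.4 MiB) to 98,304 floats (384 KiB):

#include <cassert>
#include <cstdint>

int main() {
    const int64_t oldElems = 32LL * 3 * 224 * 224; // 4,816,896
    const int64_t newElems = 8LL * 3 * 64 * 64;    // 98,304
    assert(oldElems / newElems == 49);             // 49x fewer elements
    assert(newElems * sizeof(float) == 393216);    // 384 KiB per array
    return 0;
}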
@@ -1,5 +1,6 @@
 /*******************************************************************************
  * Copyright (c) 2015-2018 Skymind, Inc.
+ * Copyright (c) 2019 Konduit K.K.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Apache License, Version 2.0 which is available at
@@ -274,4 +275,28 @@ TEST_F(PlaygroundTests, test_relubp_1) {
 
     nd4j_printf("Time: %lld; BW: %f GB/s\n", time, bw);
 }
 
+//////////////////////////////////////////////////////////////////////
+TEST_F(PlaygroundTests, my) {
+
+    int bS=1, iH=56,iW=56, iC=144,mC=1, kH=3,kW=3, sH=1,sW=1, pH=0,pW=0, dH=1,dW=1;
+    int oC=iC*mC;
+    int oH=56,oW=56;
+    int paddingMode = 1; // 1-SAME, 0-VALID;
+    int dataFormat = 1; // 1-NHWC, 0-NCHW
+
+    auto input = NDArrayFactory::create<float>('c', {bS, iH, iW, iC});
+    auto weights = NDArrayFactory::create<float>('c', {kH, kW, iC, mC});
+
+    input = 2.;
+    weights.linspace(0.1, 0.1);
+
+    nd4j::ops::depthwise_conv2d op;
+    auto results = op.execute({&input, &weights}, {}, {kH,kW, sH,sW, pH,pW, dH,dW, paddingMode, dataFormat});
+
+    delete results;
+}
+
 */
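With paddingMode = 1 (SAME) and unit strides, the chosen oH = oW = 56 match the usual SAME-padding rule out = ceil(in / stride), which is independent of kernel size. A quick check of that arithmetic, under the assumption that libnd4j follows the TensorFlow-style convention:

#include <cassert>

// SAME-padding output size: out = ceil(in / stride)
int sameOut(int in, int stride) { return (in + stride - 1) / stride; }

int main() {
    const int iH = 56, iW = 56, sH = 1, sW = 1;
    assert(sameOut(iH, sH) == 56); // matches oH = 56 in the test
    assert(sameOut(iW, sW) == 56); // matches oW = 56 in the test
    return 0;
}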
@@ -106,7 +106,7 @@ public class NativeOpsHolder {
             boolean logInit = Boolean.parseBoolean(logInitProperty);
 
             if(logInit) {
-                log.info("Number of threads used for OpenMP: {}", deviceNativeOps.ompGetMaxThreads());
+                log.info("Number of threads used for linear algebra: {}", deviceNativeOps.ompGetMaxThreads());
             }
         } catch (Exception | Error e) {
             throw new RuntimeException(
@@ -4600,6 +4600,11 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
      */
     public native @Cast("Nd4jLong") long sizeAt(int dim);
 
+    /**
+     * returns stride of "dim" dimension
+     */
+    public native @Cast("Nd4jLong") long strideAt(int dim);
+
     /**
      * returns order of array
      */
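Like sizeAt, the new strideAt accepts a negative dim that counts back from the last dimension. A standalone sketch of that contract (not the JavaCPP binding itself), using the c-order strides of a hypothetical {2, 3, 4} array:

#include <cassert>
#include <stdexcept>
#include <vector>

// Negative dims count from the end, as in Python indexing; out-of-range
// dims throw, mirroring the bounds check in the native implementation.
long long strideAt(const std::vector<long long>& strides, int dim) {
    const int rank = static_cast<int>(strides.size());
    if (dim >= rank || dim < -rank)
        throw std::runtime_error("strideAt: bad dimension index");
    return strides[dim >= 0 ? dim : rank + dim];
}

int main() {
    const std::vector<long long> strides = {12, 4, 1}; // c-order {2,3,4}
    assert(strideAt(strides, 0)  == 12);
    assert(strideAt(strides, -1) == 1); // last dimension
    return 0;
}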
@@ -8019,6 +8024,12 @@ public static final int PREALLOC_SIZE = 33554432;
 @Namespace("shape") public static native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("Nd4jLong*") LongPointer coords);
 @Namespace("shape") public static native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("Nd4jLong*") LongBuffer coords);
 @Namespace("shape") public static native void index2coords(@Cast("Nd4jLong") long index, int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("Nd4jLong*") long[] coords);
+/**
+ * take into account only dimensions stored in tadDims, tadDims must be sorted in increasing order!
+ */
+@Namespace("shape") public static native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("Nd4jLong*") LongPointer coords, int dimsSize, @Const IntPointer tadDims);
+@Namespace("shape") public static native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("Nd4jLong*") LongBuffer coords, int dimsSize, @Const IntBuffer tadDims);
+@Namespace("shape") public static native void index2coords(@Cast("Nd4jLong") long index, @Cast("const Nd4jLong*") long[] shapeInfo, @Cast("Nd4jLong*") long[] coords, int dimsSize, @Const int[] tadDims);
 
 
 
@@ -8032,6 +8043,12 @@ public static final int PREALLOC_SIZE = 33554432;
 @Namespace("shape") public static native @Cast("Nd4jLong") long coords2index(int rank, @Cast("const Nd4jLong*") LongPointer shape, @Cast("const Nd4jLong*") LongPointer coords);
 @Namespace("shape") public static native @Cast("Nd4jLong") long coords2index(int rank, @Cast("const Nd4jLong*") LongBuffer shape, @Cast("const Nd4jLong*") LongBuffer coords);
 @Namespace("shape") public static native @Cast("Nd4jLong") long coords2index(int rank, @Cast("const Nd4jLong*") long[] shape, @Cast("const Nd4jLong*") long[] coords);
+/**
+ * take into account only dimensions stored in tadDims, tadDims must be sorted in increasing order!
+ */
+@Namespace("shape") public static native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongPointer shapeInfo, @Cast("const Nd4jLong*") LongPointer coords, int dimsSize, @Const IntPointer tadDims);
+@Namespace("shape") public static native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") LongBuffer shapeInfo, @Cast("const Nd4jLong*") LongBuffer coords, int dimsSize, @Const IntBuffer tadDims);
+@Namespace("shape") public static native @Cast("Nd4jLong") long coords2index(@Cast("const Nd4jLong*") long[] shapeInfo, @Cast("const Nd4jLong*") long[] coords, int dimsSize, @Const int[] tadDims);
 
 /**
  * increment n-dimensional array by one iteration by changing coord appropriately
@@ -9088,6 +9105,8 @@ public static final int PREALLOC_SIZE = 33554432;
 
 //////////////////////////////////////////////////////////////////////
+
+//////////////////////////////////////////////////////////////////////
 
 
 