/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//
#include <ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <NDArrayFactory.h>
#include <helpers/TAD.h>
#include <helpers/ConstantTadHelper.h>
#include <Loops.h>
#include <graph/RandomGenerator.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void triuBP_(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {

    auto dOdI = NDArray(&gradO);                              // dO/dI
    const_cast<NDArray&>(input).fillAsTriangular<T>(0, diagonal, dOdI.sizeAt(-1), 'b', &dOdI);

    int dLen = dOdI.lengthOf();

    PRAGMA_OMP_PARALLEL_FOR_IF(dLen > Environment::getInstance()->elementwiseThreshold())
    for(int i = 0; i < dLen; ++i) {
        if(dOdI.t<T>(i) != static_cast<T>(0.f))
            dOdI.t<T>(i) = static_cast<T>(1.f);
    }

    // FIXME: !!!
    gradI.assign(dOdI * gradO);                               // chain rule: dLoss/dI = dO/dI * dLoss/dO
}

void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
    BUILD_SINGLE_SELECTOR(gradO.dataType(), triuBP_, (context, input, gradO, gradI, diagonal), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void triuBP_, (nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal), LIBND4J_TYPES);
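// Illustrative note (not part of the op, values assumed): dO/dI computed above is a 0/1 mask over
// the kept triangular part, so for a 2x2 input with diagonal = 0 and upstream gradient g the
// result is dL/dI = [[g00, g01], [0, g11]] - the chain rule applied element-wise.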
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void trace_(const NDArray& input, NDArray& output) {

    const int inRank = input.rankOf();

    auto setOfSubArrs = input.allTensorsAlongDimension({inRank-2, inRank-1});

    PRAGMA_OMP_PARALLEL_FOR_IF(setOfSubArrs->size() > Environment::getInstance()->tadThreshold())
    for(int i = 0; i < setOfSubArrs->size(); ++i)
        output.p(i, setOfSubArrs->at(i)->getTrace());

    delete setOfSubArrs;
}

void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {
    BUILD_SINGLE_SELECTOR(input.dataType(), trace_, (input, output), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void trace_, (const NDArray& input, NDArray& output), LIBND4J_TYPES);
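// Usage sketch (shapes assumed, illustrative only): for an input of shape [2, 3, 3] the helper
// writes one value per innermost 3x3 sub-matrix, i.e. output has shape [2] and
// output[i] = sum of the diagonal of input[i, :, :].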
//////////////////////////////////////////////////////////////////////////
template <typename T>
void randomShuffle_(NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {

    // check edge cases first
    int temp;
    const int firstDim = input.sizeAt(0);

    if(input.lengthOf() == 1 || firstDim == 1) {
        if(!isInplace)
            output.assign(input);
    }
    else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) {

        // apply Fisher-Yates shuffle
        if(isInplace) {
            //PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                if(i == r)
                    continue;
                T t0 = input.t<T>(i);
                T t1 = input.t<T>(r);
                //math::nd4j_swap<T>(input(i), input(r));
                input.t<T>(i) = t1;
                input.t<T>(r) = t0;
            }
        }
        else {
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0);
            output.p<T>(Nd4jLong(0), input.e<T>(0));

            PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                output.t<T>(i) = input.t<T>(indices[r]);
                if(i == r)
                    continue;
                output.t<T>(r) = input.t<T>(indices[i]);
                math::nd4j_swap<int>(indices[i], indices[r]);
            }
            rng.rewindH(firstDim - 1);
        }
    }
    else {

        // evaluate sub-arrays list of input array through all dimensions excluding first one
        std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0});
        auto subArrsListIn = input.allTensorsAlongDimension(dimensions);

        // apply Fisher-Yates shuffle
        if(isInplace) {
            //PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold())
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                if(i == r)
                    continue;
                subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r));
            }
        }
        else {
            // evaluate sub-arrays list of output array through all dimensions excluding first one
            auto subArrsListOut = output.allTensorsAlongDimension(dimensions);
            std::vector<int> indices(firstDim);
            std::iota(indices.begin(), indices.end(), 0);
            bool isZeroShuffled = false;

            //PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold())
            for(int i = firstDim - 1; i > 0; --i) {
                int r = rng.relativeInt(i) % i;
                subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r]));
                if(r == 0)
                    isZeroShuffled = true;
                if(i == r)
                    continue;
                subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i]));
                math::nd4j_swap<int>(indices[i], indices[r]);
            }
            if(!isZeroShuffled)
                subArrsListOut->at(0)->assign(subArrsListIn->at(0));
            delete subArrsListOut;
        }
        rng.rewindH(firstDim - 1);
        delete subArrsListIn;
    }
}

void randomShuffle(nd4j::LaunchContext* context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (input, output, rng, isInplace), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES);
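// Note on the algorithm above: all branches implement a Fisher-Yates shuffle over the first
// dimension - element (or sub-array) i is exchanged with a random position r in [0, i), so every
// permutation of the leading dimension is reachable; rewindH() advances the generator state by
// the number of random values consumed.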
//////////////////////////////////////////////////////////////////////////
template<typename T>
void pad_(const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) {

    const T* x = input.bufferAsT<T>();
          T* z = output.bufferAsT<T>();

    const Nd4jLong* xShape  = input.shapeOf();
    const Nd4jLong* zShape  = output.shapeOf();
    const Nd4jLong* xStride = input.stridesOf();
    const Nd4jLong* zStride = output.stridesOf();

    const int rank = input.rankOf();        // both input and output have the same rank
    const int rankMinusOne = rank - 1;

    const auto zLen = output.lengthOf();

    std::vector<Nd4jLong> coords(rank);     // we use the same coordinates storage both for input and output since their ranks are the same

    if(mode == 0) {     // CONSTANT case

        const T padVal = padValue.e<T>(0);

        PRAGMA_OMP_PARALLEL_FOR_ARGS(firstprivate(coords))
        for(uint i = 0; i < zLen; ++i) {

            shape::index2coords(rank, zShape, i, zLen, coords.data());
            const auto zOffset = shape::getOffset(0, zShape, zStride, coords.data(), rank);

            bool within = true;
            for(int j = rankMinusOne; j >= 0; --j) {
                if(xShape[j] == zShape[j]) continue;
                const auto left = paddings.e<Nd4jLong>(j, 0);
                if(coords[j] < left || coords[j] >= left + xShape[j]) { within = false; break; }
                else { coords[j] = coords[j] - left; }
            }

            if(within)
                z[zOffset] = x[shape::getOffset(0, xShape, xStride, coords.data(), rank)];
            else
                z[zOffset] = padVal;
        }
    }
    else {              // REFLECT and SYMMETRIC cases

        const Nd4jLong shift1 = mode == 1 ? 0 : 1;      // REFLECT : SYMMETRIC
        const Nd4jLong shift2 = mode == 1 ? 2 : 1;      // REFLECT : SYMMETRIC

        PRAGMA_OMP_PARALLEL_FOR_ARGS(firstprivate(coords))
        for(uint i = 0; i < zLen; ++i) {

            shape::index2coords(rank, zShape, i, zLen, coords.data());
            const auto zOffset = shape::getOffset(0, zShape, zStride, coords.data(), rank);

            for(int j = rankMinusOne; j >= 0; --j) {

                if(xShape[j] == zShape[j]) continue;
                coords[j] = coords[j] - paddings.e<Nd4jLong>(j, 0);                             // are ready to fill middle (within input dimension range)
                if(coords[j] < 0) coords[j] = -coords[j] - shift1;                              // means fill from left
                else if(coords[j] >= xShape[j]) coords[j] = 2 * xShape[j] - coords[j] - shift2; // means fill from right
            }

            const auto xOffset = shape::getOffset(0, xShape, xStride, coords.data(), rank);
            z[zOffset] = x[xOffset];
        }
    }
}
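// Illustrative example of the three modes (values assumed): padding a 1-D input [1, 2, 3]
// with 2 elements on each side gives
//   mode 0 (CONSTANT, padValue = 0): [0, 0, 1, 2, 3, 0, 0]
//   mode 1 (REFLECT):                [3, 2, 1, 2, 3, 2, 1]
//   mode 2 (SYMMETRIC):              [2, 1, 1, 2, 3, 3, 2]
// which is what the shift1/shift2 mirroring above produces.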
// //////////////////////////////////////////////////////////////////////////
// template<typename T>
// void pad2_(const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, NDArray const& padValue) {
// const int rank = output.rankOf();
// std::vector<int> dimsToExclude(rank);
// std::iota(dimsToExclude.begin(), dimsToExclude.end(), 0); // fill with 0, 1, ... rank-1
// Nd4jLong numLeft = paddings.e<Nd4jLong>(rank-1,0);
// Nd4jLong numRight = paddings.e<Nd4jLong>(rank-1,1);
// Nd4jLong inDimSize = input.sizeAt(rank-1);
// Nd4jLong outDimSize = output.sizeAt(rank-1);
// std::vector<std::vector<Nd4jLong>> outIdx = { std::vector<Nd4jLong>(2*rank), {numLeft, numLeft + inDimSize}, {0, numLeft}, {numLeft + inDimSize, outDimSize} };
// for(int i = 0; i < rank-1; ++i) {
// outIdx[0][2*i] = paddings.e<Nd4jLong>(i, 0);
// outIdx[0][2*i + 1] = outIdx[0][2*i] + input.sizeAt(i);
// }
// outIdx[0][2*rank-1] = outIdx[0][2*rank-2] = 0;
// // ***** populate innermost sub-arrays firstly ***** //
// dimsToExclude.pop_back();
// Nd4jLong startL = mode == 1 ? 1 : 0; // REFLECT or SYMMETRIC
// Nd4jLong startR = mode == 1 ? inDimSize-2 : inDimSize-1; // REFLECT or SYMMETRIC
// Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude);
// NDArray outSubArr0 = output(outIdx[0], true);
// PRAGMA_OMP_PARALLEL_FOR
// for(Nd4jLong j = 0; j < numOfSubArrs; ++j) {
// NDArray outSubArr1 = outSubArr0(j, dimsToExclude);
// NDArray inSubArr = input(j, dimsToExclude);
// NDArray outSubArrMid = outSubArr1(outIdx[1]);
// outSubArrMid.assign(inSubArr); // assign middle
// if(mode == 0) { // CONSTANT
// if(numLeft != 0) {
// NDArray temp = outSubArr1(outIdx[2]);
// temp.assign(padValue); // assign left
// }
// if(numRight != 0) {
// NDArray temp = outSubArr1(outIdx[3]);
// temp.assign(padValue); // assign right
// }
// }
// else { // REFLECT or SYMMETRIC
// for(Nd4jLong k = numLeft-1, e = startL; k >= 0; --k, ++e) // fill left side
// outSubArr1.t<T>(k) = inSubArr.t<T>(e);
// for(Nd4jLong k = numLeft + inDimSize, e = startR; k < outDimSize; ++k, --e) // fill right side
// outSubArr1.t<T>(k) = inSubArr.t<T>(e);
// }
// }
// // ***** fill rest of outer sub-arrays ***** //
// std::vector<Nd4jLong> outIdxInner(2, 0);
// std::vector<Nd4jLong> outIdxOuter(2, 0);
// for(int i = rankBorder - 1; i >= 0; --i) {
// dimsToExclude.pop_back();
// outIdxInner.push_back(0), outIdxInner.push_back(0);
// outIdxOuter.push_back(0), outIdxOuter.push_back(0);
// Nd4jLong numLeft = paddings.e<Nd4jLong>(i, 0);
// Nd4jLong numRight = paddings.e<Nd4jLong>(i, 1);
// if(numLeft == 0 && numRight == 0)
// continue;
// Nd4jLong inDimSize = input.sizeAt(i);
// Nd4jLong outDimSize = output.sizeAt(i);
// if(mode == 0) {
// outIdxOuter[0] = 0; outIdxOuter[1] = numLeft;
// outIdxInner[0] = numLeft + inDimSize; outIdxInner[1] = outDimSize;
// }
// startL = mode == 1 ? numLeft + 1 : numLeft; // REFLECT or SYMMETRIC
// startR = mode == 1 ? numLeft + inDimSize - 2 : numLeft + inDimSize-1; // REFLECT or SYMMETRIC
// numOfSubArrs = ShapeUtils::getNumOfSubArrs(output.getShapeInfo(), dimsToExclude);
// PRAGMA_OMP_PARALLEL_FOR_ARGS(firstprivate(outIdxOuter, outIdxInner))
// for(Nd4jLong j = 0; j < numOfSubArrs; ++j) {
// NDArray outSubArr = output(j, dimsToExclude);
// if(mode == 0) { // CONSTANT
// if(numLeft != 0) {
// NDArray tempO = outSubArr(outIdxOuter);
// tempO.assign(padValue); // assign left
// }
// if(numRight != 0) {
// NDArray tempI = outSubArr(outIdxInner);
// tempI.assign(padValue); // assign right
// }
// }
// else { // REFLECT or SYMMETRIC
// for(Nd4jLong k = numLeft-1, e = startL; k >= 0; --k, ++e) { // fill left side
// outIdxOuter[0] = k;
// outIdxOuter[1] = k+1;
// outIdxInner[0] = e;
// outIdxInner[1] = e+1;
// NDArray outSubArrInner = outSubArr(outIdxInner);
// NDArray outSubArrOuter = outSubArr(outIdxOuter);
// outSubArrOuter.assign(outSubArrInner);
// }
// for(Nd4jLong k = numLeft + inDimSize, e = startR; k < outDimSize; ++k, --e) { // fill right side
// outIdxOuter[0] = k;
// outIdxOuter[1] = k+1;
// outIdxInner[0] = e;
// outIdxInner[1] = e+1;
// NDArray outSubArrInner = outSubArr(outIdxInner);
// NDArray outSubArrOuter = outSubArr(outIdxOuter);
// outSubArrOuter.assign(outSubArrInner);
// }
// }
// }
// }
// }
void pad(nd4j::LaunchContext* context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, NDArray const& padValue) {
    BUILD_SINGLE_SELECTOR(input.dataType(), pad_, (mode, input, paddings, output, padValue), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void pad_, (const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, NDArray const& padValue), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
/*// initial values of inIdx, outIdx, dim must be equal to zero
template<typename T>
static void recursiveLoopForPad_(const int mode, NDArray& input, const NDArray& paddings, NDArray& output, std::vector<int> dimensions, int dim, int inIdx, int outIdx, NDArray& padValue) {

    int leftOffset;
    // dimensions are array of input dimensions, it is sorted in increasing order
    // every time at the beginning we erase first element from it (not good idea to use vector for this purpose, but luckily it is small enough)
    // then we use this array for tads building, every time while recursion the number of built tads becomes bigger
    dimensions.erase(dimensions.begin());

    // build tad basing on output array, also create auxiliary arrays pointing on required output array ranges
    shape::TAD tadOut(output.getShapeInfo(), dimensions.data(), dimensions.size());
    tadOut.createTadOnlyShapeInfo();
    tadOut.createOffsets();
    auto subArrOut = NDArray(output.getBuffer(), tadOut.tadOnlyShapeInfo, output.getContext());
    auto subArr    = NDArray(output.getBuffer(), tadOut.tadOnlyShapeInfo, output.getContext());

    // build tad basing on input array, also create auxiliary array pointing on required input array range
    shape::TAD tadIn(input.getShapeInfo(), dimensions.data(), dimensions.size());
    tadIn.createTadOnlyShapeInfo();
    tadIn.createOffsets();
    auto subArrIn = NDArray(input.getBuffer(), tadIn.tadOnlyShapeInfo, output.getContext());

    // these indices take into account recursion and always point to actual tads numbers
    if(input.rankOf() > 1 && output.rankOf() > 1) {     // only for non-vector cases
        outIdx = outIdx * output.sizeAt(dim + 1);
        inIdx  = inIdx  * input.sizeAt(dim + 1);
    }

    // current input tad number, we add to it unity in a loop
    int k = -1;

    // loop through current dimension
    for(int i = 0; i < output.sizeAt(dim); ++i) {

        // corresponds to outer range (relevant indices are absent in input)
        leftOffset = paddings.e<int>(dim, 0);
        if(i < leftOffset || i >= (input.sizeAt(dim) + leftOffset))
            continue;

        // increase input tads number
        ++k;

        // recursion condition allows for the fact that tad can't reduce to scalar
        if(dim < input.rankOf() - 2)
            recursiveLoopForPad(mode, input, paddings, output, dimensions, dim + 1, inIdx + k, outIdx + i, padValue);
        else if (paddings.sizeAt(0) > dim + 1) {
            leftOffset = paddings.e<int>(dim + 1, 0);

            // shift buffers pointers to actual element position
            if (output.rankOf() > 1) {
                subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + i]);
                subArrIn.setBuffer(reinterpret_cast<T*>(input.getBuffer()) + tadIn.tadOffsets[inIdx + i - paddings.e<int>(dim, 0)]);
            }
            else {
                subArrOut.p(i, subArrIn.e<T>(i - leftOffset));
            }

            // most inner loop, corresponds to last dim = rank-1
            switch (mode) {
                case 0:             // CONSTANT mode
                    for(int j = 0; j < subArrOut.lengthOf(); ++j)
                        if(j < leftOffset || j >= (subArrIn.lengthOf() + leftOffset))                   // firstly fill with zeros outer ranges
                            subArrOut.p(j, (T)0.f);
                        else
                            subArrOut.p(j, subArrIn.e<T>(j - leftOffset));                              // fill middle with elements of input array
                    break;

                case 1:             // REFLECT mode
                    for(int j = 1; j <= leftOffset; ++j)                                                // fill firstly left side
                        subArrOut.p(leftOffset - j, subArrIn.e<T>(j));
                    for(int j = 0; j < subArrIn.lengthOf(); ++j)                                        // fill middle
                        subArrOut.p(leftOffset + j, subArrIn.e<T>(j));
                    for(int j = (subArrOut.lengthOf() - leftOffset); j < subArrOut.lengthOf(); ++j)     // fill right side
                        subArrOut.p(j, subArrIn.e<T>(subArrOut.lengthOf() - j - 1));
                    break;

                case 2:             // SYMMETRIC mode
                    for(int j = 1; j <= leftOffset; ++j)                                                // fill firstly left side
                        subArrOut.p(leftOffset - j, subArrIn.e<T>(j - 1));
                    for(int j = 0; j < subArrIn.lengthOf(); ++j)                                        // fill middle
                        subArrOut.p(leftOffset + j, subArrIn.e<T>(j));
                    for(int j = (subArrOut.lengthOf() - leftOffset); j < subArrOut.lengthOf(); ++j)     // fill right side
                        subArrOut.p(j, subArrIn.e<T>(subArrOut.lengthOf() - j));
                    break;
            }
        }
        else {
            if (mode == 0 && input.rankOf() < 2)
                subArrOut.p(i, subArrIn.e<T>(i - leftOffset));      // fill middle with elements of input array
        }
    }

    // populate sub-array formed previously
    leftOffset = paddings.e<int>(dim, 0);
    switch (mode) {
        case 0:         // CONSTANT mode
            for(int j = 1; j <= leftOffset; ++j) {
                // fill left side with padValue
                if (output.rankOf() > 1) {
                    subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + leftOffset - j]);
                    subArrOut.assign(padValue);
                }
                else {
                    subArrOut.p(j - 1, padValue);
                }
            }
            // output.printIndexedBuffer("Output at");
            for(int j = (output.sizeAt(dim) - leftOffset); j < output.sizeAt(dim); ++j) {       // fill left side with zeros
                if (output.rankOf() > 1) {
                    subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + j]);
                    subArrOut.assign(padValue);
                }
                else {
                    subArrOut.p(j, padValue);
                }
            }
            break;

        case 1:         // REFLECT mode
            for(int j = 1; j <= leftOffset; ++j) {                                              // fill left side
                subArr.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + leftOffset + j]);
                subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + leftOffset - j]);
                subArrOut.assign(&subArr);
            }
            for(int j = (output.sizeAt(dim) - leftOffset); j < output.sizeAt(dim); ++j) {       // fill right side
                subArr.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + output.sizeAt(dim) + leftOffset - 1 - j]);
                subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + j]);
                subArrOut.assign(&subArr);
            }
            break;

        case 2:         // SYMMETRIC mode
            for(int j = 1; j <= leftOffset; ++j) {                                              // fill left side
                subArr.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + leftOffset + j - 1]);
                subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + leftOffset - j]);
                subArrOut.assign(&subArr);
            }
            for(int j = (output.sizeAt(dim) - leftOffset); j < output.sizeAt(dim); ++j) {       // fill right side
                subArr.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + output.sizeAt(dim) + leftOffset - j]);
                subArrOut.setBuffer(reinterpret_cast<T*>(output.getBuffer()) + tadOut.tadOffsets[outIdx + j]);
                subArrOut.assign(&subArr);
            }
            break;
    }
}
*/
/*
void recursiveLoopForPad(const int mode, NDArray& input, const NDArray& paddings, NDArray& output, std::vector<int> dimensions, int dim, int inIdx, int outIdx, NDArray& padValue) {
    BUILD_SINGLE_SELECTOR(input.dataType(), recursiveLoopForPad_, (mode, input, paddings, output, dimensions, dim, inIdx, outIdx, padValue), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void recursiveLoopForPad_, (const int mode, NDArray& input, const NDArray& paddings, NDArray& output, std::vector<int> dimensions, int dim, int inIdx, int outIdx, NDArray& padValue), LIBND4J_TYPES);
*/
////////////////////////////////////////////////////////////////////////
void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) {

    std::set<int> uniqueElems;
    const int length = input.lengthOf();

    for(int i = 0; i < length; ++i) {

        int elem = input.e<int>(i);

        if(!uniqueElems.insert(elem).second)        // this operation forbids us to use #pragma omp
            throw std::runtime_error("helpers::invertPermutation function: input array contains duplicates !");

        if(elem < 0 || elem > length - 1)
            throw std::runtime_error("helpers::invertPermutation function: element of input array is out of range (0, length-1) !");

        output.p<int>(elem, i);
    }
}
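// Illustrative example (values assumed): input [2, 0, 1] is a valid permutation, and the loop
// above produces output [1, 2, 0], since output[input[i]] = i for every i.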
////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void gatherND_(NDArray& input, NDArray& indices, NDArray& output) {

    const X* x = reinterpret_cast<X*>(input.getBuffer());
    const Y* y = reinterpret_cast<Y*>(indices.getBuffer());
          X* z = reinterpret_cast<X*>(output.getBuffer());

    const int xRank = input.rankOf();
    const int yRank = indices.rankOf();
    const int zRank = output.rankOf();
    const int maxRank = nd4j::math::nd4j_max<int>(yRank, nd4j::math::nd4j_max<int>(xRank, zRank));

    const Nd4jLong zLen = output.lengthOf();

    const int yLastDim = indices.sizeAt(-1);

    std::vector<Nd4jLong> coords(maxRank);

    PRAGMA_OMP_PARALLEL_FOR_ARGS(if(zLen > Environment::getInstance()->elementwiseThreshold()) firstprivate(coords))
    for (Nd4jLong i = 0; i < zLen; ++i) {

        Nd4jLong *zCoordStart, *xCoordStart;

        if(yLastDim == xRank) {
            zCoordStart = coords.data();
            xCoordStart = coords.data();
        }
        else if(zRank >= xRank) {
            zCoordStart = coords.data();
            xCoordStart = coords.data() + zRank - xRank;
        }
        else {
            zCoordStart = coords.data() + xRank - zRank;
            xCoordStart = coords.data();
        }

        shape::index2coords(zRank, output.shapeOf(), i, zLen, zCoordStart);

        const auto zOffset = shape::getOffset(0, output.shapeOf(), output.stridesOf(), zCoordStart, zRank);

        // last y coordinate
        uint coordToRestore;
        if(yLastDim != xRank)
            coordToRestore = static_cast<uint>(zCoordStart[yRank - 1]);

        zCoordStart[yRank - 1] = 0;
        const auto yOffset = shape::getOffset(0, indices.shapeOf(), indices.stridesOf(), zCoordStart, yRank);

        // restore z coordinate
        if(yLastDim != xRank)
            zCoordStart[yRank - 1] = coordToRestore;

        // construct coordinates for x
        for(uint j = 0; j < yLastDim; ++j)
            xCoordStart[j] = y[yOffset + j * indices.stridesOf()[yRank - 1]];   // last stride

        const auto xOffset = shape::getOffset(0, input.shapeOf(), input.stridesOf(), xCoordStart, xRank);

        z[zOffset] = x[xOffset];
    }
}

////////////////////////////////////////////////////////////////////////
void gatherND(nd4j::LaunchContext* context, NDArray& input, NDArray& indices, NDArray& output) {
    BUILD_DOUBLE_SELECTOR(input.dataType(), indices.dataType(), gatherND_, (input, indices, output), LIBND4J_TYPES, INTEGER_TYPES);
}

BUILD_DOUBLE_TEMPLATE(template void gatherND_, (NDArray& input, NDArray& indices, NDArray& output), LIBND4J_TYPES, INTEGER_TYPES);
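// Usage sketch (shapes assumed, illustrative only): with input of shape [2, 3] and
// indices [[0, 1], [1, 2]] (shape [2, 2], last dimension equal to the input rank), the loop above
// yields output [input[0, 1], input[1, 2]] of shape [2]; when the last index dimension is smaller
// than the input rank, whole slices are gathered instead of single elements.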
////////////////////////////////////////////////////////////////////////
template<typename T>
static void gather_(NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) {

    int axis = intArgs.size() > 0 ? intArgs[0] : 0;
    const int inputRank = input->rankOf();
    if(axis < 0)
        axis += inputRank;

    const int numOfIntArgs = intArgs.size();

    if (indices != nullptr) {

        for(int i = 0; i < indices->lengthOf(); ++i)
            if(indices->e<Nd4jLong>(i) >= input->sizeAt(axis))
                throw std::runtime_error("helpers::gather function: indices array contains wrong elements, each element must be smaller than corresponding dimension of input array !");

        // first case: indices consist of only one scalar
        if(indices->isScalar()) {
            if(input->rankOf() <= 1) {
                // For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar
                auto idx = indices->e<Nd4jLong>(0);
                auto scalarNDArray = input->e(idx);
                output->assign(scalarNDArray);
            } else {
                auto dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {axis});
                auto tadPack = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);

                auto tadArr = NDArray(reinterpret_cast<void*>(reinterpret_cast<T*>(input->getBuffer()) + tadPack.primaryOffsets()[indices->e<Nd4jLong>(0)]), tadPack.primaryShapeInfo(), output->getContext());
                output->assign(&tadArr);
            }
        }
        else if (input->rankOf() == 1 && indices->isVector()) {
            // special case
            PRAGMA_OMP_PARALLEL_FOR_IF(indices->lengthOf() > Environment::getInstance()->tadThreshold())
            for (int e = 0; e < indices->lengthOf(); e++)
                output->p(e, input->e<T>(indices->e<Nd4jLong>(e)));
        }
        else {

            std::vector<int> dimsOut(indices->rankOf());
            std::iota(dimsOut.begin(), dimsOut.end(), axis);    // fill with axis, axis+1, ... indices->rankOf()-1

            const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(output->getShapeInfo(), dimsOut);

            PRAGMA_OMP_PARALLEL_FOR_IF(numOfSubArrs > Environment::getInstance()->tadThreshold())
            for(int i = 0; i < numOfSubArrs; ++i) {
                NDArray subArrOut = (*output)(i, dimsOut);
                NDArray subArrIn  = (*input)(indices->e<Nd4jLong>(i), {axis});
                subArrOut.assign(subArrIn);
            }
        }
    }
    else {

        for(int i = 1; i < numOfIntArgs; ++i)
            if(intArgs[i] >= input->sizeAt(axis))
                throw std::runtime_error("helpers::gather function: some of input indexes is larger than corresponding shape of input array !");

        // we only allow scalar/vector case here
        if(numOfIntArgs == 2) {     // scalar case
            output->assign((*input)(intArgs[1], {axis}));
        }
        else {                      // vector case
            const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(output->getShapeInfo(), {axis});

            PRAGMA_OMP_PARALLEL_FOR_IF(numOfSubArrs > Environment::getInstance()->tadThreshold())
            for(int i = 0; i < numOfSubArrs; ++i) {
                NDArray subArrOut = (*output)(i, {axis});
                NDArray subArrIn  = (*input)(intArgs[i + 1], {axis});
                subArrOut.assign(subArrIn);
            }
        }
    }
}

void gather(NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) {
    BUILD_SINGLE_SELECTOR(input->dataType(), gather_, (input, indices, output, intArgs), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void gather_, (NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs), LIBND4J_TYPES);
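// Usage sketch (shapes assumed, illustrative only): for input of shape [3, 4], axis = 0 and
// indices [2, 0], the sub-array branch above copies rows 2 and 0 of the input into an output of
// shape [2, 4]; scalar indices instead select a single slice (or a single element for
// rank <= 1 inputs).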
//////////////////////////////////////////////////////////////////////////
void eye(nd4j::LaunchContext* context, NDArray& output) {

    const int rank = output.rankOf();
    auto arrs = output.allTensorsAlongDimension({rank-2, rank-1});

    PRAGMA_OMP_PARALLEL_FOR_IF(arrs->size() > Environment::getInstance()->tadThreshold())
    for(int i = 0; i < arrs->size(); ++i)
        arrs->at(i)->setIdentity();

    delete arrs;
}
//////////////////////////////////////////////////////////////////////////
void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) {

    int opCode  = (*intArgs)[0];
    int dimSize = (*intArgs)[1];
    Nd4jLong e;
    Nd4jLong limg = 2 + dimSize;
    std::vector<int> tadDimensions(dimSize);
    for (e = 2; e < limg; e++)
        tadDimensions[e - 2] = (*intArgs)[e];

    std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input.rankOf(), tadDimensions);

    // increasing counter to skip numIndices
    e++;
    std::vector<int> indices;
    for (; e < intArgs->size(); e++)
        indices.push_back((*intArgs)[e]);

    PRAGMA_OMP_PARALLEL_FOR
    for (Nd4jLong i = 0; i < indices.size(); ++i) {

        auto inSubArr  = input(indices[i], dimsToExclude, true);
        auto updSubArr = updates(i, dimsToExclude, true);

        if (inSubArr.lengthOf() != updSubArr.lengthOf())
            continue;

        switch (opCode) {
            case 0:
                inSubArr.applyPairwiseTransform(pairwise::Add, &updSubArr, &inSubArr, nullptr);
                break;
            case 1:
                inSubArr.applyPairwiseTransform(pairwise::Subtract, &updSubArr, &inSubArr, nullptr);
                break;
            case 2:
                inSubArr.applyPairwiseTransform(pairwise::Multiply, &updSubArr, &inSubArr, nullptr);
                break;
            case 3:
                inSubArr.applyPairwiseTransform(pairwise::Divide, &updSubArr, &inSubArr, nullptr);
                break;
            case 4:
                inSubArr.applyPairwiseTransform(pairwise::ReverseSubtract, &updSubArr, &inSubArr, nullptr);
                break;
            case 5:
                inSubArr.applyPairwiseTransform(pairwise::ReverseDivide, &updSubArr, &inSubArr, nullptr);
                break;
            case 6:
                inSubArr.applyPairwiseTransform(pairwise::CopyPws, &updSubArr, &inSubArr, nullptr);
                break;
            default:
                continue;
        }
    }
}
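// Note on the intArgs layout as parsed above (illustrative summary, not an external contract):
//   intArgs[0]         - opCode: 0 add, 1 subtract, 2 multiply, 3 divide,
//                        4 reverse-subtract, 5 reverse-divide, 6 copy
//   intArgs[1]         - number of TAD dimensions, followed by that many dimension indices
//   next value         - number of indices (skipped by the e++ above)
//   remaining values   - indices of the input sub-arrays to update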
//////////////////////////////////////////////////////////////////////////
void scatterSimple(nd4j::LaunchContext* context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) {

    // updates and indices have same length
    const Nd4jLong len = indices.lengthOf();

    switch (opId) {
        case 6: {   // copy
            PRAGMA_OMP_PARALLEL_FOR_IF(len > Environment::getInstance()->elementwiseThreshold())
            for (uint i = 0; i < len; ++i) {
                auto inSubArr = input(i, dimensions);
                inSubArr.p(indices.t<Nd4jLong>(i), updates.e(i));
            }
        }
        break;

        default:
            throw std::invalid_argument("helpers::scatterSimple: operation is not implemented for given id !");
    }
}
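// Illustrative note: opId = 6 (copy) is the only operation implemented above - for each i the
// value updates.e(i) is written at position indices[i] inside the i-th sub-array of the input
// taken along the given dimensions.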
//////////////////////////////////////////////////////////////////////////
template<typename T>
static void mergeMaxIndex_(const std::vector<NDArray*>& inArrs, NDArray& output) {

    const Nd4jLong numArgs = inArrs.size();
    auto x = inArrs[0];

    PRAGMA_OMP_PARALLEL_FOR_IF(x->lengthOf() > Environment::getInstance()->elementwiseThreshold())
    for (Nd4jLong e = 0; e < x->lengthOf(); e++) {
        T max = -DataTypeUtils::max<T>();
        Nd4jLong idx = 0;

        for (int i = 0; i < numArgs; i++) {
            T v = inArrs[i]->e<T>(e);
            if (v > max) {
                max = v;
                idx = i;
            }
        }
        output.p(e, idx);
    }
}

void mergeMaxIndex(nd4j::LaunchContext* context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), mergeMaxIndex_, (inArrs, output), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void mergeMaxIndex_, (const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
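// Illustrative example (values assumed): for inArrs = { [1, 5], [4, 2] } the loop above writes
// output = [1, 0] - the index of the array holding the element-wise maximum at each position.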
//////////////////////////////////////////////////////////////////////////
template<typename T>
static void mergeMax_(const std::vector<NDArray*>& inArrs, NDArray& output) {

    const Nd4jLong numArgs = inArrs.size();
    auto x = inArrs[0];

    PRAGMA_OMP_PARALLEL_FOR_IF(x->lengthOf() > Environment::getInstance()->elementwiseThreshold())
    for (Nd4jLong e = 0; e < x->lengthOf(); e++) {
        T max = -DataTypeUtils::max<T>();
        for (int i = 0; i < numArgs; i++) {
            T v = inArrs[i]->e<T>(e);
            if (v > max)
                max = v;
        }
        output.p(e, max);
    }
}

void mergeMax(nd4j::LaunchContext* context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (inArrs, output), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void mergeMax_, (const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
template<typename T>
static void mergeAvg_(const std::vector<NDArray*>& inArrs, NDArray& output) {

    const Nd4jLong numArgs = inArrs.size();
    const T factor = 1.f / numArgs;
    auto x = inArrs[0];

    PRAGMA_OMP_PARALLEL_FOR_IF(x->lengthOf() > Environment::getInstance()->elementwiseThreshold())
    for (Nd4jLong e = 0; e < x->lengthOf(); e++) {
        T sum = 0.;
        for (int i = 0; i < numArgs; i++) {
            T v = inArrs[i]->e<T>(e);
            sum += v;
        }
        output.p<T>(e, sum * factor);
    }
}

void mergeAvg(nd4j::LaunchContext* context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (inArrs, output), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void mergeAvg_, (const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
template<typename T>
static void mergeAdd_(const std::vector<NDArray*>& inArrs, NDArray& output) {

    const Nd4jLong numArgs = inArrs.size();
    auto x = inArrs[0];

    PRAGMA_OMP_PARALLEL_FOR_IF(x->lengthOf() > Environment::getInstance()->elementwiseThreshold())
    for (Nd4jLong e = 0; e < x->lengthOf(); e++) {
        T sum = (T) 0.f;
        for (int i = 0; i < numArgs; i++)
            sum += inArrs[i]->e<T>(e);
        output.p(e, sum);
    }
}

void mergeAdd(nd4j::LaunchContext* context, const std::vector<NDArray*>& inArrs, NDArray& output) {
    BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (inArrs, output), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES);
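// Illustrative example for the three merge helpers above (values assumed):
// for inArrs = { [1, 5], [4, 2] }  ->  mergeMax: [4, 5], mergeAvg: [2.5, 3.5], mergeAdd: [5, 7].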
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void clipByNorm_(NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {

    const int rank = input.rankOf();

    const auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions);

    const T normActual = norm2.e<T>(0);
    const T normClip   = clipNorm.e<T>(0);

    if (isInplace) {

        if (norm2.lengthOf() == 1) {

            if (normActual > normClip)
                input *= (normClip / normActual);
        }
        else {

            auto listOfInSubArrs = input.allTensorsAlongDimension(dimensions);

            PRAGMA_OMP_PARALLEL_FOR
            for (Nd4jLong i = 0; i < listOfInSubArrs->size(); ++i) {

                const T iNormActual = norm2.e<T>(i);

                if (iNormActual > normClip)
                    *listOfInSubArrs->at(i) *= normClip / iNormActual;
            }
            delete listOfInSubArrs;
        }
    }
    else {

        if (norm2.lengthOf() == 1) {

            if (normActual > normClip)
                output.assign(input * (normClip / normActual));
            else
                output.assign(input);
        }
        else {

            auto listOfInSubArrs  = input.allTensorsAlongDimension(dimensions);
            auto listOfOutSubArrs = output.allTensorsAlongDimension(dimensions);

            PRAGMA_OMP_PARALLEL_FOR
            for (Nd4jLong i = 0; i < listOfInSubArrs->size(); ++i) {

                auto inputSubArr  = listOfInSubArrs->at(i);
                auto outputSubArr = listOfOutSubArrs->at(i);
                outputSubArr->assign(inputSubArr);

                const T iNormActual = norm2.e<T>(i);

                // use the same scalar comparison and scaling as the in-place branch above
                if (iNormActual > normClip)
                    *outputSubArr *= normClip / iNormActual;
            }
            delete listOfInSubArrs;
            delete listOfOutSubArrs;
        }
    }
}
//////////////////////////////////////////////////////////////////////////
void clipByNorm(nd4j::LaunchContext* context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(output.dataType(), clipByNorm_, (input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
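//////////////////////////////////////////////////////////////////////////
// clipByGlobalNorm: computes globalNorm = sqrt(sum_i ||inputs[i]||_2^2) over all inputs
// (analogous to tf.clip_by_global_norm); when globalNorm exceeds clipNorm every input is scaled
// by clipNorm / globalNorm, otherwise the inputs are copied through unchanged. The extra trailing
// output (outputs[inputs.size()]) receives the computed globalNorm.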
template <typename T>
static void clipByGlobalNorm_(std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {

    // globalNorm = sqrt(sum([l2norm(t)**2 for t in t_list]))
    NDArray globalNorm = NDArrayFactory::create<T>(0, inputs[0]->getContext());

    for (auto input : inputs) {
        auto l2norm = input->reduceNumber(reduce::Norm2);
        globalNorm += l2norm * l2norm;
    }

    globalNorm.applyTransform(transform::Sqrt, nullptr, nullptr);   // globalNorm = sqrt(globalNorm)
    outputs[inputs.size()]->p(0, globalNorm);

    const T factor = clipNorm / globalNorm.e<T>(0);

    for (size_t e = 0; e < inputs.size(); e++) {
        // all-reduce
        auto input  = inputs[e];
        auto output = outputs[e];

        if (globalNorm.e<double>(0) <= clipNorm) {
            output->assign(input);
        }
        else {
            auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
            input->applyLambda<T>(lambda, output);
        }
    }
}
void clipByGlobalNorm(nd4j::LaunchContext* context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
    BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
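// clipByNormBP: backprop for clipByNorm. With N = ||x||_2 and clip value c the forward pass
// returns y = x * c / N when N > c, so the chain rule gives
//     dL/dx_i = c * ( g_i / N - x_i * sum_j(x_j * g_j) / N^3 ),   where g = dL/dy,
// which is what the lambdas below compute (factor1 = 1/N, factor3 = 1/N^3);
// when N <= c the op acts as identity and the gradient passes through unchanged.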
template <typename T>
static void clipByNormBP_(const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {

    const int rank = input.rankOf();

    auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions);

    if (norm2.lengthOf() == 1) {

        const T N  = norm2.e<T>(0);
        const auto cn = clipNorm.e<T>(0);

        if (N > cn) {

            const T sumOfProd = (input * gradO).reduceNumber(reduce::Sum).e<T>(0);    // reduce to scalar
            const T factor1   = static_cast<T>(1.f) / N;
            const T factor3   = factor1 / (N * N);                                    // 1 / (N*N*N)

            auto lambda = LAMBDA_TT(elem1, elem2, cn, sumOfProd, factor1, factor3) {
                return cn * (factor1 * elem2 - factor3 * elem1 * sumOfProd);
            };

            (const_cast<NDArray&>(input)).applyPairwiseLambda<T>(const_cast<NDArray*>(&gradO), lambda, &gradI);
        }
        else
            gradI.assign(gradO);
    }
    else {

        const auto gradISubArrs = gradI.allTensorsAlongDimension(dimensions);
        const auto gradOSubArrs = gradO.allTensorsAlongDimension(dimensions);
        const auto inputSubArrs = input.allTensorsAlongDimension(dimensions);

        auto cn = clipNorm.e<T>(0);

        PRAGMA_OMP_PARALLEL_FOR
        for (Nd4jLong i = 0; i < gradISubArrs->size(); ++i) {

            T N = norm2.e<T>(i);

            auto gradOSubArr = gradOSubArrs->at(i);
            auto gradISubArr = gradISubArrs->at(i);

            if (N > cn) {

                auto inputSubArr = inputSubArrs->at(i);

                const T sumOfProd = (*inputSubArr * *gradOSubArr).reduceNumber(reduce::Sum).e<T>(0);    // reduce to scalar
                const T factor1   = static_cast<T>(1.f) / N;
                const T factor3   = factor1 / (N * N);                                                  // 1 / (N*N*N)

                auto lambda = LAMBDA_TT(elem1, elem2, cn, sumOfProd, factor1, factor3) {
                    return cn * (factor1 * elem2 - factor3 * elem1 * sumOfProd);
                };

                inputSubArr->applyPairwiseLambda<T>(gradOSubArr, lambda, gradISubArr);
            }
            else
                gradISubArr->assign(gradOSubArr);
        }
        delete gradISubArrs;
        delete gradOSubArrs;
        delete inputSubArrs;
    }
}
void clipByNormBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) {
    BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBP_, (input, gradO, gradI, dimensions, clipNorm), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBP_, (const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
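// clipByAveraged: like clipByNorm, but the clipping criterion is the averaged norm
// n2 = ||x||_2 / numberOfElements (per whole array, or per sub-array along dimensions);
// whenever n2 > clipNorm the corresponding elements are scaled by clipNorm / n2.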
template <typename T>
static void clipByAveraged_(NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {

    auto cn = clipNorm.e<T>(0);

    if (dimensions.size() == 0) {
        // all-reduce
        T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf();
        if (n2 <= cn) {
            if (!isInplace)
                output.assign(input);
        }
        else {
            const T factor = cn / n2;
            auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
            input.applyLambda<T>(lambda, &output);
        }
    }
    else {
        // along dimension
        auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false);
        if (!isInplace)
            output.assign(input);
        auto tads = output.allTensorsAlongDimension(dimensions);
        // TODO: make this CUDA-compliant somehow
        for (int e = 0; e < tads->size(); e++) {
            T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf();
            const T factor = cn / n2;
            if (n2 > cn) {
                auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
                // each sub-array is a view into output, so updating it in place updates output
                tads->at(e)->applyLambda<T>(lambda, tads->at(e));
            }
        }
        delete tads;
    }
}
void clipByAveraged(nd4j::LaunchContext* context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) {
    BUILD_SINGLE_SELECTOR(input.dataType(), clipByAveraged_, (input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES);
/*
    if (d1 > params[1])
        return params[1];
    else if (d1 < params[0])
        return params[0];
    else
        return d1;
*/
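// clipByValue: clamps every element into [leftBound, rightBound],
// e.g. clipping {-2, 0.5, 3} to [-1, 1] yields {-1, 0.5, 1}.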
template <typename T>
static void clipByValue_(NDArray& input, double leftBound, double rightBound, NDArray& output) {

    auto routine = LAMBDA_T(_x, leftBound, rightBound) {
        if (_x > rightBound) return rightBound;
        if (_x < leftBound)  return leftBound;
        return _x;
    };

    input.applyLambda<T>(routine, &output);
}
void clipByValue(nd4j::LaunchContext* context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
    BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (NDArray& input, double leftBound, double rightBound, NDArray& output), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
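// mirrorPad: pads the input by mirroring it around its borders (mode 0 - REFLECT, else SYMMETRIC).
// For example, padding {1, 2, 3} with one element on each side gives {2, 1, 2, 3, 2} in REFLECT mode
// (border element not repeated) and {1, 1, 2, 3, 3} in SYMMETRIC mode (border element repeated).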
template <typename T>
static void mirrorPad_(const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {

    // mode: 0 - REFLECT, else - SYMMETRIC
    const int reflBorder = (bool)mode ? 1 : 0;
    const int rank = input.rankOf();
    const Nd4jLong outLen = output.lengthOf();

    if (rank <= 1) {

        const Nd4jLong inLen = input.lengthOf();
        const auto leftSide = paddings.e<Nd4jLong>(0);
        const auto leftSideCorrected = leftSide - reflBorder;
        const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;

        for (Nd4jLong i = 0; i < outLen; ++i) {

            if (i < leftSide)                               // left side
                output.p(i, input.e<T>(leftSideCorrected - i));
            else if (i >= leftSide && i < leftSide + inLen) // middle
                output.p(i, input.e<T>(i - leftSide));
            else                                            // right side
                output.p(i, input.e<T>(len - i));
        }
    }
    else {

        std::vector<Nd4jLong> inIdx(rank), outIdx(rank);

        PRAGMA_OMP_PARALLEL_FOR_ARGS(firstprivate(inIdx, outIdx))
        for (Nd4jLong i = 0; i < outLen; ++i) {

            shape::index2coords(rank, output.shapeOf(), i, outIdx.data());

            for (int j = 0; j < rank; ++j) {

                const Nd4jLong inLen = input.sizeAt(j);
                const auto leftSide  = paddings.e<Nd4jLong>(j, 0);   // padding sizes are indices, read as integers like in the rank <= 1 branch
                const auto leftSideCorrected = leftSide - reflBorder;
                const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder;

                if (outIdx[j] < leftSide)                                       // left side
                    inIdx[j] = leftSideCorrected - outIdx[j];
                else if (outIdx[j] >= leftSide && outIdx[j] < leftSide + inLen) // middle
                    inIdx[j] = outIdx[j] - leftSide;
                else                                                            // right side
                    inIdx[j] = len - outIdx[j];
            }

            auto outOffset = shape::getOffset(0, output.shapeOf(), output.stridesOf(), outIdx.data(), rank);
            auto inOffset  = shape::getOffset(0, input.shapeOf(),  input.stridesOf(),  inIdx.data(),  rank);

            reinterpret_cast<T*>(output.buffer())[outOffset] = reinterpret_cast<T*>(input.getBuffer())[inOffset];
        }
    }
}
void mirrorPad(nd4j::LaunchContext* context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) {
    BUILD_SINGLE_SELECTOR(input.dataType(), mirrorPad_, (input, paddings, output, mode), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void mirrorPad_, (const NDArray& input, const NDArray& paddings, NDArray& output, const int mode), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
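// concat: joins the input arrays along the given axis; all inputs must match in every dimension
// except axis, e.g. concatenating shapes [2,3] and [2,4] along axis 1 yields shape [2,7].
// The element copying itself is delegated to SpecialMethods<T>::concatCpuGeneric.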
template <typename T>
static void concat_(const std::vector<NDArray*>& inArrs, NDArray& output, const int axis) {
    nd4j::SpecialMethods<T>::concatCpuGeneric(inArrs, output, axis);
}
void concat(nd4j::LaunchContext* context, const std::vector<NDArray*>& inArrs, NDArray& output, const int axis) {
    BUILD_SINGLE_SELECTOR(output.dataType(), concat_, (inArrs, output, axis), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void concat_, (const std::vector<NDArray*>& inArrs, NDArray& output, const int axis), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////////
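// tileBP: backprop for tile. Each element of gradI accumulates the sum of all gradO elements that
// the forward tiling produced from it, e.g. tiling a length-2 array with reps = {3} maps gradO
// indices {0, 2, 4} back onto gradI index 0 and {1, 3, 5} onto gradI index 1.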
template <typename T>
static void tileBP_(const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {

    T* gradIBuff       = reinterpret_cast<T*>(gradI.getBuffer());
    const T* gradOBuff = reinterpret_cast<T*>(gradO.getBuffer());

    const Nd4jLong gradILen = gradI.lengthOf();
    const Nd4jLong gradOLen = gradO.lengthOf();     // gradOLen >= gradILen
    const Nd4jLong gradIEWS = nd4j::math::nd4j_abs<Nd4jLong>(gradI.ews());
    const Nd4jLong gradOEWS = gradO.ews();

    // initial zeroing of gradI content
    if (gradIEWS == 1)
        memset(gradIBuff, 0, gradILen * sizeof(T));
    else {
        //PRAGMA_OMP_PARALLEL_FOR_SIMD
        for (Nd4jLong i = 0; i < gradILen * gradIEWS; i += gradIEWS)
            gradIBuff[i] = static_cast<T>(0.f);
    }

    if (gradO.ordering() == 'c' && gradOEWS == 1) {
        //PRAGMA_OMP_PARALLEL_FOR_SIMD
        for (Nd4jLong i = 0; i < gradOLen; ++i) {
            auto idx = shape::subArrayIndex(i, gradO.getShapeInfo(), gradI.getShapeInfo());
            gradI.p(idx, gradI.e<T>(idx) + gradOBuff[i]);
        }
    }
    else if (gradO.ordering() == 'c' && gradOEWS > 1) {
        //PRAGMA_OMP_PARALLEL_FOR_SIMD
        for (Nd4jLong i = 0; i < gradOLen; ++i) {
            auto idx = shape::subArrayIndex(i, gradO.getShapeInfo(), gradI.getShapeInfo());
            gradI.p(idx, gradI.e<T>(idx) + gradOBuff[i * gradOEWS]);
        }
    }
    else {
        //PRAGMA_OMP_PARALLEL_FOR_SIMD
        for (Nd4jLong i = 0; i < gradOLen; ++i) {
            auto fidx = shape::subArrayIndex(i, gradO.getShapeInfo(), gradI.getShapeInfo());
            gradI.p(fidx, gradI.e<T>(fidx) + gradOBuff[shape::getIndexOffset(i, gradO.getShapeInfo(), gradOLen)]);
        }
    }
}
void tileBP(nd4j::LaunchContext* context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
    BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBP_, (gradO, gradI, reps), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void tileBP_, (const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps), FLOAT_TYPES);
}
}
}