/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com)
//

#ifndef LIBND4J_SHAPEUTILS_H
#define LIBND4J_SHAPEUTILS_H

#include <vector>
#include <array/NDArray.h>

namespace sd {

    class ND4J_EXPORT ShapeUtils {

        public:

        // evaluate shape for array resulting from tensorDot operation, also evaluate shapes and permutation dimensions for transposition of two input arrays
        static std::vector<Nd4jLong> evalShapeForTensorDot(const Nd4jLong* aShapeInfo, const Nd4jLong* bShapeInfo, std::vector<int> axesA, std::vector<int> axesB, std::vector<int>& permutAt, std::vector<int>& permutBt, std::vector<Nd4jLong>& shapeAt, std::vector<Nd4jLong>& shapeBt);
        static std::vector<Nd4jLong> evalShapeForTensorDot(const NDArray* a, const NDArray* b, const std::vector<int>& axesA, const std::vector<int>& axesB, std::vector<int>& permutAt, std::vector<int>& permutBt, std::vector<Nd4jLong>& shapeAt, std::vector<Nd4jLong>& shapeBt);
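        // hedged illustration, assuming the usual tensorDot convention (contracted axes of a are paired with contracted axes of b,
        // and the result keeps the remaining axes of a followed by the remaining axes of b):
        //   a{2,3,4}, b{4,3,5}, axesA = {1,2}, axesB = {1,0}  -->  resulting shape {2,5}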

        // evaluate resulting shape after reduce operation
        static const Nd4jLong* evalReduceShapeInfo(const char order, std::vector<int>& dimensions, const NDArray& arr, const sd::DataType dataType, const bool keepDims = false, const bool supportOldShapes = false, sd::memory::Workspace* workspace = nullptr);
        static const Nd4jLong* evalReduceShapeInfo(const char order, std::vector<int>& dimensions, const Nd4jLong* shapeInfo, const sd::DataType dataType, const bool keepDims = false, const bool supportOldShapes = false, sd::memory::Workspace* workspace = nullptr);
        static const Nd4jLong* evalReduceShapeInfo(const char order, std::vector<int>& dimensions, const NDArray& arr, const bool keepDims = false, const bool supportOldShapes = false, sd::memory::Workspace* workspace = nullptr);
        static const Nd4jLong* evalReduceShapeInfo(const char order, std::vector<int>& dimensions, const Nd4jLong* shapeInfo, const bool keepDims = false, const bool supportOldShapes = false, sd::memory::Workspace* workspace = nullptr);

        // evaluate dimensions to be used by a reduce operation, given the array rank and the dimensions to exclude, for example:
        // if rank = 3 and dimsToExclude = {0,2} then output = {1,0,2}, if rank = 3 and dimsToExclude = {2} then output = {0,1,2}
        // if rank = 3 and dimsToExclude = {0} then output = {1,2,0}, if rank = 4 and dimsToExclude = {0,3} then output = {1,2,0,3}
        static std::vector<int> evalDimsForReduceOp(const int rank, const std::vector<int>& dimsToExclude);
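        // judging from the examples above, the output is the non-excluded dimensions in ascending order followed by dimsToExclude itself,
        // e.g. ShapeUtils::evalDimsForReduceOp(3, {0,2})  -->  {1,0,2}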

        /**
         * evaluate output shape for reduce operation when input shape is empty
         * behavior is analogous to tf
         */
        static const Nd4jLong* evalReduceShapeInfoEmpty(const char order, std::vector<int>& dimensions, const Nd4jLong *shapeInfo, const sd::DataType dataType, const bool keepDims, sd::memory::Workspace* workspace);

        // evaluate shape for array which is result of repeat operation applied to arr
        static std::vector<Nd4jLong> evalRepeatShape(int axis, const std::vector<int>& repeats, const NDArray& arr);

        // evaluate shapeInfo of permuted array
        // if setContigStrides = true, then set contiguous strides in output shapeInfo in accordance with arr order
        static const Nd4jLong* evalPermShapeInfo(const int* dimensions, const int rank, const NDArray& arr, sd::memory::Workspace* workspace, const bool setContigStrides = false);
        static const Nd4jLong* evalPermShapeInfo(const Nd4jLong* dimensions, const int rank, const NDArray& arr, sd::memory::Workspace* workspace);

        // evaluate shapeInfo of transposed array
        // if setContigStrides = true, then set contiguous strides in output shapeInfo in accordance with arr order
        static const Nd4jLong* evalTranspShapeInfo(const NDArray& arr, sd::memory::Workspace* workspace, const bool setContigStrides = false);

        static bool copyVectorPart(std::vector<int>& target, std::vector<int>& source, int rank, int offset);

        // return new (shorter) sorted dimensions array without dimensions that are present in input vector
        static std::vector<int> evalDimsToExclude(const int rank, const int dimsLen, const int* dimensions);
        static std::vector<int> evalDimsToExclude(const int rank, const std::vector<int>& dimensions);
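        // e.g. for rank = 4 and input dimensions {1,3} the returned vector is {0,2}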

        // check whether 2 arrays have mutually broadcastable shapes
        // shape comparison starts from the end
        static bool areShapesBroadcastable(const NDArray &arr1, const NDArray &arr2);
        static bool areShapesBroadcastable(const Nd4jLong* shapeX, const Nd4jLong* shapeY);
        static bool areShapesBroadcastable(const std::vector<Nd4jLong>& shape1, const std::vector<Nd4jLong>& shape2);
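        // hedged illustration, assuming the usual rule that trailing dimensions must be equal or one of them must be 1:
        //   {2,3,4} and {3,4}  -->  broadcastable,   {2,3,4} and {4,3}  -->  not broadcastable (4 vs 3 in the last dimension)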

        // check the possibility of broadcast operation, if true then return shapeInfo of resulting array
        // if evalMinMax == false then array with larger rank has to be passed as first argument
        static bool evalBroadcastShapeInfo(const NDArray& max, const NDArray& min, const bool evalMinMax, const Nd4jLong*& resultShapeInfo, sd::memory::Workspace* workspace);
        static bool evalBroadcastShapeInfo(const Nd4jLong *max, const Nd4jLong *min, const bool evalMinMax, const Nd4jLong*& resultShapeInfo, sd::memory::Workspace* workspace);

        // evaluate sorted vector of max axes to create tads along in case of simple broadcast operation
        // if simple broadcast is not possible then empty vector is returned
        // PLEASE NOTE: condition (rank_max >= rank_min) should be satisfied !
        static std::vector<int> tadAxesForSimpleBroadcast(const NDArray& max, const NDArray& min);

        // check the possibility of broadcast operation for set of arrays, if true then return resulting broadcasted shapeInfo
        static bool evalCommonBroadcastShapeInfo(const std::vector<const NDArray*>& arrays, Nd4jLong*& resultShapeInfo, memory::Workspace* workspace = nullptr);

        // return sorted vector of dimensions that are common (the same) for two arrays, dimension values correspond to the array with the bigger rank
        // for example if arr1{2,7}, arr2{2,5,4,7} then vector = {0,3}
        static std::vector<int> getDimsWithSameShape(const NDArray& max, const NDArray& min);

        // evaluate shapeInfo for resulting array of tile operation
        static const Nd4jLong* evalTileShapeInfo(const NDArray& arr, const std::vector<Nd4jLong>& reps, sd::memory::Workspace* workspace);

        // returns shape part of shapeInfo as std::vector
        static std::vector<Nd4jLong> pullShapeFromShapeInfo(const Nd4jLong *shapeInfo);

        static std::string shapeAsString(const NDArray* array);
        static std::string shapeAsString(const std::vector<Nd4jLong>& shape);
        static std::string shapeAsString(const Nd4jLong* shapeInfo);
        static std::string shapeAsString(const int rank, const Nd4jLong* shapeInfo);
        static std::string strideAsString(const NDArray* array);

        static std::string shapeInfoAsString(const Nd4jLong* shapeInfo);

        static std::vector<Nd4jLong> shapeAsVector(const Nd4jLong* shapeInfo);

        // evaluate shapeInfo for diagonal array which is made using input arr elements as diagonal
        static const Nd4jLong* evalDiagShapeInfo(const Nd4jLong* shapeInfo, sd::memory::Workspace* workspace);

        static std::vector<int> evalBroadcastBackwardAxis(const Nd4jLong *operand, const Nd4jLong *result);

        // utility to calculate matrix product shape with given source shapes and additional params
        // returns pointer to the resulting shapeInfo
        static const Nd4jLong* matrixProductShape(const Nd4jLong* theFirstShape, const Nd4jLong* theSecondShape, bool shouldTranspondFirst, bool shouldTranspondSecond, sd::DataType dtype, sd::memory::Workspace* workspace);

        /**
         * This method evaluates the permutation vector necessary for reducing shapeFrom to shapeTo
         * if shapeFrom is identical to shapeTo (permutation is unnecessary) then an empty vector is returned
         * in case permutation is impossible an exception is thrown
         */
        static std::vector<int> evalPermutFromTo(const std::vector<Nd4jLong>& shapeFrom, const std::vector<Nd4jLong>& shapeTo);
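        // hedged illustration, assuming the conventional permute semantics (output dimension i takes input dimension perm[i]):
        //   shapeFrom = {2,3,4}, shapeTo = {4,2,3}  -->  {2,0,1},   shapeFrom == shapeTo  -->  empty vector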

        /**
         * This method composes shape (shape only, not whole shapeInfo!) using dimensions values and corresponding indexes,
         * please note: the size of input vector dimsAndIdx must always be even, since the numbers of dimensions and indexes are the same,
         * for example if dimsAndIdx = {dimC,dimB,dimA, 2,1,0} then output vector = {dimA,dimB,dimC}
         */
        static std::vector<Nd4jLong> composeShapeUsingDimsAndIdx(const std::vector<int>& dimsAndIdx);
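        // concrete (illustrative) instance of the rule above: dimsAndIdx = {7,5,3, 2,1,0} places 7 at index 2, 5 at index 1
        // and 3 at index 0, giving output vector {3,5,7}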

        /**
         * x * y = c, evaluate shape for array resulting from mmul operation
         * possible cases: dot product (xRank=yRank=1), matrix-vector product (xRank=2, yRank=1), vector-matrix product (xRank=1, yRank=2), matrix-matrix product (xRank=yRank and rank >=2)
         */
        static std::vector<Nd4jLong> evalShapeForMatmul(const Nd4jLong* xShapeInfo, const Nd4jLong* yShapeInfo, const bool transX, const bool transY);
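        // hedged illustration of the matrix-matrix case: x{3,4} * y{4,5} with transX = transY = false  -->  {3,5};
        // with transX = true an x{4,3} input is treated as {3,4}, so x{4,3} * y{4,5} also gives {3,5}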

        /**
         * evaluate number of sub-arrays along dimensions stored in dimsToExclude
         * i.e. if shape is [2,3,4,5] and dimsToExclude={0,2}, then number of sub-arrays = 8
         */
        static Nd4jLong getNumOfSubArrs(const Nd4jLong* shapeInfo, const std::vector<int>& dimsToExclude);
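        // the 8 above is the product of the sizes at the excluded dimensions (2 * 4): one sub-array of shape [3,5]
        // for every combination of indices along dimensions 0 and 2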

        /**
         * return shape without unities, for example if shape is [1,2,1,3] then [2,3] will be returned
         * if unities are not present in given shapeInfo then exactly identical shape will be returned, for example [2,3] -> [2,3]
         * edge case: if given shape is [1,1,1,...,1] (all dims are unities) then output will be empty and means scalar
         */
        static std::vector<Nd4jLong> evalDimsWithoutUnities(const Nd4jLong* shapeInfo);

        /**
         * method returns false if permut == {0,1,2,...permut.size()-1} - in that case permutation is unnecessary
         */
        FORCEINLINE static bool isPermutNecessary(const std::vector<int>& permut);

        /**
         * calculates strides using "dest" shape and given "order", also copies data type from "source" to "dest"
         */
        static void updateStridesAndType(Nd4jLong* dest, const Nd4jLong* source, const char order);

        /**
         * calculates strides using "dest" shape and "order", also sets "dtype" into "dest"
         */
        static void updateStridesAndType(Nd4jLong* dest, const DataType dtype, const char order);
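        // illustration of the contiguous stride math assumed here (element-wise strides): for shape {2,3,4},
        // order 'c' (row-major) gives strides {12,4,1} while order 'f' (column-major) gives strides {1,2,6}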

        /**
         * This method returns number of bytes required for string tensor
         * @param numStrings
         * @return
         */
        static FORCEINLINE Nd4jLong stringBufferHeaderRequirements(Nd4jLong numStrings) {
            // we store +1 offset
            return (numStrings + 1) * sizeof(Nd4jLong);
        }
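        // e.g. with an 8-byte Nd4jLong (the usual case) numStrings = 3 reserves (3 + 1) * 8 = 32 header bytes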

        /**
         * This method selects strides based on the dimensions required for broadcasting
         * @param inShapeInfo const pointer to input (Y) shape info used for strides selection
         * @param nRank rank of input (X) to broadcasting
         * @param dimsSize number of dimensions
         * @param dims const pointer to dimensions used for broadcasting
         * @param outStrides pointer to output strides, has to be pre-allocated and filled with 0
         */
        static void copyCertainStridesFromShapeInfo(const Nd4jLong* inShapeInfo, const int nRank, const int dimsSize, const int* dims, Nd4jLong* outStrides);

        /*
         * check whether arr1/arr2 is sub-array of arr2/arr1,
         * this method does not evaluate which array is the sub-array, it returns true if arr1 is sub-array of arr2 or arr2 is sub-array of arr1
         * sameDims is filled (and sorted) with dimensions values that match both in arr1 and arr2 shapes (unities are ignored)
         * for example:
         * if arr1{2,3} and arr2{2,4,3,7} then return true and sameDims contains {0,2}
         * if arr1{1,1,3,1,3,1,1} and arr2{1,2,3,1,3} then return true and sameDims contains {2,4}
         * if arr1{2,1,4,1,7,5} and arr2{1,1,4,5} then return true and sameDims contains {2,5}
         *
         * static bool isSubArrayCase(const NDArray& arr1, const NDArray& arr2, std::vector<int>& sameDims);
         */

        /*
         * compares shapes only, not strides
         */
        static bool areShapesEqual(const Nd4jLong* shapeInfo, const std::vector<Nd4jLong>& shapeOnly);

    };


    //////////////////////////////////////////////////////////////////////////
    ///// IMPLEMENTATION OF INLINE METHODS /////
    //////////////////////////////////////////////////////////////////////////

    FORCEINLINE bool ShapeUtils::isPermutNecessary(const std::vector<int>& permut) {

        for(int i = 0; i < permut.size(); ++i)
            if(permut[i] != i)
                return true;

        return false;
    }

}

#endif //LIBND4J_SHAPEUTILS_H