From f49c4ea9d01213cdf97f97271579f16b99d91eb7 Mon Sep 17 00:00:00 2001 From: raver119 Date: Sat, 10 Aug 2019 09:14:18 +0300 Subject: [PATCH] int -> long (#108) Signed-off-by: raver119 --- libnd4j/blas/NDArray.hpp | 2 +- libnd4j/blas/cpu/NDArray.cpp | 22 ++++----- libnd4j/blas/cpu/NDArrayLambda.hpp | 10 ++-- libnd4j/blas/cpu/NativeOps.cpp | 10 ++-- libnd4j/blas/cuda/NDArray.cu | 2 +- libnd4j/include/cnpy/cnpy.cpp | 2 +- libnd4j/include/graph/impl/FlatUtils.cpp | 4 +- libnd4j/include/helpers/TAD.h | 6 +-- libnd4j/include/helpers/impl/ShapeUtils.cpp | 6 +-- libnd4j/include/helpers/shape.h | 49 +++++++------------ .../ops/declarable/generic/convo/deconv2d.cpp | 2 +- .../declarable/generic/list/split_list.cpp | 2 +- .../declarable/generic/parity_ops/betaInc.cpp | 4 +- .../generic/parity_ops/polygamma.cpp | 2 +- .../declarable/generic/parity_ops/split_v.cpp | 4 +- .../declarable/generic/parity_ops/tear.cpp | 4 +- .../declarable/generic/parity_ops/zeta.cpp | 4 +- .../ops/declarable/generic/shape/reshape.cpp | 19 ++++--- .../ops/declarable/generic/shape/squeeze.cpp | 2 +- .../declarable/helpers/cpu/compare_elem.cpp | 4 +- .../ops/declarable/helpers/cpu/cross.cpp | 2 +- .../ops/declarable/helpers/cpu/transforms.cpp | 2 +- .../declarable/impl/LegacyReduceBoolOp.cpp | 2 +- libnd4j/include/ops/impl/specials.cpp | 6 +-- .../tests_cpu/layers_tests/LegacyOpsTests.cpp | 4 +- 25 files changed, 79 insertions(+), 97 deletions(-) diff --git a/libnd4j/blas/NDArray.hpp b/libnd4j/blas/NDArray.hpp index a6e81f88d..a0529d106 100644 --- a/libnd4j/blas/NDArray.hpp +++ b/libnd4j/blas/NDArray.hpp @@ -2938,7 +2938,7 @@ bool NDArray::reshapei(const char order, const std::vector& cshape) { if (numberNegativesOnes > 0) delete[] shape_; - int arrLength = 1; + Nd4jLong arrLength = 1; for(const auto& item : shape) arrLength *= item; diff --git a/libnd4j/blas/cpu/NDArray.cpp b/libnd4j/blas/cpu/NDArray.cpp index 9a7271b28..a79f81612 100644 --- a/libnd4j/blas/cpu/NDArray.cpp +++ b/libnd4j/blas/cpu/NDArray.cpp @@ -153,7 +153,7 @@ static void templatedSwap(void *xBuffer, void *yBuffer, Nd4jLong length) { auto y = reinterpret_cast(yBuffer); PRAGMA_OMP_PARALLEL_FOR_SIMD_ARGS(schedule(static)) - for (int i = 0; i < length; ++i) { + for (Nd4jLong i = 0; i < length; ++i) { auto temp = x[i]; x[i] = y[i]; y[i] = temp; @@ -272,7 +272,7 @@ NDArray NDArray::tile(const std::vector& reps) const { else { PRAGMA_OMP_PARALLEL_FOR_SIMD - for(int i=0; itemplate templatedAssign, (result.getBuffer(), xOffset, this->getBuffer(), yOffset), LIBND4J_TYPES); @@ -305,15 +305,14 @@ void NDArray::tile(const std::vector& reps, NDArray& target) const { } } else if(target.ordering() == 'c' && ews > 1) { -//#pragma omp parallel for simd if(targetLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided) - for(int i=0; i Environment::getInstance()->elementwiseThreshold()) schedule(guided) - for(int i=0; i Environment::getInstance()->elementwiseThreshold()) schedule(guided) - for (int i = 0; i < targetLen; ++i) { + + for (Nd4jLong i = 0; i < targetLen; ++i) { auto yOffset = shape::subArrayOffset(i, target.getShapeInfo(), getShapeInfo()); BUILD_DOUBLE_SELECTOR(target.dataType(), dataType(), templatedDoubleAssign, (target.getBuffer(), i, getBuffer(), yOffset), LIBND4J_TYPES, LIBND4J_TYPES); } } else if(target.ordering() == 'c' && ews > 1) { -//#pragma omp parallel for simd if(targetLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided) - for(int i=0; i Environment::getInstance()->elementwiseThreshold()) schedule(guided) - 
for(int i=0; igetOffset(e); auto uOffset = second->getOffset(e); @@ -50,7 +50,7 @@ void NDArray::applyTriplewiseLambda(NDArray* second, NDArray *third, const std:: } else { PRAGMA_OMP_PARALLEL_FOR_SIMD - for (int e = 0; e < _length; e++) { + for (Nd4jLong e = 0; e < _length; e++) { auto tOffset = this->getOffset(e); auto uOffset = second->getOffset(e); @@ -104,13 +104,13 @@ void NDArray::applyPairwiseLambda(const NDArray* other, const std::functionordering() == other->ordering() && this->ordering() == target->ordering() && (this->ews() == 1 && target->ews() == 1) && this->ews() == other->ews()) { PRAGMA_OMP_PARALLEL_FOR_SIMD - for (int e = 0; e < _length; e++) + for (Nd4jLong e = 0; e < _length; e++) z[e] = func(f[e], s[e]); } else { if (f == z) { PRAGMA_OMP_PARALLEL_FOR_SIMD - for (int e = 0; e < _length; e++) { + for (Nd4jLong e = 0; e < _length; e++) { auto xOffset = this->getOffset(e); auto yOffset = other->getOffset(e); @@ -120,7 +120,7 @@ void NDArray::applyPairwiseLambda(const NDArray* other, const std::functiongetOffset(e); auto yOffset = other->getOffset(e); diff --git a/libnd4j/blas/cpu/NativeOps.cpp b/libnd4j/blas/cpu/NativeOps.cpp index 705057fba..74bd072c8 100644 --- a/libnd4j/blas/cpu/NativeOps.cpp +++ b/libnd4j/blas/cpu/NativeOps.cpp @@ -1014,21 +1014,21 @@ void flattenGeneric(Nd4jPointer *extraPointers, if (len < ELEMENT_THRESHOLD) { PRAGMA_OMP_SIMD - for (int i = 0; i < len; i++) { + for (Nd4jLong i = 0; i < len; i++) { hZ[i * resultEleStride] = input[i * inputEleStride]; } } else { PRAGMA_OMP_PARALLEL_FOR_SIMD - for (int i = 0; i < len; i++) { + for (Nd4jLong i = 0; i < len; i++) { hZ[i * resultEleStride] = input[i * inputEleStride]; } } } else { int idx = 0; - for(int i = 0; i < len; i++) + for(Nd4jLong i = 0; i < len; i++) hZ[idx++] = input[shape::getIndexOffset(i, inputShapeInfo, len)]; } } @@ -1047,7 +1047,7 @@ void flattenGeneric(Nd4jPointer *extraPointers, if (order == 'f') { // 1. get c ordering coordinates auto cIndexCoordinates = new Nd4jLong[rank - 1]; - int divisor = 1; + Nd4jLong divisor = 1; for (int dim = rank - 1; dim > 0; dim--) { cIndexCoordinates[dim - 1] = (i / divisor) % xShape[dim]; divisor *= xShape[dim]; @@ -1056,7 +1056,7 @@ void flattenGeneric(Nd4jPointer *extraPointers, // 2. convert to f ordering index int fIndex = 0; - int multiplier = 1; + Nd4jLong multiplier = 1; for (int dim = 1; dim <= rank - 1; dim++) { fIndex += cIndexCoordinates[dim - 1] * multiplier; multiplier *= xShape[dim]; diff --git a/libnd4j/blas/cuda/NDArray.cu b/libnd4j/blas/cuda/NDArray.cu index 7d58803b3..67173c971 100644 --- a/libnd4j/blas/cuda/NDArray.cu +++ b/libnd4j/blas/cuda/NDArray.cu @@ -301,7 +301,7 @@ void* NDArray::specialBufferWithOffset(Nd4jLong offset) const { // change an array by repeating it the number of times given by reps. 
NDArray NDArray::tile(const std::vector& reps) const { int dim = reps.size(); - int product = 1; + Nd4jLong product = 1; for(const auto& item : reps) product *= item; if(product == 0) diff --git a/libnd4j/include/cnpy/cnpy.cpp b/libnd4j/include/cnpy/cnpy.cpp index ccc8f7600..a09b38bfd 100644 --- a/libnd4j/include/cnpy/cnpy.cpp +++ b/libnd4j/include/cnpy/cnpy.cpp @@ -670,7 +670,7 @@ void cnpy::npy_save(std::string fname, fwrite(&header[0],sizeof(char),header.size(),fp); } - unsigned int nels = 1; + unsigned long long nels = 1; for(int i = 0;i < ndims;i++) nels *= shape[i]; fwrite(data,sizeof(T),nels,fp); diff --git a/libnd4j/include/graph/impl/FlatUtils.cpp b/libnd4j/include/graph/impl/FlatUtils.cpp index 0e6eb06df..ad0c5112d 100644 --- a/libnd4j/include/graph/impl/FlatUtils.cpp +++ b/libnd4j/include/graph/impl/FlatUtils.cpp @@ -64,14 +64,14 @@ namespace nd4j { auto longPtr = reinterpret_cast(rawPtr); auto charPtr = reinterpret_cast(longPtr + length + 1); auto offsets = new Nd4jLong[length+1]; - for (int e = 0; e <= length; e++) { + for (Nd4jLong e = 0; e <= length; e++) { auto o = longPtr[e]; // FIXME: BE vs LE on partials //auto v = canKeep ? o : BitwiseUtils::swap_bytes(o); offsets[e] = o; } - for (int e = 0; e < length; e++) { + for (Nd4jLong e = 0; e < length; e++) { auto start = offsets[e]; auto end = offsets[e+1]; auto len = end - start; diff --git a/libnd4j/include/helpers/TAD.h b/libnd4j/include/helpers/TAD.h index bc94c519e..c49f1047d 100644 --- a/libnd4j/include/helpers/TAD.h +++ b/libnd4j/include/helpers/TAD.h @@ -492,7 +492,7 @@ namespace shape { //find the length of the elements we //are iterating over - int len = 1; + Nd4jLong len = 1; //left over index cursor for initializing elements int leftOverIndex = 0; for(int i = 0; i < rank; i++) { @@ -669,7 +669,7 @@ namespace shape { //find the length of the elements we //are iterating over - int len = 1; + Nd4jLong len = 1; //left over index cursor for initializing elements int leftOverIndex = 0; for(int i = 0; i < rank; i++) { @@ -787,7 +787,7 @@ namespace shape { Nd4jLong *ret2 = shape::sliceOfShapeBuffer(sliceIndex, permuted); Nd4jLong tensorLength = shape::prodLong(tensorShape,tadRank); - int compLength = shape::isVector(ret2) ? shape::length(ret2) : shape::prod(tensorShape,tadRank); + Nd4jLong compLength = shape::isVector(ret2) ? 
shape::length(ret2) : shape::prodLong(tensorShape,tadRank); // int temp; // const bool isLikeVector = shape::isLikeVector(ret2, temp); diff --git a/libnd4j/include/helpers/impl/ShapeUtils.cpp b/libnd4j/include/helpers/impl/ShapeUtils.cpp index c54dd168b..1274582b3 100644 --- a/libnd4j/include/helpers/impl/ShapeUtils.cpp +++ b/libnd4j/include/helpers/impl/ShapeUtils.cpp @@ -75,7 +75,7 @@ std::vector ShapeUtils::evalShapeForTensorDot(const Nd4jLong* aShapeIn permutBt = axesB; permutBt.insert(permutBt.end(), list_B.begin(), list_B.end()); - int n2 = 1; + Nd4jLong n2 = 1; for (int i = 0; i < axeAsize; i++) n2 *= aShapeInfo[axesA[i] + 1]; shapeAt = {-1, n2}; @@ -86,7 +86,7 @@ std::vector ShapeUtils::evalShapeForTensorDot(const Nd4jLong* aShapeIn oldShapeA[i] = aShapeInfo[list_A[i] + 1]; - int n3 = 1; + Nd4jLong n3 = 1; for (int i = 0; i < axeBsize; i++) n3 *= bShapeInfo[axesB[i] + 1]; shapeBt = {n3, -1}; @@ -553,7 +553,7 @@ std::vector ShapeUtils::getDimsWithSameShape(const NDArray& max, const NDAr Nd4jLong* ShapeUtils::evalTileShapeInfo(const NDArray& arr, const std::vector& reps, nd4j::memory::Workspace* workspace) { // check whether reps contains at least one zero (then throw exception) or whether all elements in reps are unities (then simply reshape or do nothing) int repsSize = reps.size(); - int product = 1; + Nd4jLong product = 1; for(const auto& item : reps) product *= item; if(product == 0) diff --git a/libnd4j/include/helpers/shape.h b/libnd4j/include/helpers/shape.h index b9b519510..705f06b99 100644 --- a/libnd4j/include/helpers/shape.h +++ b/libnd4j/include/helpers/shape.h @@ -127,7 +127,7 @@ namespace shape { ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength); - ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); + ND4J_EXPORT _CUDA_HD Nd4jLong tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength); ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder); @@ -856,8 +856,6 @@ namespace shape { * Returns the prod of the data * up to the given length */ - ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length); - ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length); /** @@ -1055,12 +1053,12 @@ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { * Length of a tad given * the shape information */ - INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { + INLINEDEF _CUDA_HD Nd4jLong tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) { if(dimensionLength == 1) { return shape::shapeOf(shapeInfo)[dimension[0]]; } else { - int ret = 1; + Nd4jLong ret = 1; for(int i = 0; i < shape::rank(shapeInfo); i++) { for(int j = 0; j < dimensionLength; j++) { if(i == dimension[j]) @@ -1307,7 +1305,7 @@ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { traceNew(6); Nd4jLong *stride = new Nd4jLong[dimensions]; - int st = startNum; + Nd4jLong st = startNum; for (int j = 0; j < rank; j++) { stride[j] = st; st *= shape[j]; @@ -1326,7 +1324,7 @@ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { //int dimensions = rank; - int st = startNum; + Nd4jLong st = startNum; for (int j = 0; j < rank; j++) { ret[j] = st; st *= shape[j]; @@ -1361,7 +1359,7 @@ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { // } - int st = startNum; + Nd4jLong st = startNum; for (int j = rank - 1; j >= 0; j--) { stride[j] = st; st 
*= shape[j]; @@ -1383,7 +1381,7 @@ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { // } - int st = startNum; + Nd4jLong st = startNum; for (int j = rank - 1; j >= 0; j--) { ret[j] = st; st *= shape[j]; @@ -1513,8 +1511,8 @@ __device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) { int oldnd; Nd4jLong *oldDims = shape::copyOf(rank, shape); Nd4jLong *oldStrides = shape::copyOf(rank, stride); - int np, op, last_stride; - int oldStart, oldStop, ok, newStart, newStop, nk; + Nd4jLong np, op, last_stride; + Nd4jLong oldStart, oldStop, ok, newStart, newStop, nk; traceNew(10); @@ -2042,13 +2040,12 @@ template * @return */ INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) { - int sd = -1; + Nd4jLong sd = 1; int dim = -1; int i = -1; int cContiguous = 1; int isFortran = 1; - sd = 1; for (i = length - 1; i >= 0; --i) { dim = shape[i]; @@ -2235,7 +2232,7 @@ template INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) { for(int i = 0; i < rank; i++) { - if(shape[i] == shape::prod(shape,rank)) + if(shape[i] == shape::prodLong(shape,rank)) return 1; } @@ -3103,11 +3100,11 @@ INLINEDEF _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, cons if(shape::isVector(shape,rank)) { //return total length for row vectors if(dimensionLength == 1 && shape[0] == 1) { - return shape::prod(shape,rank); + return shape::prodLong(shape,rank); } } else if(rank == dimensionLength) - return shape::prod(shape,rank); + return shape::prodLong(shape,rank); int absSelta = nd4j::math::nd4j_abs(rank - dimensionLength); traceNew(27); auto ret2 = shape::removeIndex(shape, dimension, rank, dimensionLength); @@ -3554,18 +3551,6 @@ INLINEDEF _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, cons return ret; } -/** - * Returns the prod of the data - * up to the given length - */ - INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) { - int prod = 1; - for (int i = 0; i < length; i++) { - prod *= data[i]; - } - - return prod; - } /** * Returns the prod of the data @@ -3956,7 +3941,7 @@ INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, Nd4jLong* newStrides = shape::stride(newShapeInfo); const Nd4jLong* oldShape = shape::shapeOf(const_cast(oldShapeInfo)); const Nd4jLong* oldStrides = shape::stride(const_cast(oldShapeInfo)); - int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim; + Nd4jLong oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim; while (newStart < newRank && oldStart < oldRank) { @@ -3995,11 +3980,11 @@ INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) { - int oldnd; + Nd4jLong oldnd; Nd4jLong* oldDims = shape::copyOf(oldRank, shape::shapeOf(oldShape)); Nd4jLong* oldStrides = shape::copyOf(oldRank, shape::stride(oldShape)); - int np, op, last_stride; - int oldStart, oldStop, ok, newStart, newStop, nk; + Nd4jLong np, op, last_stride; + Nd4jLong oldStart, oldStop, ok, newStart, newStop, nk; auto newStrides = new Nd4jLong[newRank]; oldnd = 0; diff --git a/libnd4j/include/ops/declarable/generic/convo/deconv2d.cpp b/libnd4j/include/ops/declarable/generic/convo/deconv2d.cpp index e204224fd..3a7450fc7 100644 --- a/libnd4j/include/ops/declarable/generic/convo/deconv2d.cpp +++ b/libnd4j/include/ops/declarable/generic/convo/deconv2d.cpp @@ -229,7 +229,7 @@ CUSTOM_OP_IMPL(deconv2d_bp, 3, 2, 
false, 0, 9) { // ----- calculation of gradB ----- // if(gradB) { if(gradB->rankOf() == 2) - gradB = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()})); + gradB = new NDArray(gradB->reshape(gradB->ordering(), {gradB->lengthOf()})); gradO->reduceAlongDimension(reduce::Sum, gradB, {0, 2, 3}); // sum over bS, oH, oW if(gradB != OUTPUT_VARIABLE(2)) delete gradB; diff --git a/libnd4j/include/ops/declarable/generic/list/split_list.cpp b/libnd4j/include/ops/declarable/generic/list/split_list.cpp index 2147d0af1..f2399c9d3 100644 --- a/libnd4j/include/ops/declarable/generic/list/split_list.cpp +++ b/libnd4j/include/ops/declarable/generic/list/split_list.cpp @@ -53,7 +53,7 @@ namespace nd4j { // now let's build subarrays int cnt = 0; std::vector indices(2 * array->rankOf(), 0); - for (int e = 0; e < sizes->lengthOf(); e++) { + for (Nd4jLong e = 0; e < sizes->lengthOf(); e++) { int c_size = sizes->e(e); REQUIRE_TRUE(c_size > 0, 0, "Slice size should have postive value, but got %i instead", c_size); diff --git a/libnd4j/include/ops/declarable/generic/parity_ops/betaInc.cpp b/libnd4j/include/ops/declarable/generic/parity_ops/betaInc.cpp index 42fa92a14..9d0a935a9 100644 --- a/libnd4j/include/ops/declarable/generic/parity_ops/betaInc.cpp +++ b/libnd4j/include/ops/declarable/generic/parity_ops/betaInc.cpp @@ -42,10 +42,10 @@ CONFIGURABLE_OP_IMPL(betainc, 3, 1, false, 0, 0) { REQUIRE_TRUE(a->isSameShape(b) && a->isSameShape(x), 0, "CONFIGURABLE_OP betainc: all three input arrays must have the same shapes, bit got a=%s, b=%s and x=%s instead !", ShapeUtils::shapeAsString(a).c_str(), ShapeUtils::shapeAsString(b).c_str(), ShapeUtils::shapeAsString(x).c_str()); - int arrLen = a->lengthOf(); + Nd4jLong arrLen = a->lengthOf(); // FIXME: this stuff should be single op call. No sense rolling over couple of arrays twice - for(int i = 0; i < arrLen; ++i ) { + for(Nd4jLong i = 0; i < arrLen; ++i ) { REQUIRE_TRUE(a->e(i) > 0.f, 0, "BETAINC op: arrays a array must contain only elements > 0 !"); REQUIRE_TRUE(b->e(i) > 0.f, 0, "BETAINC op: arrays b array must contain only elements > 0 !"); REQUIRE_TRUE(0.f <= x->e(i) && x->e(i) <= 1.f, 0, "BETAINC op: all elements of x array must be within [0, 1] range!"); diff --git a/libnd4j/include/ops/declarable/generic/parity_ops/polygamma.cpp b/libnd4j/include/ops/declarable/generic/parity_ops/polygamma.cpp index f8cab85d4..0f850cd4b 100644 --- a/libnd4j/include/ops/declarable/generic/parity_ops/polygamma.cpp +++ b/libnd4j/include/ops/declarable/generic/parity_ops/polygamma.cpp @@ -35,7 +35,7 @@ CONFIGURABLE_OP_IMPL(polygamma, 2, 1, false, 0, 0) { REQUIRE_TRUE(n->isSameShape(x), 0, "POLYGAMMA op: two input arrays n and x must have the same shapes, but got n=%s and x=%s instead !", ShapeUtils::shapeAsString(n).c_str(), ShapeUtils::shapeAsString(x).c_str()); - int arrLen = n->lengthOf(); + Nd4jLong arrLen = n->lengthOf(); // FIXME: this shit should be single op call, not a loop! 
auto nPositive = n->reduceNumber(nd4j::reduce::IsPositive, nullptr); auto xPositive = x->reduceNumber(nd4j::reduce::IsPositive, nullptr); diff --git a/libnd4j/include/ops/declarable/generic/parity_ops/split_v.cpp b/libnd4j/include/ops/declarable/generic/parity_ops/split_v.cpp index fae7cce62..0e36c15ea 100644 --- a/libnd4j/include/ops/declarable/generic/parity_ops/split_v.cpp +++ b/libnd4j/include/ops/declarable/generic/parity_ops/split_v.cpp @@ -46,7 +46,7 @@ namespace ops { int pos = 0; std::vector indices(2 * input->rankOf()); - for (int e = 0; e < sizes->lengthOf(); e++) { + for (Nd4jLong e = 0; e < sizes->lengthOf(); e++) { int c_size = sizes->e(e); for (int d = 0; d < input->rankOf(); d++) { @@ -103,7 +103,7 @@ namespace ops { auto length = sizes->lengthOf(); int pos = 0; - for (int e = 0; e < length; e++) { + for (Nd4jLong e = 0; e < length; e++) { int c_size = sizes->e(e); diff --git a/libnd4j/include/ops/declarable/generic/parity_ops/tear.cpp b/libnd4j/include/ops/declarable/generic/parity_ops/tear.cpp index 131402620..090c29504 100644 --- a/libnd4j/include/ops/declarable/generic/parity_ops/tear.cpp +++ b/libnd4j/include/ops/declarable/generic/parity_ops/tear.cpp @@ -38,7 +38,7 @@ namespace nd4j { REQUIRE_TRUE(v >= 0 && v < input->rankOf(), 0, "Tear dimensions should be non-negative values, and lower then input rank. Got %i instead", v); auto tads = input->allTensorsAlongDimension(dims); - for (int e = 0; e < tads->size(); e++) { + for (Nd4jLong e = 0; e < tads->size(); e++) { auto outE = OUTPUT_VARIABLE(e); outE->assign(tads->at(e)); @@ -62,7 +62,7 @@ namespace nd4j { auto numTads = tadPack.numberOfTads(); auto result = SHAPELIST(); - for (int e = 0; e < numTads; e++) { + for (Nd4jLong e = 0; e < numTads; e++) { auto newShape = ConstantShapeHelper::getInstance()->createShapeInfo(block.dataType(), shape::order(inShape), shape::rank(tadPack.primaryShapeInfo()), shape::shapeOf(tadPack.primaryShapeInfo())); result->push_back(newShape); } diff --git a/libnd4j/include/ops/declarable/generic/parity_ops/zeta.cpp b/libnd4j/include/ops/declarable/generic/parity_ops/zeta.cpp index 7a64c8540..eb10e53af 100644 --- a/libnd4j/include/ops/declarable/generic/parity_ops/zeta.cpp +++ b/libnd4j/include/ops/declarable/generic/parity_ops/zeta.cpp @@ -34,10 +34,10 @@ namespace nd4j { REQUIRE_TRUE(x->isSameShape(q), 0, "ZETA op: two input arrays must have the same shapes, bot got x=%s and q=%s !", ShapeUtils::shapeAsString(x).c_str(), ShapeUtils::shapeAsString(q).c_str()); - int arrLen = x->lengthOf(); + Nd4jLong arrLen = x->lengthOf(); // FIXME: this should NOT be loop. 
- for(int i = 0; i < arrLen; ++i ) { + for(Nd4jLong i = 0; i < arrLen; ++i ) { REQUIRE_TRUE(x->e(i) > 1.f, 0, "ZETA op: all elements of x array must be > 1 !"); REQUIRE_TRUE(q->e(i) > 0.f, 0, "ZETA op: all elements of q array must be > 0 !"); } diff --git a/libnd4j/include/ops/declarable/generic/shape/reshape.cpp b/libnd4j/include/ops/declarable/generic/shape/reshape.cpp index c699bcdec..ef5fe26cf 100644 --- a/libnd4j/include/ops/declarable/generic/shape/reshape.cpp +++ b/libnd4j/include/ops/declarable/generic/shape/reshape.cpp @@ -54,14 +54,14 @@ namespace nd4j { int e2 = e; for (; e < (int) arguments->size(); e++) { if (arguments->at(e) == -1){ - long shapeLength = 1; + Nd4jLong shapeLength = 1; for(; e2 < e; e2++){ shapeLength *= arguments->at(e2); } for(e2 = e + 1; e2 < arguments->size(); e2++){ shapeLength *= arguments->at(e2); } - long realShape = x->lengthOf() / shapeLength; + Nd4jLong realShape = x->lengthOf() / shapeLength; shapeNew.push_back(realShape); } else{ @@ -109,16 +109,15 @@ namespace nd4j { for (int e = 0; e < (int) s->lengthOf(); e++) { auto dim = s->e(e); if (dim == -1){ - long shapeLength = 1; + Nd4jLong shapeLength = 1; for(int e2 = 0; e2 < e; e2++){ shapeLength *= s->e(e2); } for(int e2 = e + 1; e2 < (int) s->lengthOf(); e2++){ REQUIRE_TRUE(s->e(e2) != -1, 0, "Reshape : Only one unknown dimension (-1) is allowed."); - shapeLength *= - s->e(e2); + shapeLength *= s->e(e2); } - long realShape = x->lengthOf() / shapeLength; + Nd4jLong realShape = x->lengthOf() / shapeLength; shapeNew[e] = realShape; } else{ @@ -187,7 +186,7 @@ namespace nd4j { for (; e < (int) arguments->size(); e++) { if ((int) arguments->at(e) == -1){ - long shapeLength = 1; + Nd4jLong shapeLength = 1; for(; e2 < e; e2 ++){ shapeLength *= arguments->at(e2); } @@ -201,7 +200,7 @@ namespace nd4j { shapeNew.push_back(0); } else { //Standard case - long realShape = shape::length(inp) / shapeLength; + Nd4jLong realShape = shape::length(inp) / shapeLength; shapeNew.push_back(realShape); } } @@ -240,7 +239,7 @@ namespace nd4j { for (int e = 0; e < (int) y->lengthOf(); e++) { auto dim = y->e(e); if (dim == -1){ - long shapeLength = 1; + Nd4jLong shapeLength = 1; for(int e2 = 0; e2 < e; e2++){ shapeLength *= y->e(e2); } @@ -253,7 +252,7 @@ namespace nd4j { //Edge case for empty: shapeNew[e] = 0; } else { - long realShape = shape::length(inp) / shapeLength; + Nd4jLong realShape = shape::length(inp) / shapeLength; shapeNew[e] = realShape; } }else { diff --git a/libnd4j/include/ops/declarable/generic/shape/squeeze.cpp b/libnd4j/include/ops/declarable/generic/shape/squeeze.cpp index 6eb0f91ad..3b158ff3a 100644 --- a/libnd4j/include/ops/declarable/generic/shape/squeeze.cpp +++ b/libnd4j/include/ops/declarable/generic/shape/squeeze.cpp @@ -41,7 +41,7 @@ namespace nd4j { } else if (block.width() > 1) { auto a = INPUT_VARIABLE(1); - for (int e = 0; e < a->lengthOf(); e++) { + for (Nd4jLong e = 0; e < a->lengthOf(); e++) { int _a = a->e(e); if (_a < 0) diff --git a/libnd4j/include/ops/declarable/helpers/cpu/compare_elem.cpp b/libnd4j/include/ops/declarable/helpers/cpu/compare_elem.cpp index 45ce5483f..50a11f767 100644 --- a/libnd4j/include/ops/declarable/helpers/cpu/compare_elem.cpp +++ b/libnd4j/include/ops/declarable/helpers/cpu/compare_elem.cpp @@ -31,14 +31,14 @@ namespace nd4j { if(isStrictlyIncreasing) { PRAGMA_OMP_PARALLEL_FOR_SIMD_REDUCTION(+:sum) - for (int i = 0; i < length - 1; i++) { + for (Nd4jLong i = 0; i < length - 1; i++) { auto val0 = input->t(i); auto val1 = input->t(i + 1); sum += val0 >= val1 ? 
-1 : 0; } } else { PRAGMA_OMP_PARALLEL_FOR_SIMD_REDUCTION(+:sum) - for (int i = 0; i < length - 1; i++) { + for (Nd4jLong i = 0; i < length - 1; i++) { auto val0 = input->t(i); auto val1 = input->t(i + 1); sum += val0 > val1 ? -1 : 0; diff --git a/libnd4j/include/ops/declarable/helpers/cpu/cross.cpp b/libnd4j/include/ops/declarable/helpers/cpu/cross.cpp index 79864ff41..f61a53f30 100644 --- a/libnd4j/include/ops/declarable/helpers/cpu/cross.cpp +++ b/libnd4j/include/ops/declarable/helpers/cpu/cross.cpp @@ -39,7 +39,7 @@ void crossBatched(nd4j::LaunchContext * context, NDArray *a, NDArray *b, NDArray int tads = tadsA->size(); PRAGMA_OMP_PARALLEL_FOR_SIMD - for (int e = 0; e < tads; e++) { + for (Nd4jLong e = 0; e < tads; e++) { auto a_ = tadsA->at(e); auto b_ = tadsB->at(e); auto o_ = tadsO->at(e); diff --git a/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp b/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp index bb498183e..71641f215 100644 --- a/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp +++ b/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp @@ -43,7 +43,7 @@ static void triuBP_(nd4j::LaunchContext * context, const NDArray& input, const N int dLen = dOdI.lengthOf(); PRAGMA_OMP_PARALLEL_FOR_IF(dLen > Environment::getInstance()->elementwiseThreshold()) - for(int i = 0; i < dLen; ++i) { + for(Nd4jLong i = 0; i < dLen; ++i) { if(dOdI.t(i) != static_cast(0.f)) dOdI.t(i) = static_cast(1.f); } diff --git a/libnd4j/include/ops/declarable/impl/LegacyReduceBoolOp.cpp b/libnd4j/include/ops/declarable/impl/LegacyReduceBoolOp.cpp index 8270f665f..ac4bb33b7 100644 --- a/libnd4j/include/ops/declarable/impl/LegacyReduceBoolOp.cpp +++ b/libnd4j/include/ops/declarable/impl/LegacyReduceBoolOp.cpp @@ -94,7 +94,7 @@ namespace nd4j { //indices->printIndexedBuffer("indices"); std::vector dims(indices->lengthOf()); - for (int e = 0; e < indices->lengthOf(); e++) { + for (Nd4jLong e = 0; e < indices->lengthOf(); e++) { // lol otherwise we segfault on macOS int f = indices->e(e); dims[e] = f >= 0 ? f : f += x->rankOf(); diff --git a/libnd4j/include/ops/impl/specials.cpp b/libnd4j/include/ops/impl/specials.cpp index 8c7c04f60..5eb64fdb4 100644 --- a/libnd4j/include/ops/impl/specials.cpp +++ b/libnd4j/include/ops/impl/specials.cpp @@ -66,17 +66,17 @@ void SpecialMethods::concatCpuGeneric(const std::vector& inArrs, ND PRAGMA_OMP_PARALLEL_FOR_SIMD for (uint r = 0; r < numOfArrs; r++) { - const uint arrLen = inArrs[r]->lengthOf(); + const Nd4jLong arrLen = inArrs[r]->lengthOf(); const uint xEws = (arrLen == 1) ? 
1 : inArrs[r]->stridesOf()[nonUnityDim[r]]; T *z = outBuff + zOffset[r]; T *x = inArrs[r]->bufferAsT(); if(outEws == 1 && xEws == 1) - for (uint e = 0; e < arrLen; e++) + for (Nd4jLong e = 0; e < arrLen; e++) z[e] = x[e]; else - for (uint e = 0; e < arrLen; e++) + for (Nd4jLong e = 0; e < arrLen; e++) z[e * outEws] = x[e * xEws]; } return; diff --git a/libnd4j/tests_cpu/layers_tests/LegacyOpsTests.cpp b/libnd4j/tests_cpu/layers_tests/LegacyOpsTests.cpp index 3219104bd..9151b70bd 100644 --- a/libnd4j/tests_cpu/layers_tests/LegacyOpsTests.cpp +++ b/libnd4j/tests_cpu/layers_tests/LegacyOpsTests.cpp @@ -382,7 +382,7 @@ TEST_F(LegacyOpsTests, Test_IsMax_1) { z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), extra, nullptr, nullptr); // z.printIndexedBuffer("z"); - for (int e = 0; e < z.lengthOf(); e++) { + for (Nd4jLong e = 0; e < z.lengthOf(); e++) { ASSERT_TRUE(z.e(e) >= 0); } } @@ -402,7 +402,7 @@ TEST_F(LegacyOpsTests, Test_IsMax_2) { z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), extra, nullptr, nullptr); // z.printIndexedBuffer("z"); - for (int e = 0; e < z.lengthOf(); e++) { + for (Nd4jLong e = 0; e < z.lengthOf(); e++) { if (e >= z.lengthOf() / 2) ASSERT_TRUE(z.e(e)); else
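
Note (editorial aside, not part of the patch; the shape values and variable names below are illustrative): the whole change widens 32-bit int loop counters and length accumulators to the 64-bit Nd4jLong type so that arrays holding more than 2,147,483,647 elements are counted and indexed correctly. A minimal standalone C++ sketch of the failure mode the patch guards against:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        // A shape with 2^32 elements, larger than INT_MAX (2147483647).
        std::vector<int64_t> shape = {1024, 1024, 1024, 4};

        int     len32 = 1;   // mirrors the old `int arrLength = 1;`
        int64_t len64 = 1;   // mirrors the new `Nd4jLong arrLength = 1;`
        for (auto d : shape) {
            len32 *= static_cast<int>(d);   // overflows on the last step (formally undefined behavior)
            len64 *= d;                     // stays exact in 64 bits
        }

        std::printf("32-bit product: %d\n", len32);                 // meaningless after overflow
        std::printf("64-bit product: %lld\n", (long long)len64);    // 4294967296
        return 0;
    }

The same reasoning applies to the loop counters: a counter declared as int that runs up to lengthOf() cannot address elements past index 2147483647, which is why the patch switches them to Nd4jLong throughout.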