remove lengthLong (#236)

Signed-off-by: Robert Altena <Rob@Ra-ai.com>
master
Robert Altena 2019-09-05 10:19:39 +09:00 committed by Alex Black
parent 03c52ef9dd
commit f25e3e71e5
13 changed files with 57 additions and 83 deletions

View File

@@ -135,7 +135,7 @@ public class EncodingHandler implements MessageHandler {
iterations.get().incrementAndGet();
if (boundary != null && atomicBoundary.get() < 0)
atomicBoundary.compareAndSet(-1, (int) (updates.lengthLong() * boundary));
atomicBoundary.compareAndSet(-1, (int) (updates.length() * boundary));
INDArray encoded;
@@ -160,11 +160,11 @@ public class EncodingHandler implements MessageHandler {
double encLen = encoded.data().getInt(0);
// if updates are too dense - we fallback to bitmap encoding
if (encLen >= (updates.lengthLong() / 16)) {
if (encLen >= (updates.length() / 16)) {
log.debug("Switching back to bitmapEncoding: iteration {}, epoch {}, threshold {}, encoded length {}", iteration, epoch, currThreshold, encLen);
bitmapMode.get().set(true);
DataBuffer buffer = Nd4j.getDataBufferFactory().createInt(updates.lengthLong() / 16 + 5);
DataBuffer buffer = Nd4j.getDataBufferFactory().createInt(updates.length() / 16 + 5);
encoded = Nd4j.createArrayFromShapeBuffer(buffer, updates.shapeInfoDataBuffer());
Nd4j.getExecutioner().bitmapEncode(updates, encoded, currentThreshold.get().get());
@@ -186,12 +186,12 @@ public class EncodingHandler implements MessageHandler {
}
} else {
//Dense bitmap updates
DataBuffer buffer = Nd4j.getDataBufferFactory().createInt(updates.lengthLong() / 16 + 5);
DataBuffer buffer = Nd4j.getDataBufferFactory().createInt(updates.length() / 16 + 5);
encoded = Nd4j.createArrayFromShapeBuffer(buffer, updates.shapeInfoDataBuffer());
long values = Nd4j.getExecutioner().bitmapEncode(updates, encoded, currentThreshold.get().get());
if (values < (updates.lengthLong() / 16 + 5) / 2) {
if (values < (updates.length() / 16 + 5) / 2) {
boolean current = bitmapMode.get().get();
bitmapMode.get().set(false);
if(!current) {

View File

@@ -4641,17 +4641,6 @@ public abstract class BaseNDArray implements INDArray, Iterable {
return jvmShapeInfo.length;
}
/**
* Returns the total number of elements in the ndarray
*
* @return the number of elements in the ndarray
*/
@Override
@Deprecated
public long lengthLong() {
return jvmShapeInfo.length;
}
@Override
public INDArray broadcast(INDArray result) {
Nd4j.getCompressor().autoDecompress(this);

View File

@@ -279,11 +279,6 @@ public abstract class BaseSparseNDArray implements ISparseNDArray {
return (int) length();
}
@Override
public long lengthLong() {
return length;
}
protected void init(long[] shape) {
if (shape.length == 1) {

View File

@@ -2378,16 +2378,6 @@ public interface INDArray extends Serializable, AutoCloseable {
*/
long length();
/**
* Returns the total number of elements in the ndarray
*
* @return the number of elements in the ndarray
* @deprecated use {@link #length()}
*/
@Deprecated
long lengthLong();
/**
* Broadcasts this ndarray to be the specified shape
*

View File

@@ -664,7 +664,7 @@ public class JCublasNDArray extends BaseNDArray {
//if (1 < 0) {
Nd4j.getExecutioner().commit();
DataBuffer buffer = Nd4j.createBuffer(this.lengthLong(), false);
DataBuffer buffer = Nd4j.createBuffer(this.length(), false);
AllocationPoint pointDst = AtomicAllocator.getInstance().getAllocationPoint(buffer);
AllocationPoint pointSrc = AtomicAllocator.getInstance().getAllocationPoint(this.data);
@@ -686,10 +686,10 @@ public class JCublasNDArray extends BaseNDArray {
val perfD = PerformanceTracker.getInstance().helperStartTransaction();
if (pointSrc.isActualOnDeviceSide()) {
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.length() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
throw new ND4JIllegalStateException("memcpyAsync failed");
} else {
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.length() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
throw new ND4JIllegalStateException("memcpyAsync failed");
direction = MemcpyDirection.HOST_TO_DEVICE;
@@ -738,7 +738,7 @@ public class JCublasNDArray extends BaseNDArray {
if (!this.isView()) {
Nd4j.getExecutioner().commit();
val buffer = Nd4j.createBuffer(this.dataType(), this.lengthLong(), false);
val buffer = Nd4j.createBuffer(this.dataType(), this.length(), false);
val pointDst = AtomicAllocator.getInstance().getAllocationPoint(buffer);
val pointSrc = AtomicAllocator.getInstance().getAllocationPoint(this.data);
@@ -749,10 +749,10 @@ public class JCublasNDArray extends BaseNDArray {
val perfD = PerformanceTracker.getInstance().helperStartTransaction();
if (pointSrc.isActualOnDeviceSide()) {
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getDevicePointer(), this.length() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyDeviceToDevice, context.getOldStream()) == 0)
throw new ND4JIllegalStateException("memcpyAsync failed");
} else {
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.lengthLong() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
if (NativeOpsHolder.getInstance().getDeviceNativeOps().memcpyAsync(pointDst.getDevicePointer(), pointSrc.getHostPointer(), this.length() * Nd4j.sizeOfDataType(buffer.dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getOldStream()) == 0)
throw new ND4JIllegalStateException("memcpyAsync failed");
direction = MemcpyDirection.HOST_TO_DEVICE;

View File

@@ -424,7 +424,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
val perfD = PerformanceTracker.getInstance().helperStartTransaction();
nativeOps.memcpyAsync(point.getDevicePointer(), point.getHostPointer(), ret.lengthLong() * Nd4j.sizeOfDataType(ret.data().dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getSpecialStream());
nativeOps.memcpyAsync(point.getDevicePointer(), point.getHostPointer(), ret.length() * Nd4j.sizeOfDataType(ret.data().dataType()), CudaConstants.cudaMemcpyHostToDevice, context.getSpecialStream());
context.getSpecialStream().synchronize();
if (nativeOps.lastErrorCode() != 0)
@@ -580,7 +580,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
if (true) {
Nd4j.getExecutioner().push();
long len = target.lengthLong();
long len = target.length();
AtomicAllocator allocator = AtomicAllocator.getInstance();
@@ -598,7 +598,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays[i].elementWiseStride() != 1)
throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
if (arrays[i].lengthLong() != len)
if (arrays[i].length() != len)
throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
AllocationPoint point = allocator.getAllocationPoint(arrays[i]);
@@ -621,7 +621,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
return target;
} else {
long len = target.lengthLong();
long len = target.length();
Nd4j.getExecutioner().commit();
@@ -637,7 +637,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays[i].elementWiseStride() != 1)
throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
if (arrays[i].lengthLong() != len)
if (arrays[i].length() != len)
throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
((BaseCudaDataBuffer) arrays[i].data()).lazyAllocateHostPointer();
@@ -689,7 +689,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
Nd4j.getExecutioner().push();
long len = target != null ? target.lengthLong() : arrays[0].lengthLong();
long len = target != null ? target.length() : arrays[0].length();
AtomicAllocator allocator = AtomicAllocator.getInstance();
@@ -707,7 +707,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays[i].elementWiseStride() != 1)
throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
if (arrays[i].lengthLong() != len)
if (arrays[i].length() != len)
throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
AllocationPoint point = allocator.getAllocationPoint(arrays[i]);
@@ -744,7 +744,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
/**
* We expect all operations are complete at this point
*/
long len = target == null ? arrays[0].lengthLong() : target.lengthLong();
long len = target == null ? arrays[0].length() : target.length();
val context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext();
@@ -758,7 +758,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays[i].elementWiseStride() != 1)
throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
if (arrays[i].lengthLong() != len)
if (arrays[i].length() != len)
throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
((BaseCudaDataBuffer) arrays[i].data()).lazyAllocateHostPointer();
@@ -1303,7 +1303,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
}
int numTads = (int)(tensor.lengthLong() / tadLength);
int numTads = (int)(tensor.length() / tadLength);
INDArray[] result = new INDArray[numTads];
long[] xPointers = new long[numTads];
@@ -1378,7 +1378,7 @@ public class JCublasNDArrayFactory extends BaseNativeNDArrayFactory {
new CudaPointer(0));
// we're sending > 10m elements to radixSort
boolean isRadix = !x.isView() && (x.lengthLong() > 1024 * 1024 * 10);
boolean isRadix = !x.isView() && (x.length() > 1024 * 1024 * 10);
INDArray tmpX = x;
// we need to guarantee all threads are finished here

View File

@@ -293,9 +293,9 @@ public class CudaExecutioner extends DefaultOpExecutioner {
Pointer yDevTadShapeInfo = null;
if (op.y() != null) {
if (dimension.length == 0 || (dimension.length == 1 && dimension[0] == Integer.MAX_VALUE )|| op.x().tensorAlongDimension(0, dimension).lengthLong() != op.y().lengthLong()) {
if (!op.isComplexAccumulation() && op.x().lengthLong() != op.y().lengthLong())
throw new ND4JIllegalStateException("Op.X [" + op.x().lengthLong() + "] and Op.Y [" + op.y().lengthLong() + "] lengths should match");
if (dimension.length == 0 || (dimension.length == 1 && dimension[0] == Integer.MAX_VALUE )|| op.x().tensorAlongDimension(0, dimension).length() != op.y().length()) {
if (!op.isComplexAccumulation() && op.x().length() != op.y().length())
throw new ND4JIllegalStateException("Op.X [" + op.x().length() + "] and Op.Y [" + op.y().length() + "] lengths should match");
if (!op.z().isScalar()) {
Pair<DataBuffer, DataBuffer> yTadBuffers = tadManager.getTADOnlyShapeInfo(op.y(), dimension);
@@ -536,7 +536,7 @@ public class CudaExecutioner extends DefaultOpExecutioner {
} else {
if (op.y() != null) {
//2 options here: either pairwise, equal sizes - OR every X TAD vs. entirety of Y
if (op.x().lengthLong() == op.y().lengthLong()) {
if (op.x().length() == op.y().length()) {
//Pairwise
if (!wholeDims && op.x().tensorsAlongDimension(dimension) != op.y().tensorsAlongDimension(dimension)) {
throw new ND4JIllegalStateException("Number of TADs along dimension don't match: (x shape = " +
@@ -548,11 +548,11 @@ public class CudaExecutioner extends DefaultOpExecutioner {
throw new ND4JIllegalStateException("TAD vs TAD comparison requires dimension (or other comparison mode was supposed to be used?)");
//Every X TAD vs. entirety of Y
val xTADSize = op.x().lengthLong() / op.x().tensorsAlongDimension(dimension);
val xTADSize = op.x().length() / op.x().tensorsAlongDimension(dimension);
if (xTADSize != op.y().length()) {
throw new ND4JIllegalStateException("Size of TADs along dimension don't match for pairwise execution:" +
" (x TAD size = " + xTADSize + ", y size = " + op.y().lengthLong());
" (x TAD size = " + xTADSize + ", y size = " + op.y().length());
}
}
}
@@ -976,7 +976,7 @@ public class CudaExecutioner extends DefaultOpExecutioner {
if (op.y() != null) {
//2 options here: either pairwise, equal sizes - OR every X TAD vs. entirety of Y
if (op.x().lengthLong() == op.y().lengthLong()) {
if (op.x().length() == op.y().length()) {
//Pairwise
if (op.x().tensorsAlongDimension(dimension) != op.y().tensorsAlongDimension(dimension)) {
throw new ND4JIllegalStateException("Number of TADs along dimension don't match: (x shape = " +
@@ -985,11 +985,11 @@ public class CudaExecutioner extends DefaultOpExecutioner {
}
} else {
//Every X TAD vs. entirety of Y
val xTADSize = op.x().lengthLong() / op.x().tensorsAlongDimension(dimension);
val xTADSize = op.x().length() / op.x().tensorsAlongDimension(dimension);
if (xTADSize != op.y().length()) {
throw new ND4JIllegalStateException("Size of TADs along dimension don't match for pairwise execution:" +
" (x TAD size = " + xTADSize + ", y size = " + op.y().lengthLong());
" (x TAD size = " + xTADSize + ", y size = " + op.y().length());
}
}
}
@@ -2031,8 +2031,8 @@ public class CudaExecutioner extends DefaultOpExecutioner {
long compressedLength = buffer.getInt(0);
long originalLength = buffer.getInt(1);
if (target.lengthLong() != originalLength)
throw new ND4JIllegalStateException("originalLength ["+ originalLength+"] stored in encoded array doesn't match target length ["+ target.lengthLong()+"]");
if (target.length() != originalLength)
throw new ND4JIllegalStateException("originalLength ["+ originalLength+"] stored in encoded array doesn't match target length ["+ target.length()+"]");
DataBuffer result = target.data();
@@ -2056,7 +2056,7 @@ public class CudaExecutioner extends DefaultOpExecutioner {
@Override
public long bitmapEncode(INDArray indArray, INDArray target, double threshold) {
long length = indArray.lengthLong();
long length = indArray.length();
long tLen = target.data().length();
if (tLen != (length / 16 + 5))
@@ -2117,7 +2117,7 @@ public class CudaExecutioner extends DefaultOpExecutioner {
context.getBufferScalar(),
context.getBufferReduction());
nativeOps.decodeBitmap(extras, AtomicAllocator.getInstance().getPointer(encoded.data(), context), target.lengthLong(), AtomicAllocator.getInstance().getPointer(target, context), (LongPointer) AtomicAllocator.getInstance().getHostPointer(target.shapeInfoDataBuffer()));
nativeOps.decodeBitmap(extras, AtomicAllocator.getInstance().getPointer(encoded.data(), context), target.length(), AtomicAllocator.getInstance().getPointer(target, context), (LongPointer) AtomicAllocator.getInstance().getHostPointer(target.shapeInfoDataBuffer()));
if (nativeOps.lastErrorCode() != 0)
throw new RuntimeException(nativeOps.lastErrorMessage());

View File

@@ -655,7 +655,7 @@ public class CudaGridExecutioner extends CudaExecutioner implements GridExecutio
op.setZ(ret);
} else {
// compare length
if (op.z().lengthLong() != ArrayUtil.prodLong(retShape))
if (op.z().length() != ArrayUtil.prodLong(retShape))
throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + Arrays.toString(retShape) + "]");
ret = op.z();

View File

@@ -514,7 +514,7 @@ public class CpuNDArrayFactory extends BaseNativeNDArrayFactory {
int numTads = (int)(tensor.lengthLong() / tadLength);
int numTads = (int)(tensor.length() / tadLength);
INDArray[] result = new INDArray[numTads];
PointerPointer targets = new PointerPointer(numTads);
@@ -693,7 +693,7 @@ public class CpuNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays.length == 1)
return target.addi(arrays[0]);
long len = target.lengthLong();
long len = target.length();
PointerPointer dataPointers = new PointerPointer(arrays.length);
@@ -703,7 +703,7 @@ public class CpuNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays[i].elementWiseStride() != 1)
throw new ND4JIllegalStateException("Native accumulation is applicable only to continuous INDArrays");
if (arrays[i].lengthLong() != len)
if (arrays[i].length() != len)
throw new ND4JIllegalStateException("All arrays should have equal length for accumulation");
dataPointers.put(i, arrays[i].data().addressPointer());
@@ -744,7 +744,7 @@ public class CpuNDArrayFactory extends BaseNativeNDArrayFactory {
return target.assign(arrays[0]);
}
long len = target != null ? target.lengthLong() : arrays[0].length();
long len = target != null ? target.length() : arrays[0].length();
PointerPointer dataPointers = new PointerPointer(arrays.length);
val firstType = arrays[0].dataType();
@@ -757,7 +757,7 @@ public class CpuNDArrayFactory extends BaseNativeNDArrayFactory {
if (arrays[i].elementWiseStride() != 1)
throw new ND4JIllegalStateException("Native averaging is applicable only to continuous INDArrays");
if (arrays[i].lengthLong() != len)
if (arrays[i].length() != len)
throw new ND4JIllegalStateException("All arrays should have equal length for averaging");
dataPointers.put(i, arrays[i].data().addressPointer());

View File

@@ -303,11 +303,11 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
}
} else {
//Every X TAD vs. entirety of Y
val xTADSize = op.x().lengthLong() / op.x().tensorsAlongDimension(dimension);
val xTADSize = op.x().length() / op.x().tensorsAlongDimension(dimension);
if (xTADSize != op.y().length()) {
throw new ND4JIllegalStateException("Size of TADs along dimension don't match for pairwise execution:" +
" (x TAD size = " + xTADSize + ", y size = " + op.y().lengthLong());
" (x TAD size = " + xTADSize + ", y size = " + op.y().length());
}
}
}
@@ -329,7 +329,7 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
long xT = op.x().tensorsAlongDimension(dimension);
long yT = op.y().tensorsAlongDimension(dimension);
if (op.z().lengthLong() != xT * yT)
if (op.z().length() != xT * yT)
throw new ND4JIllegalStateException("Shape of target array for reduction [" + Arrays.toString(op.z().shape()) + "] doesn't match expected [" + (xT * yT) + "]");
}
@@ -358,7 +358,7 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
// we're going to check, if that's TAD vs TAD comparison or TAD vs full array. if later - we're going slightly different route
boolean tvf = false;
if (op.y() != null) {
if (op.x().tensorAlongDimension(0, dimension).lengthLong() == op.y().lengthLong()) {
if (op.x().tensorAlongDimension(0, dimension).length() == op.y().length()) {
tvf = true;
}
}
@@ -366,10 +366,10 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
if (op.isComplexAccumulation()) {
yTadBuffers = tadManager.getTADOnlyShapeInfo(op.y(), dimension);
if (op.x().tensorAlongDimension(0, dimension).lengthLong() != op.y().tensorAlongDimension(0, dimension).lengthLong())
if (op.x().tensorAlongDimension(0, dimension).length() != op.y().tensorAlongDimension(0, dimension).length())
throw new ND4JIllegalStateException("Impossible to issue AllDistances operation: TAD lengths mismatch along given dimension: " +
"x TAD length = " + op.x().tensorAlongDimension(0, dimension).lengthLong() + ", y TAD length " +
op.y().tensorAlongDimension(0, dimension).lengthLong());
"x TAD length = " + op.x().tensorAlongDimension(0, dimension).length() + ", y TAD length " +
op.y().tensorAlongDimension(0, dimension).length());
}
/**
@@ -659,7 +659,7 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
//validateDataType(Nd4j.dataType(), op);
if (op.x().lengthLong() != op.z().lengthLong())
if (op.x().length() != op.z().length())
throw new ND4JIllegalStateException("op.X length should be equal to op.Z length: " +
"x.length()=" + op.x().length() + ", z.length()=" + op.z().length() + " - x shape info = ["
+ Arrays.toString(op.x().shapeInfoDataBuffer().asInt()) + "], z shape info = ["
@@ -1449,8 +1449,8 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
long originalLength = buffer.getInt(1);
float threshold = buffer.getInt(2);
if (target.lengthLong() != originalLength)
throw new ND4JIllegalStateException("originalLength ["+ originalLength+"] stored in encoded array doesn't match target length ["+ target.lengthLong()+"]");
if (target.length() != originalLength)
throw new ND4JIllegalStateException("originalLength ["+ originalLength+"] stored in encoded array doesn't match target length ["+ target.length()+"]");
DataTypeEx typeDst = AbstractCompressor.getBufferTypeEx(target.data());
@@ -1465,7 +1465,7 @@ public class NativeOpExecutioner extends DefaultOpExecutioner {
@Override
public long bitmapEncode(INDArray indArray, INDArray target, double threshold) {
long length = indArray.lengthLong();
long length = indArray.length();
long tLen = target.data().length();
if (tLen != (length / 16 + 5))

View File

@@ -5155,7 +5155,7 @@ public class Nd4jTestsC extends BaseNd4jTest {
INDArray res = x.entropy(1);
assertEquals(10, res.lengthLong());
assertEquals(10, res.length());
for (int t = 0; t < x.rows(); t++) {
double exp = MathUtils.entropy(x.getRow(t).dup().data().asDouble());

View File

@@ -415,7 +415,7 @@ public class ShufflesTests extends BaseNd4jTest {
for (int x = 0; x < newData.rows(); x++) {
INDArray row = newData.getRow(x);
for (int y = 0; y < row.lengthLong(); y++) {
for (int y = 0; y < row.length(); y++) {
if (Math.abs(row.getFloat(y) - newMap[x]) > Nd4j.EPS_THRESHOLD) {
System.out.print("Different data in a row");
return false;
@@ -442,7 +442,7 @@ public class ShufflesTests extends BaseNd4jTest {
for (int x = 0; x < newData.rows(); x++) {
INDArray column = newData.getColumn(x);
double val = column.getDouble(0);
for (int y = 0; y < column.lengthLong(); y++) {
for (int y = 0; y < column.length(); y++) {
if (Math.abs(column.getFloat(y) - val) > Nd4j.EPS_THRESHOLD) {
System.out.print("Different data in a column: " + column.getFloat(y));
return false;

View File

@@ -168,7 +168,7 @@ public class CompressionTests extends BaseNd4jTest {
INDArray decompressed = Nd4j.create(1, initial.length());
Nd4j.getExecutioner().thresholdDecode(compressed, decompressed);
log.info("Decompressed length: {}", decompressed.lengthLong());
log.info("Decompressed length: {}", decompressed.length());
assertEquals(exp_d, decompressed);
}