From 46f8c58502a6b90c33b6c904ac2a74a570fa7679 Mon Sep 17 00:00:00 2001 From: raver119 Date: Fri, 6 Sep 2019 08:57:53 +0300 Subject: [PATCH 01/11] - bits_hamming_distance dtype fix - DataTypeUtils::asString fixe + new dtypes added Signed-off-by: raver119 --- libnd4j/include/array/DataTypeUtils.h | 10 +++++-- .../generic/bitwise/bits_hamming_distance.cpp | 3 +- .../layers_tests/DataTypesValidationTests.cpp | 30 +++++++++++++++++++ 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/libnd4j/include/array/DataTypeUtils.h b/libnd4j/include/array/DataTypeUtils.h index 2a52ba6f5..8b3176c2b 100644 --- a/libnd4j/include/array/DataTypeUtils.h +++ b/libnd4j/include/array/DataTypeUtils.h @@ -335,8 +335,6 @@ FORCEINLINE std::string DataTypeUtils::asString(DataType dataType) { return std::string("INT8"); case INT16: return std::string("INT16"); - case UINT16: - return std::string("UINT16"); case INT32: return std::string("INT32"); case INT64: @@ -353,10 +351,16 @@ FORCEINLINE std::string DataTypeUtils::asString(DataType dataType) { return std::string("BOOL"); case UINT8: return std::string("UINT8"); + case UINT16: + return std::string("UINT16"); + case UINT32: + return std::string("UINT32"); + case UINT64: + return std::string("UINT64"); case UTF8: return std::string("UTF8"); default: - throw new std::runtime_error("Unknown data type used"); + throw std::runtime_error("Unknown data type used"); } } diff --git a/libnd4j/include/ops/declarable/generic/bitwise/bits_hamming_distance.cpp b/libnd4j/include/ops/declarable/generic/bitwise/bits_hamming_distance.cpp index ff72ff4b9..f2a39b270 100644 --- a/libnd4j/include/ops/declarable/generic/bitwise/bits_hamming_distance.cpp +++ b/libnd4j/include/ops/declarable/generic/bitwise/bits_hamming_distance.cpp @@ -48,8 +48,7 @@ namespace nd4j { getOpDescriptor() ->setAllowedInputTypes(0, {ALL_INTS}) ->setAllowedInputTypes(1, {ALL_INTS}) - ->setAllowedOutputTypes(0, {ALL_INDICES}) - ->setSameMode(true); + ->setAllowedOutputTypes(0, {ALL_INDICES}); } } } diff --git a/libnd4j/tests_cpu/layers_tests/DataTypesValidationTests.cpp b/libnd4j/tests_cpu/layers_tests/DataTypesValidationTests.cpp index 9de87b584..c018e58d0 100644 --- a/libnd4j/tests_cpu/layers_tests/DataTypesValidationTests.cpp +++ b/libnd4j/tests_cpu/layers_tests/DataTypesValidationTests.cpp @@ -129,3 +129,33 @@ TEST_F(DataTypesValidationTests, cast_1) { ASSERT_TRUE(1.f == x); ASSERT_TRUE(y == x); } + +TEST_F(DataTypesValidationTests, test_bits_hamming_distance_1) { + auto x = NDArrayFactory::create('c', {3}, {0b01011000, 0b01011111, 0b01111110}); + auto y = NDArrayFactory::create('c', {3}, {0b00010110, 0b01011000, 0b01011000}); + auto z = NDArrayFactory::create(0); + + Context ctx(1); + ctx.setInputArray(0, &x); + ctx.setInputArray(1, &y); + ctx.setOutputArray(0, &z); + + nd4j::ops::bits_hamming_distance op; + auto status = op.execute(&ctx); + ASSERT_NE(Status::OK(), status); +} + +TEST_F(DataTypesValidationTests, test_bits_hamming_distance_2) { + auto x = NDArrayFactory::create('c', {3}, {0b01011000, 0b01011111, 0b01111110}); + auto y = NDArrayFactory::create('c', {3}, {0b00010110, 0b01011000, 0b01011000}); + auto z = NDArrayFactory::create(0); + + Context ctx(1); + ctx.setInputArray(0, &x); + ctx.setInputArray(1, &y); + ctx.setOutputArray(0, &z); + + nd4j::ops::bits_hamming_distance op; + auto status = op.execute(&ctx); + ASSERT_EQ(Status::OK(), status); +} \ No newline at end of file From c99f98051306a550ac8fb4272245141497a7c702 Mon Sep 17 00:00:00 2001 From: Robert Altena Date: Mon, 9 Sep 2019 
12:09:31 +0900 Subject: [PATCH 02/11] INDArray javadoc (#246) * javadoc Signed-off-by: Robert Altena * javadoc Signed-off-by: Robert Altena * javadoc Signed-off-by: Robert Altena * review fixes. Signed-off-by: Robert Altena --- .../nd4j/linalg/api/ndarray/BaseNDArray.java | 277 ------------------ .../linalg/api/ndarray/BaseSparseNDArray.java | 11 +- .../api/ndarray/BaseSparseNDArrayCOO.java | 18 +- .../org/nd4j/linalg/api/ndarray/INDArray.java | 203 +++++++------ .../linalg/api/ndarray/ISparseNDArray.java | 17 -- .../nd4j/linalg/jcublas/JCublasNDArray.java | 15 - .../linalg/jcublas/JcusparseNDArrayCSR.java | 5 - .../nd4j/linalg/cpu/nativecpu/NDArray.java | 7 - .../cpu/nativecpu/SparseNDArrayCSR.java | 5 - 9 files changed, 119 insertions(+), 439 deletions(-) diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseNDArray.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseNDArray.java index 126ba2466..9904168de 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseNDArray.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseNDArray.java @@ -1724,12 +1724,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return z; } - /** - * Returns the elements at the specified indices - * - * @param indices the indices to getScalar - * @return the array with the specified elements - */ @Override public int getInt(int... indices) { return (int) getDouble(indices); @@ -1761,12 +1755,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return Shape.getLong(this, indices); } - /** - * Returns the elements at the specified indices - * - * @param indices the indices to get - * @return the array with the specified elements - */ @Override public double getDouble(int... indices) { autoProcessScalarCall(); @@ -1815,12 +1803,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return Shape.getDouble(this, indices); } - /** - * Returns the elements at the specified indices - * - * @param indices the indices to get - * @return the array with the specified elements - */ @Override public float getFloat(int... indices) { return (float) getDouble(indices); @@ -1831,9 +1813,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return (float) getDouble(indices); } - /** - * Test whether a matrix is scalar. - */ @Override public boolean isScalar() { if (isEmpty()) @@ -2000,11 +1979,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { } } - /** - * Returns true if this ndarray is 2d - * - * @return true if the element is a matrix, false otherwise - */ public boolean isMatrix() { return rank() == 2; } @@ -2216,14 +2190,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return this; } - /** - * Mainly here for people coming from numpy. 
- * This is equivalent to a call to permute - * - * @param dimension the dimension to swap - * @param with the one to swap it with - * @return the swapped axes view - */ @Override public INDArray swapAxes(int dimension, int with) { int[] shape = ArrayUtil.range(0, shape().length); @@ -2263,12 +2229,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { this.data = data; } - /** - * Number of slices: aka shape[0] - * - * @return the number of slices - * for this nd array - */ @Override public long slices() { return size(0); @@ -3377,18 +3337,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return Shape.offset(jvmShapeInfo.javaShapeInformation) + (idx); } - - - /** - * Returns the specified slice of this matrix. - * In matlab, this would be equivalent to (given a 2 x 2 x 2): - * A(:,:,x) where x is the slice you want to return. - *

- * The slice is always relative to the final dimension of the matrix. - * - * @param slice the slice to return - * @return the specified slice of this matrix - */ @Override public INDArray slice(long slice) { Nd4j.getCompressor().autoDecompress(this); @@ -3425,8 +3373,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return Nd4j.scalar(d); } - - @Override public int getTrailingOnes() { int numLeadingOnes = 0; @@ -3438,8 +3384,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return numLeadingOnes; } - - @Override public int getLeadingOnes() { int numLeadingOnes = 0; @@ -3451,16 +3395,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return numLeadingOnes; } - - - /** - * Returns the slice of this from the specified dimension - * - * @param slice the dimension to return from - * @param dimension the dimension of the slice to return - * @return the slice of this matrix from the specified dimension - * and dimension - */ @Override public INDArray slice(long slice, int dimension) { Nd4j.getCompressor().autoDecompress(this); @@ -3490,12 +3424,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { } - /** - * Fetch a particular number on a multi dimensional scale. - * - * @param indexes the indexes to get a number from - * @return the number at the specified indices - */ @Override public INDArray getScalar(int[] indexes) { if (indexes.length > rank()) @@ -3717,9 +3645,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return (float) getDouble(i, j); } - /** - * Return transposed copy of this matrix. - */ @Override public INDArray transpose() { Preconditions.checkState(rank() >= 2, "Can't transpose array with rank < 2: array shape %ndShape", this); @@ -3727,7 +3652,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return permute(ArrayUtil.reverseCopy(ArrayUtil.range(0, rank()))); } - /** * * Return transposed version of this matrix. @@ -4047,12 +3971,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return norm2(false, dimension); } - /** - * Number of columns (shape[1]), throws an exception when - * called when not 2d - * - * @return the number of columns in the array (only 2d) - */ @Override public int columns() { // FIXME: int cast @@ -4068,13 +3986,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { } - /** - * Returns the number of rows - * in the array (only 2d) throws an exception when - * called when not 2d - * - * @return the number of rows in the matrix - */ @Override public int rows() { // FIXME: @@ -4089,12 +4000,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { throw new IllegalStateException("Rank is " + rank() + " rows() call is not valid"); } - - /** - * Flattens the array for linear indexing - * - * @return the flattened version of this array - */ @Override public INDArray ravel(char ordering) { Nd4j.getCompressor().autoDecompress(this); @@ -4104,11 +4009,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return dup(ordering).reshape(ordering, length()); } - /** - * Flattens the array for linear indexing - * - * @return the flattened version of this array - */ @Override public INDArray ravel() { return reshape(length()); @@ -4125,22 +4025,11 @@ public abstract class BaseNDArray implements INDArray, Iterable { } } - /** - * Reshape the matrix. Number of elements must not change. 
- * - * @param newRows - * @param newColumns - */ @Override public INDArray reshape(long newRows, long newColumns) { return reshape(new long[] {newRows, newColumns}); } - /** - * Get the specified column - * - * @param c - */ @Override public INDArray getColumn(long c) { Nd4j.getCompressor().autoDecompress(this); @@ -4383,11 +4272,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return create(new int[] {rows, length}); } - /** - * Get a copy of a row. - * - * @param r the row to get - */ @Override public INDArray getRow(long r) { if (isRowVector() && r == 0) @@ -4409,14 +4293,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return row.reshape(1, row.length()); } - - /** - * This method allows you to compare INDArray against other INDArray, with variable eps - * - * @param o - * @param eps - * @return - */ public boolean equalsWithEps(Object o, double eps) { Nd4j.getCompressor().autoDecompress(this); @@ -4563,11 +4439,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return shapeInformation.asNioLong(); } - /** - * Returns the shape(dimensions) of this array - * - * @return the shape of this matrix - */ public long[] shape() { return jvmShapeInfo.shape; } @@ -4577,11 +4448,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return Shape.shapeToString(this); } - /** - * Returns the stride(indices along the linear index for which each slice is accessed) of this array - * - * @return the stride of this array - */ @Override public long[] stride() { return jvmShapeInfo.stride; @@ -4598,13 +4464,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return jvmShapeInfo.order; } - /** - * Returns the size of this array - * along a particular dimension - * - * @param dimension the dimension to return from - * @return the shape of the specified dimension - */ @Override public long size(int dimension) { if (dimension < 0) @@ -4629,11 +4488,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return jvmShapeInfo.rank; } - /** - * Returns the total number of elements in the ndarray - * - * @return the number of elements in the ndarray - */ @Override public long length() { if (isEmpty()) @@ -4745,12 +4599,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { } - /** - * Broadcasts this ndarray to be the specified shape - * - * @param shape the new shape of this ndarray - * @return the broadcasted ndarray - */ @Override public INDArray broadcast(long... shape) { return broadcast(Nd4j.createUninitialized(this.dataType(), shape, this.ordering())); @@ -4877,12 +4725,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { } - /** - * See: http://www.mathworks.com/help/matlab/ref/permute.html - * - * @param rearrange the dimensions to swap to - * @return the newly permuted array - */ @Override public INDArray permute(int... rearrange) { Preconditions.checkArgument(rearrange.length == rank(), "Incorrect number of arguments for permute function:" + @@ -4911,14 +4753,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return value; } - /** - * An in-place version of permute. The array shape information (shape, strides) - * is modified by this operation (but not the data itself) - * See: http://www.mathworks.com/help/matlab/ref/permute.html - * - * @param rearrange the dimensions to swap to - * @return the current array - */ @Override public INDArray permutei(int... 
rearrange) { Preconditions.checkArgument(rearrange.length == rank(), "Incorrect number of arguments for permute function:" + @@ -5047,17 +4881,11 @@ public abstract class BaseNDArray implements INDArray, Iterable { return isMatrix() && rows() == columns(); } - /** - * Checks whether the matrix is a row vector. - */ @Override public boolean isRowVector() { return (rank() == 2 && rows() == 1) && length() > 1 || rank() == 1 && length() > 1; } - /** - * Checks whether the matrix is a column vector. - */ @Override public boolean isColumnVector() { return rank() == 2 && columns() == 1 && length() > 1; @@ -5112,12 +4940,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return toString(Long.MAX_VALUE, false, -1 * dataType().precision()); } - /** - * Returns a scalar (individual element) - * of a scalar ndarray - * - * @return the individual item in this ndarray - */ @Override public Object element() { @@ -5236,11 +5058,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return new FirstAxisIterator(this); } - /** - * Returns the start of where the ndarray is for the original data buffer - * - * @return - */ @Override public long originalOffset() { if (data().originalOffset() >= Integer.MAX_VALUE) @@ -5294,24 +5111,11 @@ public abstract class BaseNDArray implements INDArray, Iterable { data().read(s, headerData.getLeft(), headerData.getMiddle(), headerData.getRight()); } - - /** - * This method returns index of highest value along specified dimension(s) - * - * @param dimension - * @return - */ @Override public INDArray argMax(int... dimension) { return Nd4j.argMax(this, dimension); } - - /** - * This method returns True, if this INDArray instance is attached to some Workspace. False otherwise. - * - * @return - */ @Override public boolean isAttached() { if (isEmpty()) @@ -5324,13 +5128,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { (data.originalDataBuffer() != null && data.originalDataBuffer().isAttached()); } - /** - * This method checks, if given attached INDArray is still in scope of its parent Workspace - *

- * PLEASE NOTE: if this INDArray isn't attached to any Workspace, this method will return true - * - * @return - */ @Override public boolean isInScope() { if (!isAttached()) @@ -5339,13 +5136,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return data.isInScope(); } - /** - * This metod detaches INDArray from Workspace, returning copy. Basically it's dup() into new memory chunk. - *

- * PLEASE NOTE: If this INDArray instance is NOT attached - it will be returned unmodified. - * - * @return - */ @Override public INDArray detach() { if (!isAttached()) @@ -5402,14 +5192,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { } } - /** - * This method detaches INDArray from current Workspace, and attaches it to Workspace above, if any. - *

- * PLEASE NOTE: If this INDArray instance is NOT attached - it will be returned unmodified. - * PLEASE NOTE: If current Workspace is the top-tier one, effect will be equal to detach() call - detached copy will be returned - * - * @return - */ @Override public INDArray leverage() { WorkspaceUtils.assertValidArray(this, "Cannot leverage INDArray to new workspace"); @@ -5453,33 +5235,11 @@ public abstract class BaseNDArray implements INDArray, Iterable { } } - /** - * This method detaches INDArray from current Workspace, and attaches it to Workspace with a given Id - * - * PLEASE NOTE: If this INDArray instance is NOT attached - it will be returned unmodified. - * PLEASE NOTE: If Workspace with target Id wasn't created before - this array will be returned unmodified. - * PLEASE NOTE: If target workspace is the current one - this array will be returned unmodified. - * - * @param id - * @return - */ @Override public INDArray leverageTo(String id) { return leverageTo(id, false); } - /** - * This method detaches INDArray from current Workspace, and attaches it to Workspace with a given Id. - * If enforceExistence == true, and no workspace with the specified ID exists, then an {@link Nd4jNoSuchWorkspaceException} - * is thrown. Otherwise, if enforceExistance == false and no workspace with the specified ID exists, then the current - * INDArray is returned unmodified (same as {@link #leverage()} - * - * @param id ID of the workspace to leverage to - * @param enforceExistence If true, and the specified workspace does not exist: an {@link Nd4jNoSuchWorkspaceException} - * will be thrown. - * @return The INDArray, leveraged to the specified workspace - * @see #leverageTo(String) - */ @Override public INDArray leverageTo(String id, boolean enforceExistence) throws Nd4jNoSuchWorkspaceException { WorkspaceUtils.assertValidArray(this, "Cannot leverage INDArray to new workspace"); @@ -5518,16 +5278,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { return copy; } - /** - * This method detaches INDArray from current Workspace, and attaches it to Workspace with a given Id, if a workspace - * with the given ID is open and active. - * - * If the workspace does not exist, or is not active, the array is detached from any workspaces. - * - * @param id ID of the workspace to leverage to - * @return The INDArray, leveraged to the specified workspace (if it exists and is active) otherwise the detached array - * @see #leverageTo(String) - */ public INDArray leverageOrDetach(String id){ if(!isAttached()){ return this; @@ -5539,30 +5289,11 @@ public abstract class BaseNDArray implements INDArray, Iterable { return leverageTo(id); } - /** - * This method pulls this INDArray into current Workspace. - * - * PLEASE NOTE: If there's no current Workspace - INDArray returned as is - * - * @return Migrated INDArray or this if no current workspace - * @see #migrate(boolean) - */ @Override public INDArray migrate() { return migrate(false); } - /** - * This method pulls this INDArray into current Workspace, or optionally detaches if no workspace is present.
- * That is:
- * If current workspace is present/active, INDArray is migrated to it.
- * If no current workspace is present/active, one of two things occur: - * 1. If detachOnNoWs arg is true: if there is no current workspace, INDArray is detached - * 2. If detachOnNoWs arg is false: this INDArray is returned as-is (no-op) - equivalent to {@link #migrate()} - * - * @param detachOnNoWs If true: detach on no WS. If false and no workspace: return this. - * @return Migrated INDArray - */ @Override public INDArray migrate(boolean detachOnNoWs){ WorkspaceUtils.assertValidArray(this, "Cannot leverage INDArray to new workspace"); @@ -5778,17 +5509,11 @@ public abstract class BaseNDArray implements INDArray, Iterable { throw new IllegalStateException("Unknown dataType: [" + type + "]"); } - /** - * This method returns true if this INDArray is special case: no-value INDArray - * - * @return - */ @Override public boolean isEmpty() { return Shape.isEmpty(jvmShapeInfo.javaShapeInformation); } - @Override public long[] shapeInfoJava() { return jvmShapeInfo.javaShapeInformation; @@ -5880,8 +5605,6 @@ public abstract class BaseNDArray implements INDArray, Iterable { throw new IllegalStateException("Cannot perform operation " + opName + " on empty array with datatype " + dataType()); } - - @Override public boolean closeable() { if (released || isAttached()) diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArray.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArray.java index c9c5cab37..ebe768cac 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArray.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArray.java @@ -1296,7 +1296,7 @@ public abstract class BaseSparseNDArray implements ISparseNDArray { } @Override - public float getFloat(long[] indices) { + public float getFloat(long... indices) { return 0; } @@ -1321,7 +1321,7 @@ public abstract class BaseSparseNDArray implements ISparseNDArray { } @Override - public float getFloat(int[] indices) { + public float getFloat(int... indices) { return 0; } @@ -1514,23 +1514,16 @@ public abstract class BaseSparseNDArray implements ISparseNDArray { return isMatrix() && rows() == columns(); } - /** - * Checks whether the matrix is a row vector. - */ @Override public boolean isRowVector() { return rank == 2 && rows == 1; } - /** - * Checks whether the matrix is a column vector. - */ @Override public boolean isColumnVector() { return rank == 2 && columns == 1; } - @Override public boolean isMatrix() { if (isMatrix != null) diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArrayCOO.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArrayCOO.java index 85a7ec5ce..b50200dbb 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArrayCOO.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseSparseNDArrayCOO.java @@ -683,12 +683,12 @@ public class BaseSparseNDArrayCOO extends BaseSparseNDArray { } @Override - public float getFloat(int[] indices) { + public float getFloat(int... indices) { return (float) getDouble(indices); } @Override - public float getFloat(long[] indices) { + public float getFloat(long... 
indices) { return 0; } @@ -860,11 +860,6 @@ public class BaseSparseNDArrayCOO extends BaseSparseNDArray { return Nd4j.createBuffer(Doubles.toArray(val)); } - /** - * Returns the indices of non-zero element of the vector - * - * @return indices in Databuffer - * */ @Override public DataBuffer getVectorCoordinates() { int idx; @@ -884,10 +879,6 @@ public class BaseSparseNDArrayCOO extends BaseSparseNDArray { return Nd4j.createBuffer(temp); } - /** - * Converts the sparse ndarray into a dense one - * @return a dense ndarray - */ @Override public INDArray toDense() { // TODO support view conversion @@ -1131,11 +1122,6 @@ public class BaseSparseNDArrayCOO extends BaseSparseNDArray { return null; } - /** - * This method returns true if this INDArray is special case: no-value INDArray - * - * @return - */ @Override public boolean isEmpty() { return false; diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/INDArray.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/INDArray.java index f9f04cc43..23e7f2fcc 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/INDArray.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/INDArray.java @@ -1912,6 +1912,9 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray getScalar(int... indices); + /** + * See {@link #getScalar(int[])} + */ INDArray getScalar(long... indices); /** @@ -1921,12 +1924,32 @@ public interface INDArray extends Serializable, AutoCloseable { */ int getInt(int... indices); + /** + * Get a long value at the specified index. + * @param index Index to get the integer at. + * @return long value at the specified index + */ long getLong(long index); + /** + * Get a long value at the specified indices. + * @param indices Indices to get the double at. Number of indices must match the array rank. + * @return long value at the specified index + */ long getLong(long... indices); + /** + * Get the numeric value at the specified index. + * @param index index to retreive. + * @return numeric value at the specified index. + */ Number getNumber(long index); + /** + * Get a numeric value at the specified indices. + * @param indices Indices to get the value from. Number of indices must match the array rank. + * @return Numeric value at the specified index + */ Number getNumber(long... indices); /** @@ -1936,6 +1959,9 @@ public interface INDArray extends Serializable, AutoCloseable { */ double getDouble(int... indices); + /** + * See {@link #getDouble(int[])} + */ double getDouble(long... indices); /** @@ -1944,10 +1970,12 @@ public interface INDArray extends Serializable, AutoCloseable { * @param indices the indices to getScalar * @return the array with the specified elements */ - float getFloat(int[] indices); - - float getFloat(long[] indices); + float getFloat(int... indices); + /** + * See {@link #getFloat(int...)} + */ + float getFloat(long... 
indices); /** * Get the double value at the specified linear index in the array @@ -1962,7 +1990,7 @@ public interface INDArray extends Serializable, AutoCloseable { * * @param i Dimension 0 (row) index * @param j Dimension 1 (column) index - * @return + * @return double value at the specified indices */ double getDouble(long i, long j); @@ -1984,7 +2012,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ float getFloat(long i, long j); - /** * Returns a copy of this ndarray * @@ -2007,7 +2034,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray ravel(); - /** * Returns a flattened version (row vector) of this ndarray * @@ -2015,10 +2041,9 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray ravel(char order); - /** - * - * @param data + * Set the data for this ndarray. + * @param data new value for the ndarray data. */ void setData(DataBuffer data); @@ -2046,15 +2071,15 @@ public interface INDArray extends Serializable, AutoCloseable { int getLeadingOnes(); /** - * Returns the specified slice of this ndarray + * Returns the slice of this from the specified dimension * - * @param i the index of the slice to return - * @param dimension the dimension to return the slice for - * @return the specified slice of this ndarray + * @param i the index of the slice to return + * @param dimension the dimension of the slice to return + * @return the slice of this matrix from the specified dimension + * and dimension */ INDArray slice(long i, int dimension); - /** * Returns the specified slice of this ndarray * @@ -2063,23 +2088,21 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray slice(long i); - /** - * Returns the start of where the ndarray is - * for the underlying data + * Returns the start of where the ndarray is for the underlying data * * @return the starting offset */ long offset(); - + // TODO: Unused untested method. /** * Returns the start of where the ndarray is for the original data buffer - * @return + * + * @return original offset. */ long originalOffset(); - /** * Reshapes the ndarray (can't change the length of the ndarray). Typically this will be a view, unless reshaping * without copying is impossible. @@ -2110,7 +2133,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray reshape(char order, boolean enforceView, long... newShape); - /** * Reshapes the ndarray (can't change the length of the ndarray). Typically this will be a view, unless reshaping * without copying is impossible. @@ -2121,7 +2143,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray reshape(char order, int rows, int columns); - /** * Reshapes the ndarray (can't change the length of the ndarray). Typically this will be a view, unless reshaping * without copying is impossible. @@ -2131,6 +2152,9 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray reshape(long... 
newShape); + /** + * See {@link #reshape(long[])} + */ INDArray reshape(int[] shape); /** @@ -2150,7 +2174,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray transpose(); - /** * Flip the rows and columns of a matrix, in-place * @@ -2216,8 +2239,11 @@ public interface INDArray extends Serializable, AutoCloseable { * @return the newly permuted array */ INDArray dimShuffle(Object[] rearrange, int[] newOrder, boolean[] broadCastable); - INDArray dimShuffle(Object[] rearrange, long[] newOrder, boolean[] broadCastable); + /** + * See {@link #dimShuffle(Object[], int[], boolean[]) + */ + INDArray dimShuffle(Object[] rearrange, long[] newOrder, boolean[] broadCastable); /** * Returns the specified column. @@ -2336,7 +2362,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ boolean isScalar(); - /** * Returns the shape of this ndarray * @@ -2346,7 +2371,7 @@ public interface INDArray extends Serializable, AutoCloseable { /** * Returns shape descriptor of this ndarray - * @return + * @return shape descriptor */ LongShapeDescriptor shapeDescriptor(); @@ -2386,7 +2411,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray broadcast(long... shape); - /** * Broadcasts this ndarray to be the specified shape * @@ -2394,7 +2418,6 @@ public interface INDArray extends Serializable, AutoCloseable { */ INDArray broadcast(INDArray result); - /** * Returns a scalar (individual element) * of a scalar ndarray @@ -2413,9 +2436,9 @@ public interface INDArray extends Serializable, AutoCloseable { /** * This method checks 2 INDArrays equality with given eps * - * @param o + * @param o INDArray to compare against. * @param eps Epsilon value to use for the quality operation - * @return + * @return True if ndarrays are equal within eps. */ boolean equalsWithEps(Object o, double eps); @@ -2448,7 +2471,7 @@ public interface INDArray extends Serializable, AutoCloseable { /** * Remainder operator * @param denominator the denominator - * @return + * @return remainder */ INDArray remainder(INDArray denominator); @@ -2456,75 +2479,86 @@ public interface INDArray extends Serializable, AutoCloseable { * Remainder operator * @param denominator the denominator * @param result the result array to put this in - * @return + * @return Remainder */ INDArray remainder(INDArray denominator, INDArray result); /** - * The scalar denominator + * The scalar remainder * @param denominator the denominator as a scalar - * @return + * @return Remainder */ INDArray remainder(Number denominator); /** - * - * @param denominator - * @param result - * @return + * The scalar remainder + * @param denominator the denominator as a scalar + * @param result the result array to put this in + * @return Remainder */ INDArray remainder(Number denominator, INDArray result); + // TODO: Unused untested method. /** * In place remainder - * @param denominator - * @return + * @param denominator the denominator + * @return Remainder */ INDArray remainderi(INDArray denominator); + // TODO: Unused untested method. 
/** * In place remainder - * @param denominator - * @return + * @param denominator the denominator + * @return Remainder */ INDArray remainderi(Number denominator); /** * remainder of division - * @param denominator the array of denominators for each element - * in this array - * @return + * @param denominator the array of denominators for each element in this array + * @return array of remainders */ INDArray fmod(INDArray denominator); /** * remainder of division - * @param denominator the + * @param denominator the array of denominators for each element in this array * @param result the result array - * @return + * @return array of remainders */ INDArray fmod(INDArray denominator, INDArray result); /** + * remainder of division by scalar. * - * @param denominator - * @return + * @param denominator the denominator + * @return array of remainders */ INDArray fmod(Number denominator); + /** + * remainder of division by scalar. + * + * @param denominator the denominator + * @param result the result array + * @return array of remainders + */ INDArray fmod(Number denominator, INDArray result); + // TODO: Unused untested method. /** * In place fmod - * @param denominator - * @return + * @param denominator the array of denominators for each element in this array + * @return array of remainders */ INDArray fmodi(INDArray denominator); + // TODO: Unused untested method. /** * In place fmod - * @param denominator - * @return + * @param denominator the denominator as a scalar + * @return array of remainders */ INDArray fmodi(Number denominator); @@ -2546,7 +2580,7 @@ public interface INDArray extends Serializable, AutoCloseable { * This method checks, if given attached INDArray is still in scope of its parent Workspace * * PLEASE NOTE: if this INDArray isn't attached to any Workspace, this method will return true - * @return + * @return true if attached to workspace. */ boolean isInScope(); @@ -2563,12 +2597,11 @@ public interface INDArray extends Serializable, AutoCloseable { /** * This method detaches INDArray from current Workspace, and attaches it to Workspace above, if any. * - * PLEASE NOTE: If this INDArray instance is - * NOT attached - it will be returned unmodified. + * PLEASE NOTE: If this INDArray instance is NOT attached - it will be returned unmodified. * PLEASE NOTE: If current Workspace is the top-tier one, * effect will be equal to detach() call - detached copy will be returned * - * @return + * @return this ndarray or a detached copy. */ INDArray leverage(); @@ -2576,8 +2609,6 @@ public interface INDArray extends Serializable, AutoCloseable { * This method detaches INDArray from current Workspace, and attaches it to Workspace with a given Id - if a workspace * with that ID exists. If no workspace with the specified ID exists, the current INDArray is returned unmodified. * - * @param id ID of the workspace to leverage to - * @return * @see #leverageTo(String, boolean) */ INDArray leverageTo(String id); @@ -2635,7 +2666,7 @@ public interface INDArray extends Serializable, AutoCloseable { * This method returns percentile value for this INDArray * * @param percentile target percentile in range of 0..100 - * @return + * @return percentile value */ Number percentileNumber(Number percentile); @@ -2648,29 +2679,28 @@ public interface INDArray extends Serializable, AutoCloseable { /** * This method returns median along given dimension(s) - * @param dimension + * @param dimension Dimension to calculate median * @return Median along specified dimensions */ INDArray median(int... 
dimension); /** - * This method returns median along given dimension(s) + * This method returns percentile along given dimension(s) * @param percentile target percentile in range of 0..100 * @param dimension Dimension to calculate percentile for - * @return + * @return array with percentiles */ INDArray percentile(Number percentile, int... dimension); - /** + /* * ------------ Sparse methods ------------ */ - /** * Return a array of non-major pointers * i.e. return the column indexes in case of row-major ndarray * @return a DataBuffer of indexes - * */ + */ DataBuffer getVectorCoordinates(); /** @@ -2681,7 +2711,7 @@ public interface INDArray extends Serializable, AutoCloseable { /** * Return the number of non-null element * @return nnz - * */ + */ int nnz(); /** @@ -2691,15 +2721,18 @@ public interface INDArray extends Serializable, AutoCloseable { * */ SparseFormat getFormat(); + //TODO: Undocumented but often used method. int[] flags(); + //TODO: Undocumented but often used method. int[] hiddenDimensions(); + //TODO: Undocumented but often used method. int[] sparseOffsets(); + //TODO: Undocumented but often used method. int underlyingRank(); - /** * Add an {@link INDArray} * to flatbuffers builder @@ -2710,19 +2743,19 @@ public interface INDArray extends Serializable, AutoCloseable { /** * This method returns true if this INDArray is special case: no-value INDArray - * @return + * @return True if empty. */ boolean isEmpty(); /** * This method returns shapeInformation as jvm long array - * @return + * @return shapeInformation */ long[] shapeInfoJava(); /** * This method returns dtype for this INDArray - * @return + * @return Datattype */ DataType dataType(); @@ -2734,45 +2767,45 @@ public interface INDArray extends Serializable, AutoCloseable { /** * This method checks if this INDArray instance is one of integer types - * @return + * @return true if integer type */ boolean isZ(); /** * This method checks if this INDArray instance has boolean type - * @return + * @return true if boolean type. */ boolean isB(); /** * This method checks if this INDArray instance has String type - * @return + * @return true if string type. */ boolean isS(); /** * This method cast elements of this INDArray to new data type * - * @param dataType - * @return + * @param dataType new datatype. + * @return this if datatype matches, otherwise a new array of specified datatype. */ INDArray castTo(DataType dataType); /** * This method checks if all elements within this array are non-zero (or true, in case of boolean) - * @return + * @return true if all non-zero. */ boolean all(); /** * This method checks if any of the elements within this array are non-zero (or true, in case of boolean) - * @return + * @return true if any non-zero. 
*/ boolean any(); /** * This method checks if any of the elements within this array are non-zero (or true, in case of boolean) - * @return + * @return true if any non-zero */ boolean none(); @@ -2798,35 +2831,29 @@ public interface INDArray extends Serializable, AutoCloseable { /** * This method returns empty array with the same dtype/order/shape as this one - * @return + * @return empty array with the same dtype/order/shape */ INDArray like(); /** * This method returns uninitialized array with the same dtype/order/shape as this one - * @return + * @return uninitialized array with the same dtype/order/shape */ INDArray ulike(); - /** - * This method returns array with gains for Barnes-Hut-Tsne algorithm - * @return - */ - //INDArray[] gains(INDArray input, INDArray gradx, INDArray epsilon); - /** * Get a string representation of the array with configurable formatting * @param options format options */ String toString(@NonNull NDArrayStrings options); - /** * Get a string representation of the array * * @param maxElements Summarize if more than maxElements in the array * @param forceSummarize Force a summary instead of a full print * @param precision The number of decimals to print. Doesn't print trailing 0s if negative + * @return string representation of the array */ String toString(long maxElements, boolean forceSummarize, int precision); diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/ISparseNDArray.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/ISparseNDArray.java index e7dd5d1db..7eac57c8a 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/ISparseNDArray.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/ISparseNDArray.java @@ -28,28 +28,11 @@ public interface ISparseNDArray extends INDArray { * * */ - /** - * Return a array of non-major pointers - * i.e. return the column indexes in case of row-major ndarray - * @return a DataBuffer of indexes - * */ DataBuffer getVectorCoordinates(); - /** - * Return a dense representation of the sparse ndarray - * */ INDArray toDense(); - /** - * Return the number of non-null element - * @return nnz - * */ int nnz(); - /** - * Return the sparse format (i.e COO, CSR, ...) - * @return format - * @see SparseFormat - * */ SparseFormat getFormat(); } diff --git a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JCublasNDArray.java b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JCublasNDArray.java index eb0db01a3..79d87a01e 100644 --- a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JCublasNDArray.java +++ b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JCublasNDArray.java @@ -544,13 +544,6 @@ public class JCublasNDArray extends BaseNDArray { return LongShapeDescriptor.fromShape(shape(), stride(), elementWiseStride(), ordering(), dataType(), isEmpty()); } - /** - * This method does direct array copy. Impossible to use on views or mixed orders. - * - * PLEASE NOTE: YOU SHOULD NEVER USE THIS METHOD, UNLESS YOU 100% CLEAR ABOUT IT - * - * @return - */ @Override public INDArray unsafeDuplication() { return unsafeDuplication(true); @@ -717,14 +710,6 @@ public class JCublasNDArray extends BaseNDArray { return copy; } - - /** - * This method pulls this INDArray into current Workspace. 
- * - * PLEASE NOTE: If there's no current Workspace - INDArray returned as is - * - * @return - */ @Override public INDArray migrate() { WorkspaceUtils.assertValidArray(this, "Cannot leverage INDArray to new workspace"); diff --git a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JcusparseNDArrayCSR.java b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JcusparseNDArrayCSR.java index 2698c299f..ae738f5c3 100644 --- a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JcusparseNDArrayCSR.java +++ b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-cuda/src/main/java/org/nd4j/linalg/jcublas/JcusparseNDArrayCSR.java @@ -114,11 +114,6 @@ public class JcusparseNDArrayCSR extends BaseSparseNDArrayCSR { throw new UnsupportedOperationException(); } - /** - * This method returns true if this INDArray is special case: no-value INDArray - * - * @return - */ @Override public boolean isEmpty() { throw new UnsupportedOperationException(); diff --git a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/NDArray.java b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/NDArray.java index dc9ab6417..a6cd47fb0 100644 --- a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/NDArray.java +++ b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/NDArray.java @@ -459,13 +459,6 @@ public class NDArray extends BaseNDArray { return new BaseNDArrayProxy(this); } - /** - * This method does direct array copy. Impossible to use on views or mixed orders. - * - * PLEASE NOTE: YOU SHOULD NEVER USE THIS METHOD, UNLESS YOU 100% CLEAR ABOUT IT - * - * @return - */ @Override public INDArray unsafeDuplication() { WorkspaceUtils.assertValidArray(this, "Cannot duplicate array"); diff --git a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/SparseNDArrayCSR.java b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/SparseNDArrayCSR.java index b35662b98..f73f9dece 100644 --- a/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/SparseNDArrayCSR.java +++ b/nd4j/nd4j-backends/nd4j-backend-impls/nd4j-native/src/main/java/org/nd4j/linalg/cpu/nativecpu/SparseNDArrayCSR.java @@ -124,11 +124,6 @@ public class SparseNDArrayCSR extends BaseSparseNDArrayCSR { throw new UnsupportedOperationException(); } - /** - * This method returns true if this INDArray is special case: no-value INDArray - * - * @return - */ @Override public boolean isEmpty() { return false; From b582e69e3b0cc50b0599dc1a1e67fb488287c65c Mon Sep 17 00:00:00 2001 From: Alex Black Date: Mon, 9 Sep 2019 22:54:07 +1000 Subject: [PATCH 03/11] Small ND4J/SameDiff fixes (#248) * #8218 Fix Nd4j.hstack rank 1 case Signed-off-by: AlexDBlack * #8209 SameDiff: don't allow empty arrays (with 0s in shape) for variables Signed-off-by: AlexDBlack --- .../org/nd4j/autodiff/samediff/SameDiff.java | 6 +++-- .../linalg/factory/BaseNDArrayFactory.java | 16 +++++++++++--- .../nd4j/autodiff/samediff/SameDiffTests.java | 22 +++++++++++++++++++ .../test/java/org/nd4j/linalg/Nd4jTestsC.java | 13 +++++++++++ 4 files changed, 52 insertions(+), 5 deletions(-) diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java 
b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java index 1821a30a0..955677ca8 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java @@ -3367,7 +3367,9 @@ public class SameDiff extends SDBaseOps { */ public SDVariable var(@NonNull String name, @NonNull VariableType variableType, WeightInitScheme weightInitScheme, org.nd4j.linalg.api.buffer.DataType dataType, long... shape) { - + for(long l : shape){ + Preconditions.checkArgument(l != 0, "Cannot create variable with a shape that contains zeros (empty array shape) - got shape %s", shape); + } if (name == null || name.length() < 1) name = getNewVarName(); @@ -3582,7 +3584,7 @@ public class SameDiff extends SDBaseOps { Preconditions.checkState(arr.dataType().isFPType(), "Cannot create variable with non-floating point type:" + " provided array has datatype %s. Variables must be floating point type to be trainable by backpropagation.\n" + "For non floating point types, these should be created as placeholders or constants instead.", arr.dataType()); - + Preconditions.checkArgument(!arr.isEmpty(), "Empty arrays cannot be used when creating variables. Array shape: %ndShape", arr); if (name == null || name.length() < 1) name = getNewVarName(); diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/factory/BaseNDArrayFactory.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/factory/BaseNDArrayFactory.java index 1edf0d651..a664d9ee5 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/factory/BaseNDArrayFactory.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/factory/BaseNDArrayFactory.java @@ -17,6 +17,7 @@ package org.nd4j.linalg.factory; +import lombok.NonNull; import lombok.val; import org.nd4j.base.Preconditions; import org.nd4j.linalg.api.blas.*; @@ -959,8 +960,18 @@ public abstract class BaseNDArrayFactory implements NDArrayFactory { * * @param arrs */ - public INDArray hstack(INDArray... arrs) { - return Nd4j.concat(1, arrs); + public INDArray hstack(@NonNull INDArray... arrs) { + int firstRank = arrs[0].rank(); + Preconditions.checkState(firstRank > 0 && firstRank <= 2, "Only rank 1 and 2 arrays may be horizontally stacked; first input has rank %ndRank shape %nhShape", arrs[0], arrs[0]); + for( int i=1; i Date: Mon, 9 Sep 2019 16:27:45 +0300 Subject: [PATCH 04/11] Shugeo cuda docs1 (#249) * Comments axis shifts. * Fixed LUP solver usage. Added helpers doc. * Switch off OMP for roll and lup. Fixed omp usage for ClipByGlobalNorm. * Switch off omp for ClipByGlobalNorm to reduce omp ambigiousness. 
--- .../ops/declarable/helpers/cpu/lup.cpp | 12 +- .../ops/declarable/helpers/cpu/roll.cpp | 8 +- .../ops/declarable/helpers/cpu/transforms.cpp | 17 +- .../ops/declarable/helpers/cuda/axis.cu | 8 +- .../ops/declarable/helpers/cuda/lup.cu | 618 +++++++++--------- 5 files changed, 318 insertions(+), 345 deletions(-) diff --git a/libnd4j/include/ops/declarable/helpers/cpu/lup.cpp b/libnd4j/include/ops/declarable/helpers/cpu/lup.cpp index 1e3c798e2..76817078b 100644 --- a/libnd4j/include/ops/declarable/helpers/cpu/lup.cpp +++ b/libnd4j/include/ops/declarable/helpers/cpu/lup.cpp @@ -50,13 +50,13 @@ namespace helpers { int n = inputMatrix->rows(); invertedMatrix->assign(0.f); - PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) + // PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) for (int i = 0; i < n; i++) invertedMatrix->p(i, i, 1.0f); if (inputMatrix->isIdentityMatrix()) return; - PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) + //PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) for (int i = 1; i < n; i++) invertedMatrix->t(i, i - 1) = -inputMatrix->t(i, i - 1); @@ -83,11 +83,11 @@ namespace helpers { return; } - PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) + //PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) for (int i = 0; i < n; i++) invertedMatrix->t(i, i) /= inputMatrix->t(i, i); - PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) + //PRAGMA_OMP_PARALLEL_FOR_IF(n > Environment::getInstance()->elementwiseThreshold()) for (int i = 0; i < n - 1; i++) invertedMatrix->t(i, i + 1) -= (inputMatrix->t(i, i + 1) * invertedMatrix->t(i + 1, i + 1) / inputMatrix->t(i, i)); @@ -124,7 +124,7 @@ namespace helpers { for(int i = 0; i < rowNum; i++ ) { pivotValue = T(0.0); pivot = -1; - PRAGMA_OMP_PARALLEL_FOR //_ARGS(firstprivate(pivot,pivotValue)) + //PRAGMA_OMP_PARALLEL_FOR //_ARGS(firstprivate(pivot,pivotValue)) for(int rowCounter = i; rowCounter < rowNum; rowCounter++ ) { if (nd4j::math::nd4j_abs(compoundMatrix.t(rowCounter, i)) > pivotValue) { pivotValue = nd4j::math::nd4j_abs(compoundMatrix.t(rowCounter, i)); @@ -140,7 +140,7 @@ namespace helpers { for( int j = i + 1; j < rowNum; j++ ) { compoundMatrix.t(j, i) /= compoundMatrix.t(i, i); - PRAGMA_OMP_PARALLEL_FOR + //PRAGMA_OMP_PARALLEL_FOR for( int k = i + 1; k < rowNum; k++ ) { compoundMatrix.t(j, k) -= compoundMatrix.t(j, i) * compoundMatrix.t(i, k); } diff --git a/libnd4j/include/ops/declarable/helpers/cpu/roll.cpp b/libnd4j/include/ops/declarable/helpers/cpu/roll.cpp index da3cb3259..b3b65f816 100644 --- a/libnd4j/include/ops/declarable/helpers/cpu/roll.cpp +++ b/libnd4j/include/ops/declarable/helpers/cpu/roll.cpp @@ -43,7 +43,7 @@ namespace helpers { int remainShift = fullLen % actualShift; // stage 1) swap last actualShift elements with first ones. - PRAGMA_OMP_PARALLEL_FOR_IF(actualShift > Environment::getInstance()->elementwiseThreshold()) + //PRAGMA_OMP_PARALLEL_FOR //_IF(actualShift > Environment::getInstance()->elementwiseThreshold()) for (int e = 0; e < actualShift; ++e) { int sourceIndex = fullLen - actualShift + e; @@ -56,7 +56,7 @@ namespace helpers { } // stage 2) swap swapped actualShift elements with rest remainShiftCount times. 
- PRAGMA_OMP_PARALLEL_FOR_IF(shiftCount > Environment::getInstance()->tadThreshold()) + //PRAGMA_OMP_PARALLEL_FOR //_IF(shiftCount > Environment::getInstance()->tadThreshold()) for (int count = 1; count < shiftCount; ++count) { for (int e = 0; e < actualShift; ++e) { int destinationIndex = fullLen - (count + 1) * actualShift + e; @@ -91,7 +91,7 @@ namespace helpers { output->assign(input); auto source = output; //input; - for (auto i = 0; i < axes.size(); i++) { + for (size_t i = 0; i < axes.size(); i++) { int axe = axes[i]; if (axe == source->rankOf() - 1) {// last dimension std::unique_ptr listOfTensors(source->allTensorsAlongDimension({axe})); @@ -115,7 +115,7 @@ namespace helpers { std::unique_ptr listOfTensors(source->allTensorsAlongDimension({dims})); std::unique_ptr listOfOutTensors(output->allTensorsAlongDimension({dims})); - + // int fullLen = listOfTensors->size(); int sizeAt = input->sizeAt(axe); diff --git a/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp b/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp index a92e6713b..71181afe8 100644 --- a/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp +++ b/libnd4j/include/ops/declarable/helpers/cpu/transforms.cpp @@ -957,26 +957,27 @@ void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, template static void clipByGlobalNorm_(std::vector const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector& outputs, bool isInplace) { - NDArray globalNorm = NDArrayFactory::create(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list])) - PRAGMA_OMP_PARALLEL_FOR + T globalNorm = 0; //NDArrayFactory::create(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list])) +// PRAGMA_OMP_PARALLEL_FOR_SIMD_REDUCTION(sumT : globalNorm) for (size_t i = 0; i < inputs.size(); i++) { auto input = inputs[i]; auto l2norm = input->reduceNumber(reduce::Norm2); - globalNorm += l2norm * l2norm; + globalNorm += l2norm.t(0) * l2norm.t(0); } - globalNorm.applyTransform(transform::Sqrt, nullptr, nullptr);// = nd4j::math::nd4j_sqrt(globalNorm); - outputs[inputs.size()]->p(0, globalNorm); + //globalNorm.applyTransform(transform::Sqrt, nullptr, nullptr);// = nd4j::math::nd4j_sqrt(globalNorm); + auto normS = nd4j::math::nd4j_sqrt(globalNorm); + outputs[inputs.size()]->p(0, normS); - const T factor = clipNorm / globalNorm.e(0); + const T factor = clipNorm / normS; - PRAGMA_OMP_PARALLEL_FOR +// PRAGMA_OMP_PARALLEL_FOR for (size_t e = 0; e < inputs.size(); e++) { // all-reduce auto input = inputs[e]; auto output = outputs[e]; - if (globalNorm.e(0) <= clipNorm) { + if (normS <= clipNorm) { output->assign(input); } else { diff --git a/libnd4j/include/ops/declarable/helpers/cuda/axis.cu b/libnd4j/include/ops/declarable/helpers/cuda/axis.cu index a3b2bcd32..1236ae495 100644 --- a/libnd4j/include/ops/declarable/helpers/cuda/axis.cu +++ b/libnd4j/include/ops/declarable/helpers/cuda/axis.cu @@ -27,11 +27,11 @@ namespace helpers { void adjustAxis(Nd4jLong rank, NDArray* axisVector, std::vector& output) { output.resize(axisVector->lengthOf()); - axisVector->tickReadDevice(); - axisVector->syncToHost(); + axisVector->tickReadDevice(); // mark input as read on device + axisVector->syncToHost(); // sync to host for (int e = 0; e < axisVector->lengthOf(); e++) { auto ca = axisVector->e(e); - if (ca < 0) + if (ca < 0) // shift values on rank for negative vals ca += rank; output[e] = ca; @@ -41,7 +41,7 @@ namespace helpers { void adjustAxis(Nd4jLong rank, std::vector &axisVector) { for (int 
e = 0; e < axisVector.size(); e++) { auto a = axisVector[e]; - if (a < 0) + if (a < 0) // shift vals on rank for negative vals axisVector[e] = a + rank; } } diff --git a/libnd4j/include/ops/declarable/helpers/cuda/lup.cu b/libnd4j/include/ops/declarable/helpers/cuda/lup.cu index f11b56745..f0d1df1cc 100644 --- a/libnd4j/include/ops/declarable/helpers/cuda/lup.cu +++ b/libnd4j/include/ops/declarable/helpers/cuda/lup.cu @@ -31,34 +31,14 @@ namespace nd4j { namespace ops { namespace helpers { -// template -// static __device__ void swapRows_(T* matrix, Nd4jLong* shape, int theFirst, int theSecond, Nd4jLong N) { -// if (theFirst != theSecond) { -// auto start = threadIdx.x + blockIdx.x * blockDim.x; -// auto step = blockDim.x * gridDim.x; -// for (auto i = start; i < N; i += step) { -// Nd4jLong iCoord1[] = {theFirst, i}; -// Nd4jLong iCoord2[] = {theSecond, i}; -// auto iIndex1 = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), iCoord1, 2); -// auto iIndex2 = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), iCoord2, 2); -// //atomicExch(&matrix[iIndex1], matrix[iIndex2]); -// T e0 = matrix[iIndex1]; -// T e1 = matrix[iIndex2]; -// matrix[iIndex1] = e0; -// matrix[iIndex2] = e1; -// } -// } -// } -// BUILD_SINGLE_TEMPLATE(template void swapRows_, (NDArray* matrix, int theFirst, int theSecond), FLOAT_TYPES); -// -// void swapRows(NDArray* matrix, int theFirst, int theSecond) { -// BUILD_SINGLE_SELECTOR(matrix->dataType(), swapRows_, (matrix, theFirst, theSecond), FLOAT_TYPES); -// } + +// ------------------------------------------------------------------------------------------------------------------ // +// invert the second diagonal for lower diagonal matrix template static __global__ void invertKernelLow(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { - T *inverted = reinterpret_cast(invertedBuf); - T *input = reinterpret_cast(inputBuf); + T* inverted = reinterpret_cast(invertedBuf); + T* input = reinterpret_cast(inputBuf); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; @@ -71,11 +51,13 @@ namespace helpers { auto dxIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posX, 2); auto dyIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posY, 2); auto zIndex = shape::getOffset(0, shape::shapeOf(invertedShape), shape::stride(invertedShape), pos, 2); + // invert lower triangular matrix inverted[zIndex] = -input[xIndex] / (input[dxIndex] * input[dyIndex]); // math::atomics::nd4j_atomicAdd(&inverted[zIndex], - input[xIndex] * inverted[iIndex] / input[dIndex]); } } - +// ------------------------------------------------------------------------------------------------------------------ // +// invert diagonal vals to upper diagonal matrix template static __global__ void upvertKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { @@ -90,10 +72,13 @@ namespace helpers { auto xIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), pos, 2); auto zIndex = shape::getOffset(0, shape::shapeOf(invertedShape), shape::stride(invertedShape), pos, 2); // math::atomics::nd4j_atomicDiv(&inverted[zIndex], input[xIndex]); + // invert diagonal elements inverted[zIndex] /= input[xIndex]; } } +// ------------------------------------------------------------------------------------------------------------------ // +// invert upper second diagonal template static 
__global__ void upvertKernelUp(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { @@ -120,18 +105,17 @@ namespace helpers { for (int i = start; i < n - 1; i += step) { Nd4jLong pos[] = {i, i + 1}; - //Nd4jLong posY[] = {i, i}; Nd4jLong posX[] = {i + 1, i + 1}; auto xIndex = shape::getOffset(0, inputShapeOf, shape::stride(inputShape), pos, 2); -// auto yIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posY, 2); -// auto yIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), pos, 2); auto iIndex = shape::getOffset(0, invertedShapeOf, invertedStride, posX, 2); auto zIndex = shape::getOffset(0, invertedShapeOf, invertedStride, pos, 2); + // invert upper matrix math::atomics::nd4j_atomicAdd(&inverted[zIndex], -input[xIndex] * inverted[iIndex]); // / input[yIndex]); //inputMatrix->t(i, i + 1) * invertedMatrix->t(i + 1, i + 1) / inputMatrix->t(i, i) } } +// ------------------------------------------------------------------------------------------------------------------ // template static __global__ void invertLowKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { @@ -152,11 +136,14 @@ namespace helpers { auto dIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posD, 2); auto zIndex = shape::getOffset(0, shape::shapeOf(invertedShape), shape::stride(invertedShape), posZ, 2); + // invert non-diagonal elements math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex] / input[dIndex]); } } } +// ------------------------------------------------------------------------------------------------------------------ // +// Invertion of upper triangular matrix non-diagonal elements when main and second diagonals already processed template static __global__ void invertUpKernel(void *invertedBuf, Nd4jLong *invertedShape, void *inputBuf, Nd4jLong *inputShape, Nd4jLong n) { @@ -183,18 +170,20 @@ namespace helpers { Nd4jLong posZ[] = {i, j}; Nd4jLong posY[] = {k, j}; Nd4jLong posX[] = {i, k}; -// Nd4jLong posD[] = {i, i}; - + // inversion with Joardan Gauss transformation auto xIndex = shape::getOffset(0, inputShapeOf, inputStrideOf, posX, 2); auto yIndex = shape::getOffset(0, invertedShapeOf, invertedStrideOf, posY, 2); - // auto dIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posD, 2); auto zIndex = shape::getOffset(0, invertedShapeOf, invertedStrideOf, posZ, 2); - math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]);// / input[dIndex]); -// printf("(%d, %d) inverted[%lld] = %lf (-inverted[%lld] * input[%lld]\n", blockIdx.x, threadIdx.x, zIndex, inverted[zIndex], yIndex, xIndex); + // invert upper non-diagonal elements + math::atomics::nd4j_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]); } } } +// ------------------------------------------------------------------------------------------------------------------ // +// procedure to invert lower-triangular matrix. 
+// In current case lower triangular matrix has main diagonal with general values +// template static void invertLowerMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { int n = inputMatrix->rows(); @@ -204,20 +193,26 @@ namespace helpers { auto stream = context->getCudaStream(); + // invert lower matrix // invert main diagonal upvertKernel<<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); // invert the second diagonal invertKernelLow<<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); -// invertKernelLow<<<1, n, 128, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); + // invert non-diagonal elements invertLowKernel<<>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); } +// ------------------------------------------------------------------------------------------------------------------ // +// caller for invert lower matrix routine void invertLowerMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (context, inputMatrix, invertedMatrix), FLOAT_NATIVE); NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix}); } +// ------------------------------------------------------------------------------------------------------------------ // +// procedure to invert upper-triangular matrix. +// In current case upper triangular matrix has main diagonal with all ones on it. 
template static void invertUpperMatrix_(LaunchContext *context, NDArray* inputMatrix, NDArray* invertedMatrix) { int n = inputMatrix->rows(); @@ -227,342 +222,319 @@ namespace helpers { return; } - //upvertKernel<<<1, n, 128, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); + // invert upper matrix + // invert the second diagonal upvertKernelUp<<<1, n, 512, *stream >>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(), inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); - invertedMatrix->tickWriteDevice(); - invertedMatrix->printIndexedBuffer("Step1 UP inversion"); + + // invert other elements invertUpKernel<<>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n); } +// ------------------------------------------------------------------------------------------------------------------ // +// invertion of upper triangular matrix - runner routine void invertUpperMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) { NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); BUILD_SINGLE_SELECTOR(invertedMatrix->dataType(), invertUpperMatrix_, (context, inputMatrix, invertedMatrix), FLOAT_NATIVE); NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix}); } -// template -// static __global__ void lupKernel(T* compound, Nd4jLong* compoundShape, T* permutation, Nd4jLong* permutationShape, Nd4jLong rowNum) { -// int swapCount = 0; -// for(int i = blockIdx.x; i < rowNum; i += gridDim.x ) { -// auto pivotValue = T(0.0); -// auto pivot = -1; -// -// for(int rowCounter = i; rowCounter < rowNum; rowCounter++ ) { -// Nd4jLong rowCoord[] = {rowCounter, i}; -// auto rowPos = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), rowCoord, 2); -// if(nd4j::math::nd4j_abs(compound[rowPos]) > pivotValue ) { -// pivotValue = nd4j::math::nd4j_abs(compound[rowPos]); -// pivot = rowCounter; -// } -// } -// -// if( pivotValue != T(0.0) ) { -// swapRows_(compound, compoundShape, pivot, i, rowNum); -// swapRows_(permutation, permutationShape, pivot, i, rowNum); -// if (pivot != i) -// swapCount++; -// -// for( int j = i + 1; j < rowNum; j++ ) { -// Nd4jLong posJIbuf[] = {j, i}; -// Nd4jLong posIIbuf[] = {i, i}; -// auto posJI = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posJIbuf, 2); -// auto posII = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posIIbuf, 2); -// -// compound[posJI] /= compound[posII]; -// for( int k = i + 1; k < rowNum; k++ ) { -// Nd4jLong posJKbuf[] = {j, k}; -// Nd4jLong posIKbuf[] = {i, k}; -// auto posJK = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posJKbuf, 2); -// auto posIK = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posIKbuf, 2); -// T arg = compound[posJI] * compound[posIK]; -// compound[posJK] -= arg; -// } -// } -// } -// } -// } +// ------------------------------------------------------------------------------------------------------------------ // + // determinant kernel - accumulation product of all values on the main diagonal + template + static __global__ void determinantKernel(T *compound, T *result, Nd4jLong len) { + auto start = blockIdx.x * blockDim.x + threadIdx.x; + auto step = blockDim.x * gridDim.x; + for (auto i = start; i < len; i += step) { + auto pos = i * 
len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); + // multiply all diagonal elements + math::atomics::nd4j_atomicMul(&result[0], compound[pos]); + } + } -// template - template - static __global__ void determinantKernel(T *compound, T *result, Nd4jLong len) { - //F tempRes = result[0]; +// ------------------------------------------------------------------------------------------------------------------ // + // determinant logarithm - accumulation sum of all logarithm values on the main diagonal. All in logarithic values + // should be positive + template + static __global__ void determinantLogKernel(T *compound, T *result, Nd4jLong len) { + auto start = blockIdx.x * blockDim.x + threadIdx.x; + auto step = blockDim.x * gridDim.x; + for (auto i = start; i < len; i += step) { + auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); + // sum logs of all diagonal elements + math::atomics::nd4j_atomicAdd(result, math::nd4j_log(math::nd4j_abs(compound[pos]))); + } + } - auto start = blockIdx.x * blockDim.x + threadIdx.x; - auto step = blockDim.x * gridDim.x; - for (auto i = start; i < len; i += step) { - auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); - math::atomics::nd4j_atomicMul(&result[0], compound[pos]); - } +// ------------------------------------------------------------------------------------------------------------------ // + // kernel to copy matrix with given shape to compound tensor with given pos + // output - a N-D tensor buffer with rank not less than 2, input - 2D square n x n matrix with n = rowLen + template + static __global__ void + fillMatrix(void *output, Nd4jLong *outShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { + __shared__ F *matrix; + __shared__ T *inputBuf; + __shared__ Nd4jLong inputLen; + __shared__ Nd4jLong n2; + + if (threadIdx.x == 0) { + matrix = reinterpret_cast(output); + inputBuf = reinterpret_cast(input); + inputLen = shape::length(inputShape); + n2 = rowLen * rowLen; + } + __syncthreads(); + + auto start = blockIdx.x * blockDim.x + threadIdx.x; + auto step = blockDim.x * gridDim.x; + + for (int k = pos + start, j = start; j < n2; k += step, j += step) { + auto xIndex = shape::getIndexOffset(k, inputShape, inputLen); + matrix[j] = (F) inputBuf[xIndex]; + } + } + +// ------------------------------------------------------------------------------------------------------------------ // +// same as above, but without type conversion + template + static __global__ void + returnMatrix(void *output, Nd4jLong *outputShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { + __shared__ T* matrix; + __shared__ T* outputBuf; + __shared__ Nd4jLong outputLen; + __shared__ Nd4jLong n2; + + if (threadIdx.x == 0) { + matrix = reinterpret_cast(input); + outputBuf = reinterpret_cast(output); + outputLen = shape::length(inputShape); + n2 = rowLen * rowLen; + } + __syncthreads(); + auto start = blockIdx.x * blockDim.x + threadIdx.x; + auto step = blockDim.x * gridDim.x; + + for (int k = pos + start, j = start; j < n2; k += step, j += step) { + auto zIndex = shape::getIndexOffset(k, outputShape, outputLen); + outputBuf[zIndex] = matrix[j]; + } + } + +// ------------------------------------------------------------------------------------------------------------------ // + // fill up permutaion matrix kernel. 
Permutation matrix filled with zeros and ones + template + static __global__ void fillUpPermutation(void *output, Nd4jLong *shape, int *source, int rowNum) { + F *permutation = reinterpret_cast(output); + + auto start = blockIdx.x * blockDim.x + threadIdx.x; + auto step = blockDim.x * gridDim.x; + for (auto i = start; i < rowNum; i += step) { + int val = source[i] - 1; + Nd4jLong posF[] = {i, val}; + auto pos = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), posF, 2); + permutation[pos] = F(1.f); + } + } + +// ------------------------------------------------------------------------------------------------------------------ // + // LUP decomposition runner - using CUBLAS SOLVER + // if permutation is given, then using LUP decomposition, LU decomposition otherwise + // L - lower triangular, U - upper triangular, P - permutation matricies + // PA = LU + // + // input - A matrix nxn + // compound - C matrix L + U - I, or main diagonal and lower - L matrix, from the 2nd diagonal - U matrix + template + static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) { + auto stream = context->getCudaStream(); + auto n = input->rows(); + cusolverDnHandle_t cusolverH = nullptr; + // create solver handle + cusolverStatus_t status = cusolverDnCreate(&cusolverH); + if (CUSOLVER_STATUS_SUCCESS != status) { + throw cuda_exception::build("Cannot create cuSolver handle", status); + } + // set solver stream + status = cusolverDnSetStream(cusolverH, *stream); + if (CUSOLVER_STATUS_SUCCESS != status) { + throw cuda_exception::build("Cannot set up stream for cuda solver", status); + } + int lwork = 0; + int *d_info = nullptr; + // allocate memory for permutation vector + auto err = cudaMalloc((void **) &d_info, sizeof(int)); + if (err) { + throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err); } - template - static __global__ void determinantLogKernel(T *compound, T *result, Nd4jLong len) { -// F tempRes = (F)result[0]; + DataType dtype = input->dataType(); + switch (dtype) { // there are two implementations with cublas for LUP decomposition - double and float - auto start = blockIdx.x * blockDim.x + threadIdx.x; - auto step = blockDim.x * gridDim.x; - for (auto i = start; i < len; i += step) { - auto pos = i * len + i; //shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2); - math::atomics::nd4j_atomicAdd(result, math::nd4j_log(math::nd4j_abs(compound[pos]))); - } -// __syncthreads(); -// -// if (threadIdx.x == 0) { -// result[0] = (T)math::nd4j_log(math::nd4j_abs(tempRes)); -// } - } + case DataType::DOUBLE: { + double *d_work = nullptr; + // compute internal buffer size + double *matrix = reinterpret_cast(input->specialBuffer()); + status = cusolverDnDgetrf_bufferSize( + cusolverH, + n, + n, + matrix, + n, + &lwork); + if (CUSOLVER_STATUS_SUCCESS != status) { + throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); + } - template - static __global__ void - fillMatrix(void *output, Nd4jLong *outShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, Nd4jLong rowLen) { - __shared__ - F *matrix; - __shared__ - T *inputBuf; - __shared__ - Nd4jLong inputLen; - __shared__ - Nd4jLong n2; + err = cudaMalloc((void **) &d_work, sizeof(float) * lwork); + if (err) { + throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", + err); + } - if (threadIdx.x == 0) { - matrix = reinterpret_cast(output); - inputBuf = reinterpret_cast(input); - 
inputLen = shape::length(inputShape); - n2 = rowLen * rowLen; - } - __syncthreads(); - auto start = blockIdx.x * blockDim.x + threadIdx.x; - auto step = blockDim.x * gridDim.x; - - for (int k = pos + start, j = start; j < n2; k += step, j += step) { - auto xIndex = shape::getIndexOffset(k, inputShape, inputLen); - matrix[j] = (F) inputBuf[xIndex]; - } - } - - template - static __global__ void - returnMatrix(void *output, Nd4jLong *outputShape, void *input, Nd4jLong *inputShape, Nd4jLong pos, - Nd4jLong rowLen) { - __shared__ T *matrix; - __shared__ T *outputBuf; - __shared__ Nd4jLong outputLen; - __shared__ Nd4jLong n2; - - if (threadIdx.x == 0) { - matrix = reinterpret_cast(input); - outputBuf = reinterpret_cast(output); - outputLen = shape::length(inputShape); - n2 = rowLen * rowLen; - } - __syncthreads(); - auto start = blockIdx.x * blockDim.x + threadIdx.x; - auto step = blockDim.x * gridDim.x; - - for (int k = pos + start, j = start; j < n2; k += step, j += step) { - auto zIndex = shape::getIndexOffset(k, outputShape, outputLen); - outputBuf[zIndex] = (T) matrix[j]; - } - } - - template - static __global__ void fillUpPermutation(void *output, Nd4jLong *shape, int *source, int rowNum) { - F *permutation = reinterpret_cast(output); - - auto start = blockIdx.x * blockDim.x + threadIdx.x; - auto step = blockDim.x * gridDim.x; - for (auto i = start; i < rowNum; i += step) { - int val = source[i] - 1; - Nd4jLong posF[] = {i, val}; - auto pos = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), posF, 2); - permutation[pos] = F(1.f); - } - } - - template - static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) { - auto stream = context->getCudaStream(); - auto n = input->rows(); - cusolverDnHandle_t cusolverH = nullptr; - cusolverStatus_t status = cusolverDnCreate(&cusolverH); - if (CUSOLVER_STATUS_SUCCESS != status) { - throw cuda_exception::build("Cannot create cuSolver handle", status); - } - status = cusolverDnSetStream(cusolverH, *stream); - if (CUSOLVER_STATUS_SUCCESS != status) { - throw cuda_exception::build("Cannot set up stream for cuda solver", status); - } - int lwork = 0; - int *d_info = nullptr; - - auto err = cudaMalloc((void **) &d_info, sizeof(int)); - if (err) { - throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err); - } - - DataType dtype = input->dataType(); - switch (dtype) { - - case DataType::DOUBLE: { - double *d_work = nullptr; - err = cudaMalloc((void **) &d_work, sizeof(float) * lwork); - if (err) { - throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", - err); - } - double *matrix = reinterpret_cast(input->specialBuffer()); - status = cusolverDnDgetrf_bufferSize( + if (permutation == nullptr) + status = cusolverDnDgetrf( cusolverH, n, n, matrix, n, - &lwork); - if (CUSOLVER_STATUS_SUCCESS != status) { - throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); - } - if (permutation == nullptr) - status = cusolverDnDgetrf( - cusolverH, - n, - n, - matrix, - n, - d_work, - nullptr, - d_info); - else { - NDArray permutVector('c', {n}, nd4j::DataType::INT32, context); - int *permutationBuf = reinterpret_cast(permutVector.specialBuffer()); - status = cusolverDnDgetrf( - cusolverH, - n, - n, - matrix, - n, - d_work, - permutationBuf, - d_info); - fillUpPermutation << < n, n, 1024, *stream >> > - (permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); - 
permutation->tickWriteDevice(); - } - err = cudaFree(d_work); - if (err) { - throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", - err); - } - } - break; - case DataType::FLOAT32: { - float *matrix = reinterpret_cast(input->specialBuffer()); - float *d_work = nullptr; - err = cudaMalloc((void **) &d_work, sizeof(float) * lwork); - if (err) { - throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", - err); - } - - status = cusolverDnSgetrf_bufferSize( + d_work, + nullptr, + d_info); + else { + NDArray permutVector('c', {n}, nd4j::DataType::INT32, context); + int *permutationBuf = reinterpret_cast(permutVector.specialBuffer()); + status = cusolverDnDgetrf( cusolverH, n, n, matrix, n, - &lwork); - if (CUSOLVER_STATUS_SUCCESS != status) { - throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); - } - - if (permutation == nullptr) - status = cusolverDnSgetrf( - cusolverH, - n, - n, - matrix, - n, - d_work, - nullptr, - d_info); - else { - NDArray permutVector('c', {n}, nd4j::DataType::INT32, context); - int *permutationBuf = reinterpret_cast(permutVector.specialBuffer()); - status = cusolverDnSgetrf( - cusolverH, - n, - n, - matrix, - n, - d_work, - permutationBuf, - d_info); - fillUpPermutation <<< n, n, 128, *stream >> > - (permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); - permutation->tickWriteDevice(); - } - err = cudaFree(d_work); - if (err) { - throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", - err); - } - + d_work, + permutationBuf, + d_info); + fillUpPermutation << < n, n, 1024, *stream >> > + (permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); + permutation->tickWriteDevice(); + } + err = cudaFree(d_work); + if (err) { + throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", + err); } } - if (CUSOLVER_STATUS_SUCCESS != status) { - throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status); + break; + case DataType::FLOAT32: { + float *matrix = reinterpret_cast(input->specialBuffer()); + float *d_work = nullptr; + + status = cusolverDnSgetrf_bufferSize( + cusolverH, + n, + n, + matrix, + n, + &lwork); + if (CUSOLVER_STATUS_SUCCESS != status) { + throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status); + } + + err = cudaMalloc((void **) &d_work, sizeof(float) * lwork); + if (err) { + throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", + err); + } + + if (permutation == nullptr) + status = cusolverDnSgetrf( + cusolverH, + n, + n, + matrix, + n, + d_work, + nullptr, + d_info); + else { + NDArray permutVector('c', {n}, nd4j::DataType::INT32, context); + int *permutationBuf = reinterpret_cast(permutVector.specialBuffer()); + status = cusolverDnSgetrf( + cusolverH, + n, + n, + matrix, + n, + d_work, + permutationBuf, + d_info); + fillUpPermutation <<< n, n, 128, *stream >> > + (permutation->specialBuffer(), permutation->specialShapeInfo(), permutationBuf, n); + permutation->tickWriteDevice(); + } + err = cudaFree(d_work); + if (err) { + throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", + err); + } + } - err = cudaFree(d_info); - if (err) { - throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err); - } - cusolverDnDestroy(cusolverH); + } + if 
(CUSOLVER_STATUS_SUCCESS != status) { + throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status); + } + err = cudaFree(d_info); + if (err) { + throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err); + } + cusolverDnDestroy(cusolverH); // NDArray::registerSpecialUse({input}, {input}); - input->tickWriteDevice(); - } + input->tickWriteDevice(); + } +// ------------------------------------------------------------------------------------------------------------------ // - BUILD_SINGLE_TEMPLATE(template void lup_,(LaunchContext * context, NDArray * input, NDArray * output, NDArray * permutation), FLOAT_NATIVE); + BUILD_SINGLE_TEMPLATE(template void lup_,(LaunchContext * context, NDArray * input, NDArray * output, NDArray * permutation), FLOAT_NATIVE); - template - static int determinant_(nd4j::LaunchContext *context, NDArray *input, NDArray *output) { - Nd4jLong n = input->sizeAt(-1); - Nd4jLong n2 = n * n; - std::vector dims(); - auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); - //auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1}); +// ------------------------------------------------------------------------------------------------------------------ // + template + static int determinant_(nd4j::LaunchContext *context, NDArray *input, NDArray *output) { + Nd4jLong n = input->sizeAt(-1); + Nd4jLong n2 = n * n; + std::vector dims(); + auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1}); + //auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1}); // DataType dtype = input->dataType(); // if (dtype != DataType::DOUBLE) // dtype = DataType::FLOAT32; - auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT(), context); //, block.getWorkspace()); - auto det = NDArrayFactory::create(1); - auto stream = context->getCudaStream(); - NDArray::prepareSpecialUse({output}, {input}); - dim3 launchDims(256, 256, 1024); - output->assign(1.f); - for (int e = 0; e < output->lengthOf(); e++) { - Nd4jLong pos = e * n2; + auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT(), context); //, block.getWorkspace()); + auto det = NDArrayFactory::create(1); + auto stream = context->getCudaStream(); + NDArray::prepareSpecialUse({output}, {input}); + dim3 launchDims(256, 256, 1024); + output->assign(1.f); + for (int e = 0; e < output->lengthOf(); e++) { + Nd4jLong pos = e * n2; // if (matrix.dataType() == input->dataType()) - fillMatrix<<>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); + fillMatrix<<>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // else // fillMatrix<<>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n); // if (matrix.dataType() == input->dataType()) - lup_(context, &matrix, nullptr, nullptr); + lup_(context, &matrix, nullptr, nullptr); // else // lup_(context, &matrix, nullptr, nullptr); - auto offset = shape::getIndexOffset(e, output->shapeInfo(), output->lengthOf()); - auto inputBuf = reinterpret_cast(matrix.specialBuffer()); - auto outputBuf = reinterpret_cast(output->specialBuffer()) + offset; + auto offset = 
shape::getIndexOffset(e, output->shapeInfo(), output->lengthOf()); + auto inputBuf = reinterpret_cast(matrix.specialBuffer()); + auto outputBuf = reinterpret_cast(output->specialBuffer()) + offset; // if (matrix.dataType() == input->dataType()) - determinantKernel << < launchDims.x, launchDims.y, launchDims.z, *stream >> > - (inputBuf, outputBuf, n); + determinantKernel << < launchDims.x, launchDims.y, launchDims.z, *stream >> > + (inputBuf, outputBuf, n); // else // determinantKernel<<>> (inputBuf, outputBuf, n); - } - NDArray::registerSpecialUse({output}, {input}); - - return Status::OK(); } + NDArray::registerSpecialUse({output}, {input}); + + return Status::OK(); + } int determinant(nd4j::LaunchContext *context, NDArray *input, NDArray *output) { NDArray::prepareSpecialUse({output}, {input}); From 3fb9aecb59f437f9d2a5f4ac7b34b7773123bb49 Mon Sep 17 00:00:00 2001 From: Alex Black Date: Tue, 10 Sep 2019 12:22:10 +1000 Subject: [PATCH 05/11] Fix for null shape in SameDiff.var validation (#250) Signed-off-by: AlexDBlack --- .../src/main/java/org/nd4j/autodiff/samediff/SameDiff.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java index 955677ca8..452077238 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/SameDiff.java @@ -3367,8 +3367,10 @@ public class SameDiff extends SDBaseOps { */ public SDVariable var(@NonNull String name, @NonNull VariableType variableType, WeightInitScheme weightInitScheme, org.nd4j.linalg.api.buffer.DataType dataType, long... shape) { - for(long l : shape){ - Preconditions.checkArgument(l != 0, "Cannot create variable with a shape that contains zeros (empty array shape) - got shape %s", shape); + if(shape != null) { + for (long l : shape) { + Preconditions.checkArgument(l != 0, "Cannot create variable with a shape that contains zeros (empty array shape) - got shape %s", shape); + } } if (name == null || name.length() < 1) From f91970734b3fa067eade8bc94d9b1a93eefcf0d7 Mon Sep 17 00:00:00 2001 From: Alex Black Date: Tue, 10 Sep 2019 13:14:29 +1000 Subject: [PATCH 06/11] Another small fix (#251) Signed-off-by: AlexDBlack --- .../imports/graphmapper/BaseGraphMapper.java | 5 +++-- .../java/org/nd4j/linalg/util/ArrayUtil.java | 20 +++++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/imports/graphmapper/BaseGraphMapper.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/imports/graphmapper/BaseGraphMapper.java index fe252aeeb..95f238973 100644 --- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/imports/graphmapper/BaseGraphMapper.java +++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/imports/graphmapper/BaseGraphMapper.java @@ -16,6 +16,7 @@ package org.nd4j.imports.graphmapper; +import org.nd4j.linalg.util.ArrayUtil; import org.nd4j.shade.protobuf.Message; import org.nd4j.shade.protobuf.TextFormat; import lombok.extern.slf4j.Slf4j; @@ -225,8 +226,8 @@ public abstract class BaseGraphMapper probably not a variable... + if(shape == null || ArrayUtil.contains(shape, 0)){ + //No shape, or 0 in shape -> probably not a variable... 
v = diff.var(entry.getKey(), VariableType.ARRAY, null, dt, (long[])null); } else { v = diff.var(entry.getKey(), dt, shape); diff --git a/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/util/ArrayUtil.java b/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/util/ArrayUtil.java index 2fe33dfba..cf54d4357 100644 --- a/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/util/ArrayUtil.java +++ b/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/util/ArrayUtil.java @@ -66,6 +66,26 @@ public class ArrayUtil { return false; } + public static boolean contains(int[] arr, int value){ + if(arr == null) + return false; + for( int i : arr ) { + if (i == value) + return true; + } + return false; + } + + public static boolean contains(long[] arr, int value){ + if(arr == null) + return false; + for( long i : arr ) { + if (i == value) + return true; + } + return false; + } + /** * * @param arrs From 4f7b35ac825654c15c944cd416b4b68bf4cc16c9 Mon Sep 17 00:00:00 2001 From: Alex Black Date: Tue, 10 Sep 2019 19:09:46 +1000 Subject: [PATCH 07/11] Update links to eclipse repos (#252) * Fix repo links and clean up old github templates Signed-off-by: AlexDBlack * More link updates Signed-off-by: AlexDBlack --- CONTRIBUTING.md | 2 +- README.md | 26 +- arbiter/.github/CONTRIBUTING.md | 15 - arbiter/.github/ISSUE_TEMPLATE.md | 19 - arbiter/.github/PULL_REQUEST_TEMPLATE.md | 10 - datavec/.github/CONTRIBUTING.md | 15 - datavec/.github/ISSUE_TEMPLATE.md | 19 - datavec/.github/PULL_REQUEST_TEMPLATE.md | 10 - datavec/README.md | 6 +- deeplearning4j/.github/CONTRIBUTING.md | 1 - deeplearning4j/.github/ISSUE_TEMPLATE.md | 19 - .../.github/PULL_REQUEST_TEMPLATE.md | 16 - deeplearning4j/.github/lock.yml | 28 -- deeplearning4j/GITTER_GUIDELINES.md | 6 +- deeplearning4j/README.md | 10 +- docs/README.md | 2 +- docs/datavec/templates/overview.md | 6 +- docs/deeplearning4j-nlp/templates/doc2vec.md | 2 +- docs/deeplearning4j-nlp/templates/overview.md | 8 +- docs/deeplearning4j-nlp/templates/word2vec.md | 18 +- .../templates/computationgraph.md | 22 +- .../templates/custom-layer.md | 2 +- .../templates/early-stopping.md | 2 +- .../deeplearning4j-nn/templates/evaluation.md | 2 +- .../templates/model-persistence.md | 2 +- docs/deeplearning4j-nn/templates/recurrent.md | 8 +- .../templates/visualization.md | 8 +- .../templates/data-howto.md | 14 +- .../templates/howto.md | 14 +- .../templates/intro.md | 4 +- .../templates/parameter-server.md | 4 +- .../templates/technicalref.md | 4 +- docs/deeplearning4j-zoo/templates/overview.md | 24 +- .../templates/android-image-classification.md | 2 +- .../templates/android-linear-classifier.md | 2 +- docs/deeplearning4j/templates/beginners.md | 2 +- docs/deeplearning4j/templates/benchmark.md | 6 +- .../templates/build-from-source.md | 24 +- docs/deeplearning4j/templates/cheat-sheet.md | 348 +++++++++--------- docs/deeplearning4j/templates/concepts.md | 4 +- .../templates/config-performance-debugging.md | 6 +- .../templates/config-snapshots.md | 4 +- docs/deeplearning4j/templates/contribute.md | 8 +- .../deeplearning4j/templates/examples-tour.md | 66 ++-- docs/deeplearning4j/templates/quickstart.md | 16 +- .../templates/troubleshooting-training.md | 4 +- docs/keras-import/templates/activations.md | 2 +- docs/keras-import/templates/constraints.md | 2 +- docs/keras-import/templates/initializers.md | 2 +- docs/keras-import/templates/losses.md | 2 +- docs/keras-import/templates/overview.md | 10 +- docs/keras-import/templates/regularizers.md | 2 +- .../templates/supported-features.md | 142 +++---- 
docs/nd4j/templates/overview.md | 16 +- docs/nd4j/templates/syntax.md | 4 +- docs/samediff/templates/adding-ops.md | 52 +-- gym-java-client/.github/CONTRIBUTING.md | 15 - gym-java-client/.github/ISSUE_TEMPLATE.md | 19 - .../.github/PULL_REQUEST_TEMPLATE.md | 10 - jumpy/README.md | 2 +- libnd4j/RaspberryPi.md | 2 +- libnd4j/UnderstandingGraph.md | 2 +- libnd4j/linuxOnPower.md | 7 +- libnd4j/macOSx10 (CPU only).md | 2 +- libnd4j/windows.md | 4 +- nd4j/.github/CONTRIBUTING.md | 15 - nd4j/.github/ISSUE_TEMPLATE.md | 19 - nd4j/.github/PULL_REQUEST_TEMPLATE.md | 10 - nd4j/README.md | 4 +- nd4j/RaspberryPi.md | 2 +- nd4s/README.md | 4 +- pydatavec/README.md | 2 +- rl4j/.github/CONTRIBUTING.md | 15 - rl4j/.github/ISSUE_TEMPLATE.md | 19 - rl4j/.github/PULL_REQUEST_TEMPLATE.md | 10 - rl4j/README.md | 8 +- scalnet/.github/CONTRIBUTING.md | 7 - scalnet/.github/ISSUE_TEMPLATE.md | 19 - scalnet/.github/PULL_REQUEST_TEMPLATE.md | 10 - scalnet/README.md | 5 +- 80 files changed, 480 insertions(+), 806 deletions(-) delete mode 100644 arbiter/.github/CONTRIBUTING.md delete mode 100644 arbiter/.github/ISSUE_TEMPLATE.md delete mode 100644 arbiter/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 datavec/.github/CONTRIBUTING.md delete mode 100644 datavec/.github/ISSUE_TEMPLATE.md delete mode 100644 datavec/.github/PULL_REQUEST_TEMPLATE.md delete mode 120000 deeplearning4j/.github/CONTRIBUTING.md delete mode 100644 deeplearning4j/.github/ISSUE_TEMPLATE.md delete mode 100644 deeplearning4j/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 deeplearning4j/.github/lock.yml delete mode 100644 gym-java-client/.github/CONTRIBUTING.md delete mode 100644 gym-java-client/.github/ISSUE_TEMPLATE.md delete mode 100644 gym-java-client/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 nd4j/.github/CONTRIBUTING.md delete mode 100644 nd4j/.github/ISSUE_TEMPLATE.md delete mode 100644 nd4j/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 rl4j/.github/CONTRIBUTING.md delete mode 100644 rl4j/.github/ISSUE_TEMPLATE.md delete mode 100644 rl4j/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 scalnet/.github/CONTRIBUTING.md delete mode 100644 scalnet/.github/ISSUE_TEMPLATE.md delete mode 100644 scalnet/.github/PULL_REQUEST_TEMPLATE.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 024ac9c21..4e75d7bfe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ Thanks for your interest in DL4J. Our goal is to bring fast, open-source deep le ## Getting Started -Deeplearning4j's [open issues are here](https://github.com/deeplearning4j/deeplearning4j/issues). In time, we'll tag issues that would make a good first pull request for new contributors. An easy way to get started helping the project is to *file an issue*. You can do that on the Deeplearning4j issues page by clicking on the green button at the right. Issues can include bugs to fix, features to add, or documentation that looks outdated. +Deeplearning4j's [open issues are here](https://github.com/eclipse/deeplearning4j/issues). In time, we'll tag issues that would make a good first pull request for new contributors. An easy way to get started helping the project is to *file an issue*. You can do that on the Deeplearning4j issues page by clicking on the green button at the right. Issues can include bugs to fix, features to add, or documentation that looks outdated. 
Note that you will need to [build dl4j from source](https://deeplearning4j.org/docs/latest/deeplearning4j-build-from-source) diff --git a/README.md b/README.md index da04994d4..2b69f7981 100644 --- a/README.md +++ b/README.md @@ -2,17 +2,17 @@ Welcome to the new monorepo of Deeplearning4j that contains the source code for all the following projects, in addition to the original repository of Deeplearning4j moved to [deeplearning4j](deeplearning4j): - * https://github.com/deeplearning4j/libnd4j - * https://github.com/deeplearning4j/nd4j - * https://github.com/deeplearning4j/datavec - * https://github.com/deeplearning4j/arbiter - * https://github.com/deeplearning4j/nd4s - * https://github.com/deeplearning4j/gym-java-client - * https://github.com/deeplearning4j/rl4j - * https://github.com/deeplearning4j/scalnet - * https://github.com/deeplearning4j/pydl4j - * https://github.com/deeplearning4j/jumpy - * https://github.com/deeplearning4j/pydatavec + * https://github.com/eclipse/deeplearning4j/tree/master/libnd4j + * https://github.com/eclipse/deeplearning4j/tree/master/nd4j + * https://github.com/eclipse/deeplearning4j/tree/master/datavec + * https://github.com/eclipse/deeplearning4j/tree/master/arbiter + * https://github.com/eclipse/deeplearning4j/tree/master/nd4s + * https://github.com/eclipse/deeplearning4j/tree/master/gym-java-client + * https://github.com/eclipse/deeplearning4j/tree/master/rl4j + * https://github.com/eclipse/deeplearning4j/tree/master/scalnet + * https://github.com/eclipse/deeplearning4j/tree/master/pydl4j + * https://github.com/eclipse/deeplearning4j/tree/master/jumpy + * https://github.com/eclipse/deeplearning4j/tree/master/pydatavec To build everything, we can use commands like @@ -30,6 +30,6 @@ mvn -B -V -U clean install -pl '!jumpy,!pydatavec,!pydl4j' -Dlibnd4j.platform=li An example of GPU "CC" or compute capability is 61 for Titan X Pascal. # Want some examples? -We have separate repository with various examples available: https://github.com/deeplearning4j/dl4j-examples +We have separate repository with various examples available: https://github.com/eclipse/deeplearning4j-examples -In the examples repo, you'll also find a tutorial series in Zeppelin: https://github.com/deeplearning4j/dl4j-examples/tree/master/tutorials +In the examples repo, you'll also find a tutorial series in Zeppelin: https://github.com/eclipse/deeplearning4j-examples/tree/master/tutorials diff --git a/arbiter/.github/CONTRIBUTING.md b/arbiter/.github/CONTRIBUTING.md deleted file mode 100644 index 17598f387..000000000 --- a/arbiter/.github/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -## Contribute - -1. Check for open issues, or open a new issue to start a discussion around a feature idea or a bug. -2. If you feel uncomfortable or uncertain about an issue or your changes, feel free to contact us on Gitter using the link above. -3. Fork [the repository](https://github.com/deeplearning4j/Arbiter.git) on GitHub to start making your changes to the **master** branch (or branch off of it). -4. Write a test, which shows that the bug was fixed or that the feature works as expected. -5. Note the repository follows - the [Google Java style](https://google.github.io/styleguide/javaguide.html) - with two modifications: 120-char column wrap and 4-spaces indentation. 
You - can format your code to this format by typing `mvn formatter:format` in the - subproject you work on, by using the `contrib/formatter.xml` at the root of - the repository to configure the Eclipse formatter, or by [using the INtellij - plugin](https://github.com/HPI-Information-Systems/Metanome/wiki/Installing-the-google-styleguide-settings-in-intellij-and-eclipse). - -6. Send a pull request, and bug us on Gitter until it gets merged and published. diff --git a/arbiter/.github/ISSUE_TEMPLATE.md b/arbiter/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 9dd585d22..000000000 --- a/arbiter/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,19 +0,0 @@ -#### Issue Description - -Please describe your issue, along with: -- expected behavior -- encountered behavior - -#### Version Information - -Please indicate relevant versions, including, if relevant: - -* Deeplearning4j version -* platform information (OS, etc) -* CUDA version, if used -* NVIDIA driver version, if in use - -#### Contributing - -If you'd like to help us fix the issue by contributing some code, but would -like guidance or help in doing so, please mention it! diff --git a/arbiter/.github/PULL_REQUEST_TEMPLATE.md b/arbiter/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 1f92bacd6..000000000 --- a/arbiter/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,10 +0,0 @@ -## What changes were proposed in this pull request? - -(Please fill in changes proposed in this fix) - -## How was this patch tested? - -(Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) - -Please review -https://github.com/deeplearning4j/deeplearning4j/blob/master/CONTRIBUTING.md before opening a pull request. diff --git a/datavec/.github/CONTRIBUTING.md b/datavec/.github/CONTRIBUTING.md deleted file mode 100644 index a4ea5dc58..000000000 --- a/datavec/.github/CONTRIBUTING.md +++ /dev/null @@ -1,15 +0,0 @@ -## Contribute - -1. Check for open issues, or open a new issue to start a discussion around a feature idea or a bug. -2. If you feel uncomfortable or uncertain about an issue or your changes, feel free to contact us on Gitter using the link above. -3. Fork [the repository](https://github.com/deeplearning4j/DataVec.git) on GitHub to start making your changes to the **master** branch (or branch off of it). -4. Write a test, which shows that the bug was fixed or that the feature works as expected. -5. Note the repository follows - the [Google Java style](https://google.github.io/styleguide/javaguide.html) - with two modifications: 120-char column wrap and 4-spaces indentation. You - can format your code to this format by typing `mvn formatter:format` in the - subproject you work on, by using the `contrib/formatter.xml` at the root of - the repository to configure the Eclipse formatter, or by [using the INtellij - plugin](https://github.com/HPI-Information-Systems/Metanome/wiki/Installing-the-google-styleguide-settings-in-intellij-and-eclipse). - -6. Send a pull request, and bug us on Gitter until it gets merged and published. 
diff --git a/datavec/.github/ISSUE_TEMPLATE.md b/datavec/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 9dd585d22..000000000 --- a/datavec/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,19 +0,0 @@ -#### Issue Description - -Please describe your issue, along with: -- expected behavior -- encountered behavior - -#### Version Information - -Please indicate relevant versions, including, if relevant: - -* Deeplearning4j version -* platform information (OS, etc) -* CUDA version, if used -* NVIDIA driver version, if in use - -#### Contributing - -If you'd like to help us fix the issue by contributing some code, but would -like guidance or help in doing so, please mention it! diff --git a/datavec/.github/PULL_REQUEST_TEMPLATE.md b/datavec/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 1f92bacd6..000000000 --- a/datavec/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,10 +0,0 @@ -## What changes were proposed in this pull request? - -(Please fill in changes proposed in this fix) - -## How was this patch tested? - -(Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) - -Please review -https://github.com/deeplearning4j/deeplearning4j/blob/master/CONTRIBUTING.md before opening a pull request. diff --git a/datavec/README.md b/datavec/README.md index 7188db359..e631d7510 100644 --- a/datavec/README.md +++ b/datavec/README.md @@ -32,7 +32,7 @@ static data and for sequences (time series). Such operations can be executed on Apart from obviously providing readers for classic data formats, DataVec also provides an interface. So if you wanted to ingest specific custom data, you wouldn't have to build the whole pipeline. You would just have to write the very first step. For example, if you describe through the API how your data fits into a common format that complies with the interface, DataVec -would return a list of Writables for each record. You'll find more detail on the API in the corresponding [module](https://github.com/deeplearning4j/DataVec/tree/master/datavec-api). +would return a list of Writables for each record. You'll find more detail on the API in the corresponding [module](https://github.com/eclipse/deeplearning4j/tree/master/datavec/datavec-api). Another thing you can do with DataVec is data cleaning. Instead of having clean, ready-to-go data, let's say you start with data in different forms or from different sources. You might need to do sampling, filtering, or several incredibly messy ETL tasks needed to prepare data in the real world. DataVec offers filters and transformations that help with curating, preparing and massaging your data. It leverages Apache Spark to do this at scale. @@ -51,7 +51,7 @@ to be locked into a single tool, and using [Apache Flink](https://flink.apache.o ## Examples Examples for using DataVec are available -here: [https://github.com/deeplearning4j/dl4j-examples](https://github.com/deeplearning4j/dl4j-examples) +here: [https://github.com/eclipse/deeplearning4j-examples](https://github.com/eclipse/deeplearning4j-examples) --- @@ -91,7 +91,7 @@ It's useful to know which maintainers to contact to get information on a particu 1. Check for open issues, or open a new issue to start a discussion around a feature idea or a bug. 2. If you feel uncomfortable or uncertain about an issue or your changes, feel free to contact us on Gitter using the link above. -3. Fork [the repository](https://github.com/deeplearning4j/datavec.git) on GitHub to start making your changes. +3. 
Fork [the repository](https://github.com/eclipse/deeplearning4j.git) on GitHub to start making your changes. 4. Write a test, which shows that the bug was fixed or that the feature works as expected. 5. Note the repository follows the [Google Java style](https://google.github.io/styleguide/javaguide.html) with two modifications: 120-char column wrap and 4-spaces indentation. You can format your code to this format by typing `mvn diff --git a/deeplearning4j/.github/CONTRIBUTING.md b/deeplearning4j/.github/CONTRIBUTING.md deleted file mode 120000 index 44fcc6343..000000000 --- a/deeplearning4j/.github/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/deeplearning4j/.github/ISSUE_TEMPLATE.md b/deeplearning4j/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index 95d46fee4..000000000 --- a/deeplearning4j/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,19 +0,0 @@ -#### Issue Description - -Please describe our issue, along with: -- expected behavior -- encountered behavior - -#### Version Information - -Please indicate relevant versions, including, if relevant: - -* Deeplearning4j version -* platform information (OS, etc) -* CUDA version, if used -* NVIDIA driver version, if in use - -#### Contributing - -If you'd like to help us fix the issue by contributing some code, but would -like guidance or help in doing so, please mention it! diff --git a/deeplearning4j/.github/PULL_REQUEST_TEMPLATE.md b/deeplearning4j/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 44316b06f..000000000 --- a/deeplearning4j/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,16 +0,0 @@ -## What changes were proposed in this pull request? - -(Please fill in changes proposed in this fix) - -## How was this patch tested? - -(Please explain how this patch was tested. E.g. unit tests, integration tests, manual tests) - -## Quick checklist - -The following checklist helps ensure your PR is complete: - -- [ ] Reviewed the [Contributing Guidelines](https://github.com/deeplearning4j/deeplearning4j/blob/master/CONTRIBUTING.md) and followed the steps within. -- [ ] Created tests for any significant new code additions. -- [ ] Relevant tests for your changes are passing. -- [ ] Ran mvn formatter:format (see [formatter instructions](http://code.revelc.net/formatter-maven-plugin/examples.html#Setting_Source_Files) for targeting your specific files). diff --git a/deeplearning4j/.github/lock.yml b/deeplearning4j/.github/lock.yml deleted file mode 100644 index cdf578845..000000000 --- a/deeplearning4j/.github/lock.yml +++ /dev/null @@ -1,28 +0,0 @@ -# Configuration for lock-threads - https://github.com/dessant/lock-threads - -# Number of days of inactivity before a closed issue or pull request is locked -daysUntilLock: 45 - -# Issues and pull requests with these labels will not be locked. Set to `[]` to disable -exemptLabels: [] - -# Label to add before locking, such as `outdated`. Set to `false` to disable -lockLabel: Outdated - -# Comment to post before locking. Set to `false` to disable -lockComment: > - This thread has been automatically locked since there has not been - any recent activity after it was closed. Please open a new issue for - related bugs. 
- -# Limit to only `issues` or `pulls` -# only: issues - -# Optionally, specify configuration settings just for `issues` or `pulls` -# issues: -# exemptLabels: -# - help-wanted -# lockLabel: outdated - -# pulls: -# daysUntilLock: 30 \ No newline at end of file diff --git a/deeplearning4j/GITTER_GUIDELINES.md b/deeplearning4j/GITTER_GUIDELINES.md index a8a43964a..fdb5609a8 100644 --- a/deeplearning4j/GITTER_GUIDELINES.md +++ b/deeplearning4j/GITTER_GUIDELINES.md @@ -7,14 +7,14 @@ Welcome, stranger. You probably just joined a Gitter channel for Deeplearning4j. 3. We're doing our best to improve the documentation, but it's not perfect. We welcome ideas about how to improve it! Writing good docs is our responsibility; reading them is yours. Please consult the docs before you post in the channel. A little effort from you will earn a lot of respect from us. (DL4J is backed by a startup, Skymind, and we are serving customers as well as the open-source community, which feels like a lot sometimes.) * User guide: [https://deeplearning4j.org/docs/latest/](https://deeplearning4j.org/docs/latest/) * API: [https://deeplearning4j.org/api/latest/](https://deeplearning4j.org/api/latest/) -4. We welcome new contributors! Once you get familiar with the libs, if you see how our code can be improved, please file an issue or consider sending us a pull request with the new feature. [https://github.com/deeplearning4j/deeplearning4j/issues](https://github.com/deeplearning4j/deeplearning4j/issues) +4. We welcome new contributors! Once you get familiar with the libs, if you see how our code can be improved, please file an issue or consider sending us a pull request with the new feature. [https://github.com/eclipse/deeplearning4j/issues](https://github.com/eclipse/deeplearning4j/issues) Many of the questions asked on the Deeplearning4j Gitter support channel have been answered already in our documentation or can be easily Googled. To respect the Skymind team's time, Deeplearning4j users are kindly asked to remember a few things: 1. Please use Google before you ask a question. The Deeplearning4j Gitter channel should not be used as a human-enhanced search engine. (We promise that you'll end up with a better open-source framework if you only ask us the hard questions...) Please remember that DL4J Gitter channels are devoted to DL4J and other Skymind libraries specifically. We can't help with other frameworks or tools, which have their own docs and communities. 2. If you don't receive an immediate response, please post again and flag your question with the Gitter ID of one of the people in the channel answering questions. If you do receive a response and link, please spend some time reading and trying to understand the response and additional resources before you ask the same question again. 3. To answer questions, we need to know about your OS, Java version, Maven version and we may even need to see your code and stacktrace. When we ask for code, please send us a gist using https://gist.github.com/. If you can't give us code, in many cases we can't help you. (It's also a great sign of commitment on your part!) -4. We're not perfect, and neither is our documentation. If you find ways for us to improve, please open an issue [here](https://github.com/deeplearning4j/deeplearning4j/issues) or email us at help@skymind.io and let us know what we need to fix. +4. We're not perfect, and neither is our documentation. 
If you find ways for us to improve, please open an issue [here](https://github.com/eclipse/deeplearning4j/issues) or email us at help@skymind.io and let us know what we need to fix. 5. Neural nets aren't magic. They are inherently hard to tune. We get many questions from beginners on how to tune neural nets. If you must post a question related to tuning, please post to: https://gitter.im/deeplearning4j/deeplearning4j/tuninghelp ### Guidelines for Help with Neural Network Tuning: @@ -26,7 +26,7 @@ Providing help for tuning neural networks can be quite time consuming for the de 1. Before posting a question, please first read both [https://deeplearning4j.org/docs/latest/deeplearning4j-troubleshooting-training](https://deeplearning4j.org/docs/latest/deeplearning4j-troubleshooting-training) and [https://deeplearning4j.org/docs/latest/deeplearning4j-nn-visualization](https://deeplearning4j.org/docs/latest/deeplearning4j-nn-visualization). You may also find an answer to your question on one of the other pages: [https://deeplearning4j.org/docs/latest/](https://deeplearning4j.org/docs/latest/) 2. We generally won't answer questions that can be easily answered by searching Google or reading something like Andrej Karpathy's Stanford course on convolutional networks [http://cs231n.github.io/](http://cs231n.github.io/) or Ian Goodfellow and Yoshua Bengio's deep learning book [http://www.deeplearningbook.org/](http://www.deeplearningbook.org/) 3. For some questions/issues, it may not be possible to provide a short/simple answer to your question. In these cases, we might decide to answer your question by improving our documentation, instead of answering your question directly in Gitter. Please understand that improving our documentation helps *everyone* and is a better use of the team's time than answering one-off questions. -4. You should generally feel free to open issues ([https://github.com/deeplearning4j/deeplearning4j/issues](https://github.com/deeplearning4j/deeplearning4j/issues)) if you feel our documentation (troubleshooting/tuning) is lacking or doesn't answer common questions. +4. You should generally feel free to open issues ([https://github.com/eclipse/deeplearning4j/issues](https://github.com/eclipse/deeplearning4j/issues)) if you feel our documentation (troubleshooting/tuning) is lacking or doesn't answer common questions. 5. Upon entering the room, please do more than say "hi". Information-rich questions and comments are appreciated. Please keep the content relevant. Please note the below channels for different parts of the conversation. * Contributors/building from source: [https://gitter.im/deeplearning4j/deeplearning4j/earlyadopters](https://gitter.im/deeplearning4j/deeplearning4j/earlyadopters) diff --git a/deeplearning4j/README.md b/deeplearning4j/README.md index f7892cb8f..517cc820e 100755 --- a/deeplearning4j/README.md +++ b/deeplearning4j/README.md @@ -38,7 +38,7 @@ To get started using Deeplearning4j, please go to our [Quickstart](https://deepl --- ## Documentation -Documentation is available at [deeplearning4j.org](https://deeplearning4j.org/overview) and [JavaDocs](https://deeplearning4j.org/api/latest/). Open-source contributors can help us improve our documentation for Deeplearning4j by sending pull requests for the DL4J website [here](https://github.com/deeplearning4j/deeplearning4j/tree/gh-pages) and ND4J [here](https://github.com/deeplearning4j/nd4j/tree/gh-pages). 
+Documentation is available at [deeplearning4j.org](https://deeplearning4j.org/overview) and [JavaDocs](https://deeplearning4j.org/api/latest/). Open-source contributors can help us improve our documentation for Deeplearning4j by sending pull requests for the DL4J website [here](https://github.com/eclipse/deeplearning4j-docs) ## Support @@ -52,7 +52,7 @@ To install Deeplearning4J, see our [Quickstart](https://deeplearning4j.org/docs/ Search Maven Central for [deeplearning4j](https://search.maven.org/#search%7Cga%7C1%7Cdeeplearning4j) to get a list of dependencies. -Add the dependency information to your `pom.xml` file. **We highly recommend downloading via Maven unless you plan to help us develop DL4J.** An easy way to get up-to-date dependencies is to use the ones listed in our [dl4j-examples POM](https://github.com/deeplearning4j/dl4j-examples/blob/master/pom.xml). +Add the dependency information to your `pom.xml` file. **We highly recommend downloading via Maven unless you plan to help us develop DL4J.** An easy way to get up-to-date dependencies is to use the ones listed in our [dl4j-examples POM](https://github.com/eclipse/deeplearning4j-examples/blob/master/pom.xml).
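[Editor's note, not part of the patch] For readers following the README's Maven instructions above, a minimal illustrative `pom.xml` dependency section is sketched below. The artifact IDs are the commonly used DL4J/ND4J coordinates; the `dl4j.version` property is a placeholder only — take the actual, up-to-date version from the dl4j-examples POM linked above.

```xml
<!-- Illustrative sketch only: check the dl4j-examples POM for the current version -->
<properties>
    <!-- placeholder; replace with the release you intend to use -->
    <dl4j.version>1.0.0-betaX</dl4j.version>
</properties>

<dependencies>
    <!-- Core Deeplearning4j library -->
    <dependency>
        <groupId>org.deeplearning4j</groupId>
        <artifactId>deeplearning4j-core</artifactId>
        <version>${dl4j.version}</version>
    </dependency>
    <!-- CPU backend for ND4J; swap for a CUDA backend artifact if training on GPU -->
    <dependency>
        <groupId>org.nd4j</groupId>
        <artifactId>nd4j-native-platform</artifactId>
        <version>${dl4j.version}</version>
    </dependency>
</dependencies>
```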