diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4e75d7bfe..0a25d9775 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -9,7 +9,7 @@ Deeplearning4j's [open issues are here](https://github.com/eclipse/deeplearning4
Note that you will need to [build dl4j from source](https://deeplearning4j.org/docs/latest/deeplearning4j-build-from-source)
-For some tips on contributing to open source, this [post is helpful](http://blog.smartbear.com/programming/14-ways-to-contribute-to-open-source-without-being-a-programming-genius-or-a-rock-star/).
+For some tips on contributing to open source, this [post is helpful](https://smartbear.com/blog/test-and-monitor/14-ways-to-contribute-to-open-source-without-being/).
## Contributions
diff --git a/arbiter/arbiter-core/src/assembly/bin.xml b/arbiter/arbiter-core/src/assembly/bin.xml
index cc6920b24..c99d6b144 100644
--- a/arbiter/arbiter-core/src/assembly/bin.xml
+++ b/arbiter/arbiter-core/src/assembly/bin.xml
@@ -61,7 +61,7 @@
examples
diff --git a/datavec/datavec-api/pom.xml b/datavec/datavec-api/pom.xml
index 022f2e38b..b3401b431 100644
--- a/datavec/datavec-api/pom.xml
+++ b/datavec/datavec-api/pom.xml
@@ -52,11 +52,6 @@
             <artifactId>joda-time</artifactId>
             <version>${jodatime.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.yaml</groupId>
-            <artifactId>snakeyaml</artifactId>
-            <version>${snakeyaml.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.nd4j</groupId>
diff --git a/datavec/datavec-arrow/pom.xml b/datavec/datavec-arrow/pom.xml
index 645971a45..6134bbf27 100644
--- a/datavec/datavec-arrow/pom.xml
+++ b/datavec/datavec-arrow/pom.xml
@@ -29,21 +29,11 @@
     <artifactId>datavec-arrow</artifactId>
 
-        <dependency>
-            <groupId>org.nd4j</groupId>
-            <artifactId>nd4j-arrow</artifactId>
-            <version>${project.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.datavec</groupId>
             <artifactId>datavec-api</artifactId>
             <version>${project.version}</version>
         </dependency>
-        <dependency>
-            <groupId>com.carrotsearch</groupId>
-            <artifactId>hppc</artifactId>
-            <version>${hppc.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.apache.arrow</groupId>
             <artifactId>arrow-vector</artifactId>
diff --git a/datavec/datavec-data/datavec-data-nlp/pom.xml b/datavec/datavec-data/datavec-data-nlp/pom.xml
index 17ad11211..12df0fb08 100644
--- a/datavec/datavec-data/datavec-data-nlp/pom.xml
+++ b/datavec/datavec-data/datavec-data-nlp/pom.xml
@@ -44,26 +44,6 @@
             <artifactId>datavec-api</artifactId>
             <version>${project.version}</version>
         </dependency>
-        <dependency>
-            <groupId>commons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-            <version>${commons-logging.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-core</artifactId>
-            <version>${spring.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-context</artifactId>
-            <version>${spring.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-beans</artifactId>
-            <version>${spring.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.cleartk</groupId>
             <artifactId>cleartk-snowball</artifactId>
diff --git a/datavec/datavec-geo/pom.xml b/datavec/datavec-geo/pom.xml
index 15c22ba3b..50e843555 100644
--- a/datavec/datavec-geo/pom.xml
+++ b/datavec/datavec-geo/pom.xml
@@ -31,36 +31,6 @@
             <artifactId>datavec-api</artifactId>
             <version>${project.version}</version>
         </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version>${geo.jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>${geo.jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>${geo.jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.dataformat</groupId>
-            <artifactId>jackson-dataformat-yaml</artifactId>
-            <version>${geo.jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.dataformat</groupId>
-            <artifactId>jackson-dataformat-xml</artifactId>
-            <version>${geo.jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.datatype</groupId>
-            <artifactId>jackson-datatype-joda</artifactId>
-            <version>${geo.jackson.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.maxmind.geoip2</groupId>
             <artifactId>geoip2</artifactId>
diff --git a/datavec/datavec-hadoop/pom.xml b/datavec/datavec-hadoop/pom.xml
index c95e6d3bc..5ec6d4c3f 100644
--- a/datavec/datavec-hadoop/pom.xml
+++ b/datavec/datavec-hadoop/pom.xml
@@ -35,41 +35,11 @@
             <version>${project.version}</version>
         </dependency>
-        <dependency>
-            <groupId>com.sun.xml.bind</groupId>
-            <artifactId>jaxb-core</artifactId>
-            <version>${jaxb.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.xml.bind</groupId>
-            <artifactId>jaxb-impl</artifactId>
-            <version>${jaxb.version}</version>
-        </dependency>
         <dependency>
             <groupId>io.netty</groupId>
             <artifactId>netty</artifactId>
             <version>${netty.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-compress</artifactId>
-            <version>${commons-compress.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.zookeeper</groupId>
-            <artifactId>zookeeper</artifactId>
-            <version>${zookeeper.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
         <dependency>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-common</artifactId>
diff --git a/datavec/datavec-local/pom.xml b/datavec/datavec-local/pom.xml
index f286eeb95..d2b15ffed 100644
--- a/datavec/datavec-local/pom.xml
+++ b/datavec/datavec-local/pom.xml
@@ -73,42 +73,7 @@
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version>${geo.jackson.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>${geo.jackson.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>${geo.jackson.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.dataformat</groupId>
-            <artifactId>jackson-dataformat-yaml</artifactId>
-            <version>${geo.jackson.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.dataformat</groupId>
-            <artifactId>jackson-dataformat-xml</artifactId>
-            <version>${geo.jackson.version}</version>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.datatype</groupId>
-            <artifactId>jackson-datatype-joda</artifactId>
-            <version>${geo.jackson.version}</version>
-            <scope>test</scope>
-        </dependency>
+
         <dependency>
             <groupId>org.datavec</groupId>
             <artifactId>datavec-python</artifactId>
diff --git a/datavec/datavec-perf/pom.xml b/datavec/datavec-perf/pom.xml
index fb4eaaa89..95f3135e5 100644
--- a/datavec/datavec-perf/pom.xml
+++ b/datavec/datavec-perf/pom.xml
@@ -41,11 +41,6 @@
             <artifactId>slf4j-api</artifactId>
             <version>${slf4j.version}</version>
         </dependency>
-        <dependency>
-            <groupId>com.github.oshi</groupId>
-            <artifactId>oshi-core</artifactId>
-            <version>${oshi.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.datavec</groupId>
             <artifactId>datavec-data-image</artifactId>
diff --git a/datavec/datavec-spark-inference-parent/datavec-spark-inference-client/pom.xml b/datavec/datavec-spark-inference-parent/datavec-spark-inference-client/pom.xml
index 076c22ab9..95f13081f 100644
--- a/datavec/datavec-spark-inference-parent/datavec-spark-inference-client/pom.xml
+++ b/datavec/datavec-spark-inference-parent/datavec-spark-inference-client/pom.xml
@@ -41,26 +41,6 @@
             <version>1.0.0-SNAPSHOT</version>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-            <version>${commons-codec.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-            <version>${httpclient.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpcore</artifactId>
-            <version>${httpcore.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpmime</artifactId>
-            <version>${httpmime.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.mashape.unirest</groupId>
             <artifactId>unirest-java</artifactId>
diff --git a/datavec/datavec-spark-inference-parent/datavec-spark-inference-server/pom.xml b/datavec/datavec-spark-inference-parent/datavec-spark-inference-server/pom.xml
index 8bef216a7..77eff8758 100644
--- a/datavec/datavec-spark-inference-parent/datavec-spark-inference-server/pom.xml
+++ b/datavec/datavec-spark-inference-parent/datavec-spark-inference-server/pom.xml
@@ -94,12 +94,6 @@
             <version>${scala.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.yaml</groupId>
-            <artifactId>snakeyaml</artifactId>
-            <version>${snakeyaml.version}</version>
-        </dependency>
-
         <dependency>
             <groupId>com.typesafe.play</groupId>
             <artifactId>play-java_2.11</artifactId>
diff --git a/datavec/datavec-spark/pom.xml b/datavec/datavec-spark/pom.xml
index f7143c6ea..72f0b105f 100644
--- a/datavec/datavec-spark/pom.xml
+++ b/datavec/datavec-spark/pom.xml
@@ -39,11 +39,6 @@
             <artifactId>scala-library</artifactId>
             <version>${scala.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-reflect</artifactId>
-            <version>${scala.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.apache.spark</groupId>
diff --git a/deeplearning4j/deeplearning4j-common/src/main/java/org/deeplearning4j/common/resources/DL4JResources.java b/deeplearning4j/deeplearning4j-common/src/main/java/org/deeplearning4j/common/resources/DL4JResources.java
index a28ad375d..fab713e8e 100644
--- a/deeplearning4j/deeplearning4j-common/src/main/java/org/deeplearning4j/common/resources/DL4JResources.java
+++ b/deeplearning4j/deeplearning4j-common/src/main/java/org/deeplearning4j/common/resources/DL4JResources.java
@@ -64,7 +64,7 @@ public class DL4JResources {
/**
* Set the base download URL for (most) DL4J datasets and models.
* This usually doesn't need to be set manually unless there is some issue with the default location
- * @param baseDownloadURL Base download URL to set. For example, http://blob.deeplearning4j.org/
+ * @param baseDownloadURL Base download URL to set. For example, https://dl4jdata.blob.core.windows.net/
*/
public static void setBaseDownloadURL(@NonNull String baseDownloadURL){
baseURL = baseDownloadURL;
@@ -79,8 +79,8 @@ public class DL4JResources {
/**
* Get the URL relative to the base URL.
- * For example, if baseURL is "http://blob.deeplearning4j.org/", and relativeToBase is "/datasets/iris.dat"
- * this simply returns "http://blob.deeplearning4j.org/datasets/iris.dat"
+ * For example, if baseURL is "https://dl4jdata.blob.core.windows.net/", and relativeToBase is "/datasets/iris.dat"
+ * this simply returns "https://dl4jdata.blob.core.windows.net/datasets/iris.dat"
*
* @param relativeToBase Relative URL
* @return URL
@@ -92,8 +92,8 @@ public class DL4JResources {
/**
* Get the URL relative to the base URL as a String.
- * For example, if baseURL is "http://blob.deeplearning4j.org/", and relativeToBase is "/datasets/iris.dat"
- * this simply returns "http://blob.deeplearning4j.org/datasets/iris.dat"
+ * For example, if baseURL is "https://dl4jdata.blob.core.windows.net/", and relativeToBase is "/datasets/iris.dat"
+ * this simply returns "https://dl4jdata.blob.core.windows.net/datasets/iris.dat"
*
* @param relativeToBase Relative URL
* @return URL
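For context on the javadoc changes above, here is a minimal sketch of how the base-URL helpers compose download URLs. It assumes the `getURLString(String)` helper that this javadoc documents; the override call is optional and normally unnecessary.

```java
import org.deeplearning4j.common.resources.DL4JResources;

public class ResourceUrlSketch {
    public static void main(String[] args) throws Exception {
        // Normally not needed: override the default base download URL.
        DL4JResources.setBaseDownloadURL("https://dl4jdata.blob.core.windows.net/");

        // Resolves relative to the base URL, as described in the javadoc above.
        String iris = DL4JResources.getURLString("datasets/iris.dat");
        System.out.println(iris); // https://dl4jdata.blob.core.windows.net/datasets/iris.dat
    }
}
```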
diff --git a/deeplearning4j/deeplearning4j-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java b/deeplearning4j/deeplearning4j-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java
index 7e3ae6720..f65e48f44 100644
--- a/deeplearning4j/deeplearning4j-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java
+++ b/deeplearning4j/deeplearning4j-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java
@@ -138,52 +138,55 @@ public class ValidateMKLDNN extends BaseDL4JTest {
ConvolutionMode cm = ConvolutionMode.Truncate;
for (int minibatch : new int[]{1, 3}) {
+ for (boolean b : new boolean[]{true, false}) {
- inputSize[0] = minibatch;
- INDArray f = Nd4j.rand(Nd4j.defaultFloatingPointType(), inputSize);
- INDArray l = TestUtils.randomOneHot(minibatch, 10);
+ inputSize[0] = minibatch;
+ INDArray f = Nd4j.rand(Nd4j.defaultFloatingPointType(), inputSize);
+ INDArray l = TestUtils.randomOneHot(minibatch, 10);
- MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
- .updater(new Adam(0.01))
- .convolutionMode(cm)
- .seed(12345)
- .list()
- .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
- .kernelSize(kernel)
- .stride(stride)
- .padding(0, 0)
- .nOut(3)
- .build())
- .layer(new BatchNormalization.Builder().helperAllowFallback(false)/*.eps(0)*/.build())
- .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
- .kernelSize(kernel)
- .stride(stride)
- .padding(0, 0)
- .nOut(3)
- .build())
- .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
- .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1]))
- .build();
+ MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
+ .dataType(DataType.FLOAT)
+ .updater(new Adam(0.01))
+ .convolutionMode(cm)
+ .seed(12345)
+ .list()
+ .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
+ .kernelSize(kernel)
+ .stride(stride)
+ .padding(0, 0)
+ .nOut(3)
+ .build())
+ .layer(new BatchNormalization.Builder().useLogStd(b).helperAllowFallback(false)/*.eps(0)*/.build())
+ .layer(new ConvolutionLayer.Builder().activation(Activation.TANH)
+ .kernelSize(kernel)
+ .stride(stride)
+ .padding(0, 0)
+ .nOut(3)
+ .build())
+ .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build())
+ .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1]))
+ .build();
- MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone());
- netWith.init();
+ MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone());
+ netWith.init();
- MultiLayerNetwork netWithout = new MultiLayerNetwork(conf.clone());
- netWithout.init();
+ MultiLayerNetwork netWithout = new MultiLayerNetwork(conf.clone());
+ netWithout.init();
- LayerHelperValidationUtil.TestCase tc = LayerHelperValidationUtil.TestCase.builder()
- .allowHelpersForClasses(Collections.<Class<?>>singletonList(org.deeplearning4j.nn.layers.normalization.BatchNormalization.class))
- .testForward(true)
- .testScore(true)
- .testBackward(true)
- .testTraining(true)
- .features(f)
- .labels(l)
- .data(new SingletonDataSetIterator(new DataSet(f, l)))
- .maxRelError(1e-4)
- .build();
+ LayerHelperValidationUtil.TestCase tc = LayerHelperValidationUtil.TestCase.builder()
+ .allowHelpersForClasses(Collections.<Class<?>>singletonList(org.deeplearning4j.nn.layers.normalization.BatchNormalization.class))
+ .testForward(true)
+ .testScore(true)
+ .testBackward(true)
+ .testTraining(true)
+ .features(f)
+ .labels(l)
+ .data(new SingletonDataSetIterator(new DataSet(f, l)))
+ .maxRelError(1e-4)
+ .build();
- LayerHelperValidationUtil.validateMLN(netWith, tc);
+ LayerHelperValidationUtil.validateMLN(netWith, tc);
+ }
}
}
diff --git a/deeplearning4j/deeplearning4j-graph/src/main/java/org/deeplearning4j/graph/models/deepwalk/DeepWalk.java b/deeplearning4j/deeplearning4j-graph/src/main/java/org/deeplearning4j/graph/models/deepwalk/DeepWalk.java
index 0bc633895..0ba9217ec 100644
--- a/deeplearning4j/deeplearning4j-graph/src/main/java/org/deeplearning4j/graph/models/deepwalk/DeepWalk.java
+++ b/deeplearning4j/deeplearning4j-graph/src/main/java/org/deeplearning4j/graph/models/deepwalk/DeepWalk.java
@@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicLong;
/**Implementation of the DeepWalk graph vectorization model, based on the paper
* DeepWalk: Online Learning of Social Representations by Perozzi, Al-Rfou & Skiena (2014),
- * http://arxiv.org/abs/1403.6652
+ * https://arxiv.org/abs/1403.6652
* Similar to word2vec in nature, DeepWalk is an unsupervised learning algorithm that learns a vector representation
* of each vertex in a graph. Vector representations are learned using walks (usually random walks) on the vertices in
* the graph.
diff --git a/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/InvalidKerasConfigurationException.java b/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/InvalidKerasConfigurationException.java
index bea7fa2ad..db51cb499 100644
--- a/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/InvalidKerasConfigurationException.java
+++ b/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/InvalidKerasConfigurationException.java
@@ -40,6 +40,6 @@ public class InvalidKerasConfigurationException extends Exception {
}
private static String appendDocumentationURL(String message) {
- return message + ". For more information, see http://deeplearning4j.org/model-import-keras.";
+ return message + ". For more information, see http://deeplearning4j.org/docs/latest/keras-import-overview";
}
}
diff --git a/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/UnsupportedKerasConfigurationException.java b/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/UnsupportedKerasConfigurationException.java
index c540bcd64..6244cf1e8 100644
--- a/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/UnsupportedKerasConfigurationException.java
+++ b/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/exceptions/UnsupportedKerasConfigurationException.java
@@ -22,7 +22,7 @@ package org.deeplearning4j.nn.modelimport.keras.exceptions;
* is not currently supported.
*
* See https://deeplearning4j.org/docs/latest/keras-import-overview
- * for more information and file an issue at http://github.com/deeplearning4j/deeplearning4j/issues.
+ * for more information and file an issue at https://github.com/eclipse/deeplearning4j/issues.
*
* @author dave@skymind.io
*/
@@ -41,6 +41,6 @@ public class UnsupportedKerasConfigurationException extends Exception {
}
private static String appendDocumentationURL(String message) {
- return message + ". Please file an issue at http://github.com/deeplearning4j/deeplearning4j/issues.";
+ return message + ". Please file an issue at https://github.com/eclipse/deeplearning4j/issues.";
}
}
diff --git a/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/embeddings/KerasEmbedding.java b/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/embeddings/KerasEmbedding.java
index 6bc1c4129..2a34f707c 100644
--- a/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/embeddings/KerasEmbedding.java
+++ b/deeplearning4j/deeplearning4j-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/embeddings/KerasEmbedding.java
@@ -104,7 +104,7 @@ public class KerasEmbedding extends KerasLayer {
"on Embedding layers. Zero Masking for the Embedding layer only works with unidirectional LSTM for now."
+ " If you want to have this behaviour for your imported model " +
"in DL4J, apply masking as a pre-processing step to your input." +
- "See https://deeplearning4j.org/usingrnns#masking for more on this.");
+ "See http://deeplearning4j.org/docs/latest/deeplearning4j-nn-recurrent#masking for more on this.");
Pair init = getWeightInitFromConfig(layerConfig, conf.getLAYER_FIELD_EMBEDDING_INIT(),
enforceTrainingConfig, conf, kerasMajorVersion);
diff --git a/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbor-server/pom.xml b/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbor-server/pom.xml
index 7477c7794..38ee4204c 100644
--- a/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbor-server/pom.xml
+++ b/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbor-server/pom.xml
@@ -77,71 +77,6 @@
${project.version}
-        <dependency>
-            <groupId>com.google.protobuf</groupId>
-            <artifactId>protobuf-java</artifactId>
-            <version>${google.protobuf.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>joda-time</groupId>
-            <artifactId>joda-time</artifactId>
-            <version>${jodatime.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-            <version>${commons-lang3.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-validator</artifactId>
-            <version>${hibernate.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-library</artifactId>
-            <version>${scala.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-reflect</artifactId>
-            <version>${scala.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.yaml</groupId>
-            <artifactId>snakeyaml</artifactId>
-            <version>${snakeyaml.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.datatype</groupId>
-            <artifactId>jackson-datatype-jdk8</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.datatype</groupId>
-            <artifactId>jackson-datatype-jsr310</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.typesafe</groupId>
-            <artifactId>config</artifactId>
-            <version>${typesafe.config.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.typesafe.play</groupId>
             <artifactId>play-java_2.11</artifactId>
diff --git a/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbors-client/pom.xml b/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbors-client/pom.xml
index 57248c559..d6b64b025 100644
--- a/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbors-client/pom.xml
+++ b/deeplearning4j/deeplearning4j-nearestneighbors-parent/deeplearning4j-nearestneighbors-client/pom.xml
@@ -31,21 +31,6 @@
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-            <version>${httpclient.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpcore</artifactId>
-            <version>${httpcore.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpmime</artifactId>
-            <version>${httpmime.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.mashape.unirest</groupId>
             <artifactId>unirest-java</artifactId>
diff --git a/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/quadtree/QuadTree.java b/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/quadtree/QuadTree.java
index f1cc2e304..0fbf8afec 100644
--- a/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/quadtree/QuadTree.java
+++ b/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/quadtree/QuadTree.java
@@ -29,7 +29,7 @@ import static java.lang.Math.max;
* QuadTree: http://en.wikipedia.org/wiki/Quadtree
*
* Reference impl based on the paper by:
- * http://arxiv.org/pdf/1301.3342v2.pdf
+ * https://arxiv.org/pdf/1301.3342v2.pdf
*
* Primarily focused on 2 dimensions, may expand later if there's a reason.
*
diff --git a/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/util/MathUtils.java b/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/util/MathUtils.java
index ce6ddcff7..792231c7e 100755
--- a/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/util/MathUtils.java
+++ b/deeplearning4j/deeplearning4j-nearestneighbors-parent/nearestneighbor-core/src/main/java/org/deeplearning4j/clustering/util/MathUtils.java
@@ -86,7 +86,7 @@ public class MathUtils {
/**
- * See: http://stackoverflow.com/questions/466204/rounding-off-to-nearest-power-of-2
+ * See: https://stackoverflow.com/questions/466204/rounding-off-to-nearest-power-of-2
* @param v the number to getFromOrigin the next power of 2 for
* @return the next power of 2 for the passed in value
*/
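The linked StackOverflow question describes the standard bit-twiddling approach; the following is a self-contained sketch of that trick (not MathUtils' actual implementation) for reference:

```java
public class NextPowerOfTwo {
    // Classic bit-twiddling approach from the StackOverflow answer referenced above:
    // propagate the highest set bit into all lower positions, then add one.
    static long nextPowerOf2(long v) {
        v--;
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        v |= v >> 32;
        return v + 1;
    }

    public static void main(String[] args) {
        System.out.println(nextPowerOf2(5));  // 8
        System.out.println(nextPowerOf2(64)); // 64 (already a power of two)
    }
}
```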
diff --git a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-chinese/pom.xml b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-chinese/pom.xml
index b72cb721d..35eb2903d 100644
--- a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-chinese/pom.xml
+++ b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-chinese/pom.xml
@@ -52,12 +52,6 @@
             <artifactId>deeplearning4j-nlp</artifactId>
             <version>${project.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.nutz</groupId>
-            <artifactId>nutz</artifactId>
-            <version>1.r.58</version>
-            <scope>provided</scope>
-        </dependency>
         <dependency>
             <groupId>org.nlpcn</groupId>
             <artifactId>nlp-lang</artifactId>
diff --git a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-uima/pom.xml b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-uima/pom.xml
index d27625e9f..44fbbcf9d 100644
--- a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-uima/pom.xml
+++ b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp-uima/pom.xml
@@ -33,26 +33,6 @@
-        <dependency>
-            <groupId>commons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-            <version>${commons-logging.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-core</artifactId>
-            <version>${spring.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-context</artifactId>
-            <version>${spring.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-beans</artifactId>
-            <version>${spring.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.cleartk</groupId>
             <artifactId>cleartk-snowball</artifactId>
diff --git a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/pom.xml b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/pom.xml
index 62c0c73f9..3f367689c 100644
--- a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/pom.xml
+++ b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/pom.xml
@@ -54,11 +54,6 @@
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>org.objenesis</groupId>
-            <artifactId>objenesis</artifactId>
-            <version>${objenesis.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.mockito</groupId>
             <artifactId>mockito-core</artifactId>
@@ -66,16 +61,6 @@
             <scope>test</scope>
         </dependency>
-
-
-
-
-        <dependency>
-            <groupId>org.nd4j</groupId>
-            <artifactId>nd4j-jackson</artifactId>
-            <version>${nd4j.version}</version>
-        </dependency>
-
         <dependency>
             <groupId>ch.qos.logback</groupId>
             <artifactId>logback-classic</artifactId>
diff --git a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/PopularityWalker.java b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/PopularityWalker.java
index b48ddb2ea..05d69e94c 100644
--- a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/PopularityWalker.java
+++ b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/PopularityWalker.java
@@ -42,7 +42,7 @@ import java.util.*;
* Instead of rand walks, this walker produces walks based on number of edges coming into each node.
* This allows you to build walks filtering too rare nodes, or too popular nodes, depending on your demands.
*
- * Original DeepWalk paper: http://arxiv.org/pdf/1403.6652v2
+ * Original DeepWalk paper: https://arxiv.org/pdf/1403.6652v2
* @author raver119@gmail.com
*/
public class PopularityWalker extends RandomWalker implements GraphWalker {
diff --git a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/RandomWalker.java b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/RandomWalker.java
index 922dbbe27..b422a52d1 100644
--- a/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/RandomWalker.java
+++ b/deeplearning4j/deeplearning4j-nlp-parent/deeplearning4j-nlp/src/main/java/org/deeplearning4j/models/sequencevectors/graph/walkers/impl/RandomWalker.java
@@ -37,7 +37,7 @@ import java.util.concurrent.atomic.AtomicInteger;
/**
* This is Random-based walker for SequenceVectors-based DeepWalk implementation
*
- * Original DeepWalk paper: http://arxiv.org/pdf/1403.6652v2
+ * Original DeepWalk paper: https://arxiv.org/pdf/1403.6652v2
*
* @author AlexDBlack
* @author raver119@gmail.com
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/GradientNormalization.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/GradientNormalization.java
index 01bd9cf3d..05b1c6638 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/GradientNormalization.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/GradientNormalization.java
@@ -52,7 +52,7 @@ package org.deeplearning4j.nn.conf;
*
* Thus, the l2 norm of the scaled gradients will not exceed the specified threshold, though may be smaller than it
* See: Pascanu, Mikolov, Bengio (2012), On the difficulty of training Recurrent Neural Networks,
- * http://arxiv.org/abs/1211.5063
+ * https://arxiv.org/abs/1211.5063
* Threshold for clipping can be set in Layer configuration, using gradientNormalizationThreshold(double threshold)
*
*
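A hedged sketch of how the clipping described in the javadoc above is typically wired into a layer configuration; the `DenseLayer`, sizes, and the threshold of 1.0 are illustrative only:

```java
import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.conf.layers.DenseLayer;

public class ClippingConfigSketch {
    public static void main(String[] args) {
        // Clip the per-layer gradient L2 norm at 1.0, as described above.
        DenseLayer layer = new DenseLayer.Builder()
                .nIn(10).nOut(10)
                .gradientNormalization(GradientNormalization.ClipL2PerLayer)
                .gradientNormalizationThreshold(1.0)
                .build();
        System.out.println(layer);
    }
}
```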
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/distribution/OrthogonalDistribution.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/distribution/OrthogonalDistribution.java
index 8959c2349..dbe7143d4 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/distribution/OrthogonalDistribution.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/distribution/OrthogonalDistribution.java
@@ -23,7 +23,7 @@ import org.nd4j.shade.jackson.annotation.JsonProperty;
/**
* Orthogonal distribution, with gain parameter.
- * See http://arxiv.org/abs/1312.6120 for details
+ * See https://arxiv.org/abs/1312.6120 for details
*
*/
@EqualsAndHashCode(callSuper = false)
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java
index 4c470fec5..f95421585 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java
@@ -236,7 +236,7 @@ public class BatchNormalization extends FeedForwardLayer {
/**
* Epsilon value for batch normalization; small floating point value added to variance (algorithm 1 in <a
- * href="http://arxiv.org/pdf/1502.03167v3.pdf">http://arxiv.org/pdf/1502.03167v3.pdf</a>) to reduce/avoid
+ * href="https://arxiv.org/pdf/1502.03167v3.pdf">https://arxiv.org/pdf/1502.03167v3.pdf</a>) to reduce/avoid
* underflow issues. Default: 1e-5
*/
protected double eps = 1e-5;
@@ -365,7 +365,7 @@ public class BatchNormalization extends FeedForwardLayer {
/**
* Epsilon value for batch normalization; small floating point value added to variance (algorithm 1 in <a
- * href="http://arxiv.org/pdf/1502.03167v3.pdf">http://arxiv.org/pdf/1502.03167v3.pdf</a>) to reduce/avoid
+ * href="https://arxiv.org/pdf/1502.03167v3.pdf">https://arxiv.org/pdf/1502.03167v3.pdf</a>) to reduce/avoid
* underflow issues. Default: 1e-5
*
* @param eps Epsilon values to use
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java
index d32488363..a90218946 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java
@@ -53,8 +53,8 @@ import java.util.Map;
* We deserialize the config using the default deserializer, then handle the new IUpdater (which will be null for
* 0.8.0 and earlier configs) if necessary
*
- * Overall design:
- * http://stackoverflow.com/questions/18313323/how-do-i-call-the-default-deserializer-from-a-custom-deserializer-in-jackson
+ * Overall design:
+ * https://stackoverflow.com/questions/18313323/how-do-i-call-the-default-deserializer-from-a-custom-deserializer-in-jackson
*
* @author Alex Black
*/
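The linked StackOverflow answer boils down to delegating to Jackson's default bean deserializer and then post-processing the result. Below is a minimal sketch of that pattern; it is not DL4J's actual BaseNetConfigDeserializer, and `LegacyConfig` plus the `"sgd"` fallback are invented purely for illustration. Registering the deserializer would additionally require a `BeanDeserializerModifier` on a `SimpleModule`, which is omitted here.

```java
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.deser.ResolvableDeserializer;
import com.fasterxml.jackson.databind.deser.std.StdDeserializer;

import java.io.IOException;

// Hypothetical config class used only for this sketch.
class LegacyConfig {
    public String updater;
}

public class LegacyConfigDeserializer extends StdDeserializer<LegacyConfig> implements ResolvableDeserializer {
    private final JsonDeserializer<?> defaultDeserializer;

    public LegacyConfigDeserializer(JsonDeserializer<?> defaultDeserializer) {
        super(LegacyConfig.class);
        this.defaultDeserializer = defaultDeserializer;
    }

    @Override
    public LegacyConfig deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException {
        // 1. Let Jackson's default bean deserializer populate the object...
        LegacyConfig config = (LegacyConfig) defaultDeserializer.deserialize(jp, ctxt);
        // 2. ...then patch fields that older JSON formats leave null.
        if (config.updater == null) {
            config.updater = "sgd";
        }
        return config;
    }

    @Override
    public void resolve(DeserializationContext ctxt) throws JsonMappingException {
        ((ResolvableDeserializer) defaultDeserializer).resolve(ctxt);
    }
}
```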
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/mkldnn/MKLDNNBatchNormHelper.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/mkldnn/MKLDNNBatchNormHelper.java
index 0d9ae18e7..2e8c04aa3 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/mkldnn/MKLDNNBatchNormHelper.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/mkldnn/MKLDNNBatchNormHelper.java
@@ -67,17 +67,17 @@ public class MKLDNNBatchNormHelper implements BatchNormalizationHelper {
INDArray beta, INDArray dGammaView, INDArray dBetaView, double eps, LayerWorkspaceMgr workspaceMgr) {
if(input.dataType() != DataType.FLOAT)
return null; //MKL-DNN only supports float
- /*
+
//TODO FIXME - AB 2019/11/01 - https://github.com/eclipse/deeplearning4j/issues/8335
List<INDArray> args = new ArrayList<>();
args.add(input);
args.add(meanCache);
args.add(varCache);
- args.add(epsilon);
if(gamma != null)
args.add(gamma.reshape(gamma.length()));
if(beta != null)
args.add(beta.reshape(beta.length()));
+ args.add(epsilon);
DynamicCustomOp op = DynamicCustomOp.builder("batchnorm_bp")
@@ -110,8 +110,6 @@ public class MKLDNNBatchNormHelper implements BatchNormalizationHelper {
g.setGradientFor(BatchNormalizationParamInitializer.BETA, dBetaView);
return new Pair<>(g, epsAtInput);
- */
- return null;
}
@Override
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/normalization/BatchNormalization.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/normalization/BatchNormalization.java
index 8c8f329ea..cd070185c 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/normalization/BatchNormalization.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/normalization/BatchNormalization.java
@@ -49,8 +49,8 @@ import java.util.*;
/**
* Batch normalization layer.
* Rerences:
- * http://arxiv.org/pdf/1502.03167v3.pdf
- * http://arxiv.org/pdf/1410.7455v8.pdf
+ * https://arxiv.org/pdf/1502.03167v3.pdf
+ * https://arxiv.org/pdf/1410.7455v8.pdf
*
* https://kratzert.github.io/2016/02/12/understanding-the-gradient-flow-through-the-batch-normalization-layer.html
*
@@ -327,7 +327,7 @@ public class BatchNormalization extends BaseLayer
 * http://www.cs.toronto.edu/~graves/phd.pdf
* See also for full/vectorized equations (and a comparison to other LSTM variants):
* Greff et al. 2015, "LSTM: A Search Space Odyssey", pg11. This is the "vanilla" variant in said paper
- * http://arxiv.org/pdf/1503.04069.pdf
+ * https://arxiv.org/pdf/1503.04069.pdf
*
* A high level description of bidirectional LSTM can be found from
* "Hybrid Speech Recognition with Deep Bidirectional LSTM"
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java
index 13f30b8bb..b112672f9 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java
@@ -34,7 +34,7 @@ import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
* http://www.cs.toronto.edu/~graves/phd.pdf
* See also for full/vectorized equations (and a comparison to other LSTM variants):
* Greff et al. 2015, "LSTM: A Search Space Odyssey", pg11. This is the "vanilla" variant in said paper
- * http://arxiv.org/pdf/1503.04069.pdf
+ * https://arxiv.org/pdf/1503.04069.pdf
*
* @author Alex Black
* @see LSTM LSTM class, for the version without peephole connections
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTM.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTM.java
index 692713f6e..a55a19e46 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTM.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTM.java
@@ -38,7 +38,7 @@ import org.nd4j.util.OneTimeLogger;
*
* See also for full/vectorized equations (and a comparison to other LSTM variants):
* Greff et al. 2015, "LSTM: A Search Space Odyssey", pg11. This is the "no peephole" variant in said paper
- * http://arxiv.org/pdf/1503.04069.pdf
+ * https://arxiv.org/pdf/1503.04069.pdf
*
* @author Alex Black
* @see GravesLSTM GravesLSTM class, for the version with peephole connections
diff --git a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTMHelpers.java b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTMHelpers.java
index 86079aead..c733ef6c2 100644
--- a/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTMHelpers.java
+++ b/deeplearning4j/deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LSTMHelpers.java
@@ -68,7 +68,7 @@ import static org.nd4j.linalg.indexing.NDArrayIndex.*;
*
* When 'hasPeepholeConnections' is true, this is the "vanilla" variant in said paper
* When 'hasPeepholeConnections' is false, this is the "no peephole" variant
- * http://arxiv.org/pdf/1503.04069.pdf
+ * https://arxiv.org/pdf/1503.04069.pdf
*
*
* @author Alex Black (LSTM implementations)
diff --git a/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/pom.xml b/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/pom.xml
index 7c9967ef8..94f66b405 100644
--- a/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/pom.xml
+++ b/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/pom.xml
@@ -44,184 +44,48 @@
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-library</artifactId>
-            <version>${scala.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.scala-lang</groupId>
-            <artifactId>scala-reflect</artifactId>
-            <version>${scala.version}</version>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-logging</groupId>
-            <artifactId>commons-logging</artifactId>
-            <version>${commons-logging.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>joda-time</groupId>
-            <artifactId>joda-time</artifactId>
-            <version>${jodatime.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-            <version>${httpclient.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpcore</artifactId>
-            <version>${httpcore.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.amazonaws</groupId>
             <artifactId>aws-java-sdk</artifactId>
             <version>1.11.24</version>
         </dependency>
-        <dependency>
-            <groupId>org.deeplearning4j</groupId>
-            <artifactId>deeplearning4j-core</artifactId>
-            <version>${project.parent.version}</version>
-        </dependency>
         <dependency>
             <groupId>args4j</groupId>
             <artifactId>args4j</artifactId>
             <version>2.32</version>
         </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.nd4j</groupId>
+            <artifactId>nd4j-api</artifactId>
+            <version>${nd4j.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.deeplearning4j</groupId>
+            <artifactId>deeplearning4j-util</artifactId>
+            <version>${project.version}</version>
+        </dependency>
         <dependency>
             <groupId>com.jcraft</groupId>
             <artifactId>jsch</artifactId>
             <version>${jsch.version}</version>
         </dependency>
+
-        <dependency>
-            <groupId>com.google.inject</groupId>
-            <artifactId>guice</artifactId>
-            <version>${guice.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.protobuf</groupId>
-            <artifactId>protobuf-java</artifactId>
-            <version>${google.protobuf.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-            <version>${commons-codec.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-collections</groupId>
-            <artifactId>commons-collections</artifactId>
-            <version>${commons-collections.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-            <version>${commons-io.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <version>${commons-lang.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-net</groupId>
-            <artifactId>commons-net</artifactId>
-            <version>${commons-net.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.xml.bind</groupId>
-            <artifactId>jaxb-core</artifactId>
-            <version>${jaxb.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.xml.bind</groupId>
-            <artifactId>jaxb-impl</artifactId>
-            <version>${jaxb.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>${jackson.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-            <version>${servlet.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-compress</artifactId>
-            <version>${commons-compress.version}</version>
-        </dependency>
+        <dependency>
+            <groupId>org.threadly</groupId>
+            <artifactId>threadly</artifactId>
+            <version>${threadly.version}</version>
+        </dependency>
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-lang3</artifactId>
             <version>${commons-lang3.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-math3</artifactId>
-            <version>${commons-math3.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-recipes</artifactId>
-            <version>${curator.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.typesafe</groupId>
-            <artifactId>config</artifactId>
-            <version>${typesafe.config.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-core_2.11</artifactId>
-            <version>${spark.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.google.code.findbugs</groupId>
-                    <artifactId>jsr305</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>jul-to-slf4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>jcl-over-slf4j</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.slf4j</groupId>
-                    <artifactId>slf4j-log4j12</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>log4j</groupId>
-                    <artifactId>log4j</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
-            <groupId>org.threadly</groupId>
-            <artifactId>threadly</artifactId>
-            <version>${threadly.version}</version>
-        </dependency>
diff --git a/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/src/main/java/org/deeplearning4j/aws/emr/SparkEMRClient.java b/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/src/main/java/org/deeplearning4j/aws/emr/SparkEMRClient.java
index b1476fa3b..d179cca09 100644
--- a/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/src/main/java/org/deeplearning4j/aws/emr/SparkEMRClient.java
+++ b/deeplearning4j/deeplearning4j-scaleout/deeplearning4j-aws/src/main/java/org/deeplearning4j/aws/emr/SparkEMRClient.java
@@ -27,8 +27,8 @@ import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.spark.api.java.function.Function;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.nd4j.linalg.function.Function;
import java.io.File;
import java.util.*;
@@ -157,7 +157,7 @@ public class SparkEMRClient {
private void submitJob(AmazonElasticMapReduce emr, String mainClass, List<String> args, Map<String, String> sparkConfs, File uberJar) throws Exception {
AmazonS3URI s3Jar = new AmazonS3URI(sparkS3JarFolder + "/" + uberJar.getName());
log.info(String.format("Placing uberJar %s to %s", uberJar.getPath(), s3Jar.toString()));
- PutObjectRequest putRequest = sparkS3PutObjectDecorator.call(
+ PutObjectRequest putRequest = sparkS3PutObjectDecorator.apply(
new PutObjectRequest(s3Jar.getBucket(), s3Jar.getKey(), uberJar)
);
sparkS3ClientBuilder.build().putObject(putRequest);
@@ -289,7 +289,7 @@ public class SparkEMRClient {
// This should allow the user to decorate the put call to add metadata to the jar put command, such as security groups,
protected Function<PutObjectRequest, PutObjectRequest> sparkS3PutObjectDecorator = new Function<PutObjectRequest, PutObjectRequest>() {
@Override
- public PutObjectRequest call(PutObjectRequest putObjectRequest) throws Exception {
+ public PutObjectRequest apply(PutObjectRequest putObjectRequest) {
return putObjectRequest;
}
};
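The change above swaps Spark's `Function` (whose `call` method throws `Exception`) for ND4J's `org.nd4j.linalg.function.Function` (whose `apply` does not). Below is a small standalone sketch of the same decorator shape, using a plain `String` instead of a `PutObjectRequest` so it stays self-contained:

```java
import org.nd4j.linalg.function.Function;

public class DecoratorSketch {
    public static void main(String[] args) {
        // Identity decorator, mirroring the default sparkS3PutObjectDecorator above.
        Function<String, String> decorator = new Function<String, String>() {
            @Override
            public String apply(String request) {
                // A real decorator could add metadata (e.g. security groups) here.
                return request;
            }
        };
        System.out.println(decorator.apply("put-request"));
    }
}
```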
diff --git a/deeplearning4j/deeplearning4j-scaleout/spark/pom.xml b/deeplearning4j/deeplearning4j-scaleout/spark/pom.xml
index 579e042ab..a24676022 100644
--- a/deeplearning4j/deeplearning4j-scaleout/spark/pom.xml
+++ b/deeplearning4j/deeplearning4j-scaleout/spark/pom.xml
@@ -116,7 +116,6 @@
-
         <dependency>
             <groupId>org.nd4j</groupId>
@@ -139,82 +138,6 @@
             <artifactId>scala-reflect</artifactId>
             <version>${scala.version}</version>
         </dependency>
-
-        <dependency>
-            <groupId>com.google.inject</groupId>
-            <artifactId>guice</artifactId>
-            <version>${guice.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.protobuf</groupId>
-            <artifactId>protobuf-java</artifactId>
-            <version>${google.protobuf.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-            <version>${commons-codec.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-collections</groupId>
-            <artifactId>commons-collections</artifactId>
-            <version>${commons-collections.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-            <version>${commons-io.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <version>${commons-lang.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-net</groupId>
-            <artifactId>commons-net</artifactId>
-            <version>${commons-net.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.xml.bind</groupId>
-            <artifactId>jaxb-core</artifactId>
-            <version>${jaxb.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.xml.bind</groupId>
-            <artifactId>jaxb-impl</artifactId>
-            <version>${jaxb.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>io.netty</groupId>
-            <artifactId>netty</artifactId>
-            <version>${netty.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>javax.servlet</groupId>
-            <artifactId>javax.servlet-api</artifactId>
-            <version>${servlet.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-compress</artifactId>
-            <version>${commons-compress.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-            <version>${commons-lang3.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-math3</artifactId>
-            <version>${commons-math3.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-recipes</artifactId>
-            <version>${curator.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.typesafe</groupId>
             <artifactId>config</artifactId>
@@ -250,9 +173,7 @@
log4j
-
-
diff --git a/deeplearning4j/deeplearning4j-ui-parent/deeplearning4j-play/pom.xml b/deeplearning4j/deeplearning4j-ui-parent/deeplearning4j-play/pom.xml
index 1b4f33c1e..fa18ad91d 100644
--- a/deeplearning4j/deeplearning4j-ui-parent/deeplearning4j-play/pom.xml
+++ b/deeplearning4j/deeplearning4j-ui-parent/deeplearning4j-play/pom.xml
@@ -129,32 +129,11 @@
             <artifactId>deeplearning4j-ui-model</artifactId>
             <version>${project.version}</version>
         </dependency>
-
-        <dependency>
-            <groupId>com.google.protobuf</groupId>
-            <artifactId>protobuf-java</artifactId>
-            <version>${google.protobuf.version}</version>
-        </dependency>
         <dependency>
             <groupId>javax.ws.rs</groupId>
             <artifactId>javax.ws.rs-api</artifactId>
             <version>${ws.rs.version}</version>
         </dependency>
-        <dependency>
-            <groupId>joda-time</groupId>
-            <artifactId>joda-time</artifactId>
-            <version>${jodatime.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.commons</groupId>
-            <artifactId>commons-lang3</artifactId>
-            <version>${commons-lang3.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-validator</artifactId>
-            <version>${hibernate.version}</version>
-        </dependency>
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
@@ -165,11 +144,6 @@
             <artifactId>scala-reflect</artifactId>
             <version>${scala.version}</version>
         </dependency>
-        <dependency>
-            <groupId>org.yaml</groupId>
-            <artifactId>snakeyaml</artifactId>
-            <version>${snakeyaml.version}</version>
-        </dependency>
         <dependency>
             <groupId>com.typesafe.play</groupId>
             <artifactId>play-java_2.11</artifactId>
diff --git a/docs/deeplearning4j-nlp/templates/word2vec.md b/docs/deeplearning4j-nlp/templates/word2vec.md
index e941060f2..df188dc2f 100644
--- a/docs/deeplearning4j-nlp/templates/word2vec.md
+++ b/docs/deeplearning4j-nlp/templates/word2vec.md
@@ -447,7 +447,7 @@ Marketers might seek to establish relationships among products to build a recomm
### Google's Word2vec Patent
-Word2vec is [a method of computing vector representations of words](http://arxiv.org/pdf/1301.3781.pdf) introduced by a team of researchers at Google led by Tomas Mikolov. Google [hosts an open-source version of Word2vec](https://code.google.com/p/word2vec/) released under an Apache 2.0 license. In 2014, Mikolov left Google for Facebook, and in May 2015, [Google was granted a patent for the method](http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=%2Fnetahtml%2FPTO%2Fsearch-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=9037464&OS=9037464&RS=9037464), which does not abrogate the Apache license under which it has been released.
+Word2vec is [a method of computing vector representations of words](https://arxiv.org/pdf/1301.3781.pdf) introduced by a team of researchers at Google led by Tomas Mikolov. Google [hosts an open-source version of Word2vec](https://code.google.com/p/word2vec/) released under an Apache 2.0 license. In 2014, Mikolov left Google for Facebook, and in May 2015, [Google was granted a patent for the method](http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=%2Fnetahtml%2FPTO%2Fsearch-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=9037464&OS=9037464&RS=9037464), which does not abrogate the Apache license under which it has been released.
### Foreign Languages
@@ -485,7 +485,7 @@ Deeplearning4j has a class called [SequenceVectors](https://github.com/eclipse/d
* [Quora: What Are Some Interesting Word2Vec Results?](http://www.quora.com/Word2vec/What-are-some-interesting-Word2Vec-results/answer/Omer-Levy)
* [Word2Vec: an introduction](http://www.folgertkarsdorp.nl/word2vec-an-introduction/); Folgert Karsdorp
* [Mikolov's Original Word2vec Code @Google](https://code.google.com/p/word2vec/)
-* [word2vec Explained: Deriving Mikolov et al.’s Negative-Sampling Word-Embedding Method](http://arxiv.org/pdf/1402.3722v1.pdf); Yoav Goldberg and Omer Levy
+* [word2vec Explained: Deriving Mikolov et al.’s Negative-Sampling Word-Embedding Method](https://arxiv.org/pdf/1402.3722v1.pdf); Yoav Goldberg and Omer Levy
* [Advances in Pre-Training Distributed Word Representations - by Mikolov et al](https://arxiv.org/abs/1712.09405)
diff --git a/docs/deeplearning4j-nn/templates/computationgraph.md b/docs/deeplearning4j-nn/templates/computationgraph.md
index a5ced0ceb..f4ff7f03d 100644
--- a/docs/deeplearning4j-nn/templates/computationgraph.md
+++ b/docs/deeplearning4j-nn/templates/computationgraph.md
@@ -51,10 +51,10 @@ Examples of some architectures that can be built using ComputationGraph include:
- Multi-task learning architectures
- Recurrent neural networks with skip connections
-- [GoogLeNet](http://arxiv.org/abs/1409.4842), a complex type of convolutional netural network for image classification
-- [Image caption generation](http://arxiv.org/abs/1411.4555)
+- [GoogLeNet](https://arxiv.org/abs/1409.4842), a complex type of convolutional neural network for image classification
+- [Image caption generation](https://arxiv.org/abs/1411.4555)
- [Convolutional networks for sentence classification](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/convolution/sentenceclassification/CnnSentenceClassificationExample.java)
-- [Residual learning convolutional neural networks](http://arxiv.org/abs/1512.03385)
+- [Residual learning convolutional neural networks](https://arxiv.org/abs/1512.03385)
## Configuring a Computation Graph
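A hedged sketch of the GraphBuilder API for one of the architectures listed above (a dense layer plus a skip connection from the input straight to the output layer); layer names and sizes are illustrative:

```java
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class SkipConnectionSketch {
    public static void main(String[] args) {
        // The output layer sees both the hidden layer and the raw input (merged by
        // concatenation), i.e. a simple skip-connection architecture.
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .graphBuilder()
                .addInputs("input")
                .addLayer("dense", new DenseLayer.Builder().nIn(10).nOut(10).build(), "input")
                .addLayer("out", new OutputLayer.Builder()
                        .lossFunction(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX)
                        .nIn(20).nOut(3).build(), "dense", "input")
                .setOutputs("out")
                .build();

        ComputationGraph graph = new ComputationGraph(conf);
        graph.init();
        System.out.println(graph.summary());
    }
}
```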
diff --git a/docs/deeplearning4j-nn/templates/model-persistence.md b/docs/deeplearning4j-nn/templates/model-persistence.md
index ef4d593e9..82f87f1ff 100644
--- a/docs/deeplearning4j-nn/templates/model-persistence.md
+++ b/docs/deeplearning4j-nn/templates/model-persistence.md
@@ -8,7 +8,7 @@ weight: 10
## Saving and Loading a Neural Network
-The `ModelSerializer` is a class which handles loading and saving models. There are two methods for saving models shown in the examples through the link. The first example saves a normal multilayer network, the second one saves a [computation graph](https://deeplearning4j.org/compgraph).
+The `ModelSerializer` is a class which handles loading and saving models. There are two methods for saving models shown in the examples through the link. The first example saves a normal multilayer network, the second one saves a [computation graph](https://deeplearning4j.org/docs/latest/deeplearning4j-nn-computationgraph).
Here is a [basic example](https://github.com/eclipse/deeplearning4j-examples/tree/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/modelsaving) with code to save a computation graph using the `ModelSerializer` class, as well as an example of using ModelSerializer to save a neural net built using MultiLayer configuration.
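A minimal sketch of the two `ModelSerializer` calls described above, assuming a `MultiLayerNetwork` that has already been configured and initialized:

```java
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.util.ModelSerializer;

import java.io.File;

public class SaveLoadSketch {
    public static void saveAndReload(MultiLayerNetwork net) throws Exception {
        File file = new File("my-network.zip");

        // Save the configuration, parameters and (optionally) the updater state.
        boolean saveUpdater = true;
        ModelSerializer.writeModel(net, file, saveUpdater);

        // Load it back for further training or inference.
        MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(file);
        System.out.println(restored.summary());
    }
}
```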
diff --git a/docs/deeplearning4j-nn/templates/recurrent.md b/docs/deeplearning4j-nn/templates/recurrent.md
index 0b33981e7..fe07ebddb 100644
--- a/docs/deeplearning4j-nn/templates/recurrent.md
+++ b/docs/deeplearning4j-nn/templates/recurrent.md
@@ -29,7 +29,7 @@ DL4J currently supports the following types of recurrent neural network
* BaseRecurrent
Java documentation for each is available, [GravesLSTM](https://deeplearning4j.org/api/{{page.version}}/org/deeplearning4j/nn/conf/layers/GravesLSTM.html),
- [BidirectionalGravesLSTM](https://deeplearning4j.org/api/{{page.version}}/org/deeplearning4j/nn/conf/layers/GravesBidirectionalLSTM.html), [BaseRecurrent](https://deeplearning4j.org/doc/org/deeplearning4j/nn/conf/layers/BaseRecurrentLayer.html)
+ [BidirectionalGravesLSTM](https://deeplearning4j.org/api/{{page.version}}/org/deeplearning4j/nn/conf/layers/GravesBidirectionalLSTM.html), [BaseRecurrent](https://deeplearning4j.org/api/latest/org/deeplearning4j/nn/conf/layers/BaseRecurrentLayer.html)
#### Data for RNNs
Consider for the moment a standard feed-forward network (a multi-layer perceptron or 'DenseLayer' in DL4J). These networks expect input and output data that is two-dimensional: that is, data with "shape" [numExamples,inputSize]. This means that the data into a feed-forward network has ‘numExamples’ rows/examples, where each row consists of ‘inputSize’ columns. A single example would have shape [1,inputSize], though in practice we generally use multiple examples for computational and optimization efficiency. Similarly, output data for a standard feed-forward network is also two dimensional, with shape [numExamples,outputSize].
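A short sketch contrasting the 2d feed-forward shape described above with the 3d shape DL4J uses for recurrent data, [numExamples, inputSize, timeSeriesLength]; the sizes are illustrative:

```java
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class RnnShapeSketch {
    public static void main(String[] args) {
        int numExamples = 4;
        int inputSize = 3;
        int timeSeriesLength = 5;

        // Feed-forward input: 2d, shape [numExamples, inputSize]
        INDArray ffInput = Nd4j.rand(numExamples, inputSize);

        // Recurrent input: 3d, shape [numExamples, inputSize, timeSeriesLength]
        INDArray rnnInput = Nd4j.rand(new int[]{numExamples, inputSize, timeSeriesLength});

        System.out.println(java.util.Arrays.toString(ffInput.shape()));  // [4, 3]
        System.out.println(java.util.Arrays.toString(rnnInput.shape())); // [4, 3, 5]
    }
}
```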
diff --git a/docs/deeplearning4j-nn/templates/tsne-visualization.md b/docs/deeplearning4j-nn/templates/tsne-visualization.md
index 83ab3a3ce..9a55b1a74 100644
--- a/docs/deeplearning4j-nn/templates/tsne-visualization.md
+++ b/docs/deeplearning4j-nn/templates/tsne-visualization.md
@@ -8,7 +8,7 @@ weight: 10
## t-SNE's Data Visualization
-[t-Distributed Stochastic Neighbor Embedding](http://homepage.tudelft.nl/19j49/t-SNE.html) (t-SNE) is a data-visualization tool created by Laurens van der Maaten at Delft University of Technology.
+[t-Distributed Stochastic Neighbor Embedding](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding) (t-SNE) is a data-visualization tool created by Laurens van der Maaten at Delft University of Technology.
While it can be used for any data, t-SNE (pronounced Tee-Snee) is only really meaningful with labeled data, which clarify how the input is clustering. Below, you can see the kind of graphic you can generate in DL4J with t-SNE working on MNIST data.
diff --git a/docs/deeplearning4j-scaleout/templates/howto.md b/docs/deeplearning4j-scaleout/templates/howto.md
index 500b1a241..af55969c6 100644
--- a/docs/deeplearning4j-scaleout/templates/howto.md
+++ b/docs/deeplearning4j-scaleout/templates/howto.md
@@ -627,7 +627,7 @@ To use the system clock time source, add the following to Spark submit:
## Failed training on Ubuntu 16.04 (Ubuntu bug that may affect DL4J users)
-When running a Spark on YARN cluster on Ubuntu 16.04 machines, chances are that after finishing a job, all processes owned by the user running Hadoop/YARN are killed. This is related to a bug in Ubuntu, which is documented at https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1610499. There's also a Stackoverflow discussion about it at http://stackoverflow.com/questions/38419078/logouts-while-running-hadoop-under-ubuntu-16-04.
+When running a Spark on YARN cluster on Ubuntu 16.04 machines, chances are that after finishing a job, all processes owned by the user running Hadoop/YARN are killed. This is related to a bug in Ubuntu, which is documented at https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1610499. There's also a Stackoverflow discussion about it at https://stackoverflow.com/questions/38419078/logouts-while-running-hadoop-under-ubuntu-16-04.
Some workarounds are suggested.
@@ -695,7 +695,7 @@ To use the system clock time source, add the following to Spark submit:
## Failed training on Ubuntu 16.04 (Ubuntu bug that may affect DL4J users)
-When running a Spark on YARN cluster on Ubuntu 16.04 machines, chances are that after finishing a job, all processes owned by the user running Hadoop/YARN are killed. This is related to a bug in Ubuntu, which is documented at https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1610499. There's also a Stackoverflow discussion about it at http://stackoverflow.com/questions/38419078/logouts-while-running-hadoop-under-ubuntu-16-04.
+When running a Spark on YARN cluster on Ubuntu 16.04 machines, chances are that after finishing a job, all processes owned by the user running Hadoop/YARN are killed. This is related to a bug in Ubuntu, which is documented at https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1610499. There's also a Stackoverflow discussion about it at https://stackoverflow.com/questions/38419078/logouts-while-running-hadoop-under-ubuntu-16-04.
Some workarounds are suggested.
diff --git a/docs/deeplearning4j/templates/beginners.md b/docs/deeplearning4j/templates/beginners.md
index f7740516d..3ca4d82f1 100644
--- a/docs/deeplearning4j/templates/beginners.md
+++ b/docs/deeplearning4j/templates/beginners.md
@@ -99,4 +99,4 @@ You can also download a [free version of the Skymind Intelligence Layer](https:/
Most of what we know about deep learning is contained in academic papers. You can find some of the major research groups [here](https://skymind.ai/wiki/machine-learning-research-groups-labs).
-While individual courses have limits on what they can teach, the Internet does not. Most math and programming questions can be answered by Googling and searching sites like [Stackoverflow](http://stackoverflow.com) and [Math Stackexchange](https://math.stackexchange.com/).
+While individual courses have limits on what they can teach, the Internet does not. Most math and programming questions can be answered by Googling and searching sites like [Stackoverflow](https://stackoverflow.com) and [Math Stackexchange](https://math.stackexchange.com/).
diff --git a/docs/deeplearning4j/templates/cheat-sheet.md b/docs/deeplearning4j/templates/cheat-sheet.md
index 3437ffa0f..f4b4157af 100644
--- a/docs/deeplearning4j/templates/cheat-sheet.md
+++ b/docs/deeplearning4j/templates/cheat-sheet.md
@@ -220,7 +220,7 @@ List of supported activation functions:
* **LEAKYRELU** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationLReLU.java)) - leaky rectified linear unit. ```f(x) = max(0, x) + alpha * min(0, x)``` with ```alpha=0.01``` by default.
* **RATIONALTANH** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationRationalTanh.java)) - ```tanh(y) ~ sgn(y) * { 1 - 1/(1+|y|+y^2+1.41645*y^4)}``` which approximates ```f(x) = 1.7159 * tanh(2x/3)```, but should be faster to execute. ([Reference](https://arxiv.org/abs/1508.01292))
* **RELU** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationReLU.java)) - standard rectified linear unit: ```f(x) = x``` if ```x>0``` or ```f(x) = 0``` otherwise
-* **RRELU** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationRReLU.java)) - randomized rectified linear unit. Deterministic during test time. ([Reference](http://arxiv.org/abs/1505.00853))
+* **RRELU** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationRReLU.java)) - randomized rectified linear unit. Deterministic during test time. ([Reference](https://arxiv.org/abs/1505.00853))
* **SIGMOID** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationSigmoid.java)) - standard sigmoid activation function, ```f(x) = 1 / (1 + exp(-x))```
* **SOFTMAX** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationSoftmax.java)) - standard softmax activation function
* **SOFTPLUS** - ([Source](https://github.com/eclipse/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationSoftPlus.java)) - ```f(x) = log(1+e^x)``` - shape is similar to a smooth version of the RELU activation function
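A one-line sketch of how any activation function from the list above is selected for a layer; the `DenseLayer` and sizes are illustrative only:

```java
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.nd4j.linalg.activations.Activation;

public class ActivationSketch {
    public static void main(String[] args) {
        // Pick one of the activation functions listed above for this layer.
        DenseLayer layer = new DenseLayer.Builder()
                .nIn(10).nOut(10)
                .activation(Activation.LEAKYRELU)
                .build();
        System.out.println(layer);
    }
}
```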
@@ -269,7 +269,7 @@ The [CS231n course notes](http://cs231n.github.io/neural-networks-3/#ada) have a
Supported updaters in Deeplearning4j:
* **AdaDelta** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/AdaDelta.java)) - [Reference](https://arxiv.org/abs/1212.5701)
* **AdaGrad** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/AdaGrad.java)) - [Reference](http://jmlr.org/papers/v12/duchi11a.html)
-* **AdaMax** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/AdaMax.java)) - A variant of the Adam updater - [Reference](http://arxiv.org/abs/1412.6980)
+* **AdaMax** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/AdaMax.java)) - A variant of the Adam updater - [Reference](https://arxiv.org/abs/1412.6980)
* **Adam** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/Adam.java))
* **Nadam** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/Nadam.java)) - A variant of the Adam updater, using the Nesterov mementum update rule - [Reference](https://arxiv.org/abs/1609.04747)
* **Nesterovs** - ([Source](https://github.com/eclipse/deeplearning4j/tree/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/learning/config/Nesterovs.java)) - Nesterov momentum updater
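A hedged sketch of plugging one of the updaters listed above into a network configuration; Adam with a 1e-3 learning rate is just an example, and any other IUpdater can be swapped in:

```java
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class UpdaterSketch {
    public static void main(String[] args) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new Adam(1e-3)) // any updater from the list above fits here
                .list()
                .layer(new OutputLayer.Builder()
                        .nIn(10).nOut(3)
                        .activation(Activation.SOFTMAX)
                        .lossFunction(LossFunctions.LossFunction.MCXENT)
                        .build())
                .build();
        System.out.println(conf.toJson());
    }
}
```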
diff --git a/docs/deeplearning4j/templates/config-performance-debugging.md b/docs/deeplearning4j/templates/config-performance-debugging.md
index 6dafd13b7..04b92ba23 100644
--- a/docs/deeplearning4j/templates/config-performance-debugging.md
+++ b/docs/deeplearning4j/templates/config-performance-debugging.md
@@ -84,7 +84,7 @@ Not all DL4J layer types are supported in cuDNN. DL4J layers with cuDNN support
To check if cuDNN is being used, the simplest approach is to look at the log output when running inference or training:
If cuDNN is NOT available when you are using a layer that supports it, you will see a message such as:
```
-o.d.n.l.c.ConvolutionLayer - cuDNN not found: use cuDNN for better GPU performance by including the deeplearning4j-cuda module. For more information, please refer to: https://deeplearning4j.org/cudnn
+o.d.n.l.c.ConvolutionLayer - cuDNN not found: use cuDNN for better GPU performance by including the deeplearning4j-cuda module. For more information, please refer to: https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn
java.lang.ClassNotFoundException: org.deeplearning4j.nn.layers.convolution.CudnnConvolutionHelper
at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
diff --git a/docs/deeplearning4j/templates/examples-tour.md b/docs/deeplearning4j/templates/examples-tour.md
index 2aa5dd29b..ee6c049ab 100644
--- a/docs/deeplearning4j/templates/examples-tour.md
+++ b/docs/deeplearning4j/templates/examples-tour.md
@@ -18,31 +18,31 @@ Most of the examples make use of DataVec, a toolkit for preprocessing and clearn
This example takes the canonical Iris dataset of the flower species of the same name, whose relevant measurements are sepal length, sepal width, petal length and petal width. It builds a Spark RDD from the relatively small dataset and runs an analysis against it.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/analysis/IrisAnalysis.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/analysis/IrisAnalysis.java)
### BasicDataVecExample.java
This example loads data into a Spark RDD. All DataVec transform operations use Spark RDDs. Here, we use DataVec to filter data, apply time transformations and remove columns.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/basic/BasicDataVecExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/basic/BasicDataVecExample.java)
### PrintSchemasAtEachStep.java
This example shows the print-schema tools, which are useful for visualizing transforms and verifying that the transform code behaves as expected.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/debugging/PrintSchemasAtEachStep.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/debugging/PrintSchemasAtEachStep.java)
### JoinExample.java
You may need to join datasets before passing them to a neural network. You can do that in DataVec, and this example shows you how.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/join/JoinExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/join/JoinExample.java)
### LogDataExample.java
This is an example of parsing log data using DataVec. The obvious use cases are cybersecurity and customer relationship management.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/logdata/LogDataExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/datavec-examples/src/main/java/org/datavec/transform/logdata/LogDataExample.java)
### MnistImagePipelineExample.java
@@ -50,7 +50,7 @@ This example is from the video below, which demonstrates the ParentPathLabelGene
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/dataExamples/MnistImagePipelineExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/dataExamples/MnistImagePipelineExample.java)
### PreprocessNormalizerExample.java
@@ -78,13 +78,13 @@ MNIST is the "Hello World" of deep learning. Simple, straightforward, and focuss
This is a Single Layer Perceptron for recognizing digits. Note that this pulls the images from a binary package containing the dataset, a rather special case for data ingestion.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/feedforward/mnist/MLPMnistSingleLayerExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/feedforward/mnist/MLPMnistSingleLayerExample.java)
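For orientation, a condensed sketch of what a single-hidden-layer MNIST perceptron typically looks like in DL4J follows; it is not the linked example, and the seed, batch size, layer widths, learning rate, and epoch count are placeholders.

```
import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Nesterovs;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class MnistMlpSketch {
    public static void main(String[] args) throws Exception {
        MnistDataSetIterator train = new MnistDataSetIterator(128, true, 123);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(123)
                .updater(new Nesterovs(0.006, 0.9))
                .list()
                // 28x28 = 784 pixel inputs feed one hidden layer
                .layer(new DenseLayer.Builder().nIn(784).nOut(1000)
                        .activation(Activation.RELU).build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .nIn(1000).nOut(10)
                        .activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        for (int epoch = 0; epoch < 2; epoch++) {
            net.fit(train);
            train.reset();
        }
    }
}
```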
### MLPMnistTwoLayerExample.java
A two-layer perceptron for MNIST, showing there is more than one useful network for a given dataset.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/feedforward/mnist/MLPMnistTwoLayerExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/feedforward/mnist/MLPMnistTwoLayerExample.java)
### Feedforward Examples
@@ -92,7 +92,7 @@ Data flows through feed-forward neural networks in a single pass from input via
These networks can be used for a wide range of tasks depending on how they are configured. Along with image classification over MNIST data, this directory has examples demonstrating regression, classification, and anomaly detection.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/tree/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/feedforward)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/tree/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/feedforward)
### Convolutional Neural Networks
@@ -102,7 +102,7 @@ Convolutional Neural Networks are mainly used for image recognition, although th
This example can be run using either LeNet or AlexNet.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/convolution/AnimalsClassification.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/convolution/AnimalsClassification.java)
---
@@ -115,7 +115,7 @@ load the model for later training or inference.
This demonstrates saving and loading a network built using the class ComputationGraph.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/modelsaving/SaveLoadComputationGraph.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/modelsaving/SaveLoadComputationGraph.java)
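The save/load round trip generally goes through `ModelSerializer`; the sketch below shows the idea. It is an illustration rather than the linked example, and the file name and helper method are placeholders.

```
import java.io.File;

import org.deeplearning4j.nn.graph.ComputationGraph;
import org.deeplearning4j.util.ModelSerializer;

public class SaveLoadSketch {
    // 'net' is assumed to be an already-built ComputationGraph
    static void roundTrip(ComputationGraph net) throws Exception {
        File file = new File("my-computation-graph.zip");
        // true => also persist the updater state so training can resume later
        ModelSerializer.writeModel(net, file, true);

        ComputationGraph restored = ModelSerializer.restoreComputationGraph(file);
        System.out.println(restored.summary());
    }
}
```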
### SaveLoadMultiLayerNetwork.java
@@ -135,11 +135,11 @@ Do you need to add a Loss Function that is not available or prebuilt yet? Check
### CustomLossExample.java
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/lossfunctions/CustomLossExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/lossfunctions/CustomLossExample.java)
### CustomLossL1L2.java
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/lossfunctions/CustomLossL1L2.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/lossfunctions/CustomLossL1L2.java)
### Custom Layer
@@ -147,7 +147,7 @@ Do you need to add a layer with features that aren't available in DeepLearning4J
### CustomLayerExample.java
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/customlayers/CustomLayerExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/misc/customlayers/CustomLayerExample.java)
---
@@ -159,25 +159,25 @@ Neural Networks for NLP? We have those, too.
Global Vectors for Word Representation are useful for detecting relationships between words.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/glove/GloVeExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/glove/GloVeExample.java)
### Paragraph Vectors
A vectorized representation of paragraphs and documents. Described [here](https://cs.stanford.edu/~quocle/paragraph_vector.pdf)
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/paragraphvectors/ParagraphVectorsClassifierExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/paragraphvectors/ParagraphVectorsClassifierExample.java)
### Sequence Vectors
One way to represent sentences is as a sequence of words.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/sequencevectors/SequenceVectorsTextExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/sequencevectors/SequenceVectorsTextExample.java)
### Word2Vec
Described [here](https://deeplearning4j.org/word2vec.html)
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/word2vec/Word2VecRawTextExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/word2vec/Word2VecRawTextExample.java)
---
@@ -185,7 +185,7 @@ Described [here](https://deeplearning4j.org/word2vec.html)
t-Distributed Stochastic Neighbor Embedding (t-SNE) is useful for data visualization. We include an example in the NLP section since word similarity visualization is a common use.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/tsne/TSNEStandardExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/nlp/tsne/TSNEStandardExample.java)
---
@@ -199,19 +199,19 @@ The examples folder for Recurrent Neural Networks has the following:
An RNN learns a string of characters.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/basic/BasicRNNExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/basic/BasicRNNExample.java)
### GravesLSTMCharModellingExample.java
Takes the complete works of Shakespeare as a sequence of characters and trains a neural network to generate "Shakespeare" one character at a time.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/character/GravesLSTMCharModellingExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/character/GravesLSTMCharModellingExample.java)
### SingleTimestepRegressionExample.java
Regression with an LSTM (Long Short Term Memory) Recurrent Neural Network.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/regression/SingleTimestepRegressionExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-examples/src/main/java/org/deeplearning4j/examples/recurrent/regression/SingleTimestepRegressionExample.java)
### AdditionRNN.java
@@ -254,13 +254,13 @@ DeepLearning4j supports using a Spark Cluster for network training. Here are the
### MnistMLPExample.java
This is an example of a multi-layer perceptron training on the MNIST data set of handwritten digits.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-spark-examples/dl4j-spark/src/main/java/org/deeplearning4j/mlp/MnistMLPExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-spark-examples/dl4j-spark/src/main/java/org/deeplearning4j/mlp/MnistMLPExample.java)
### SparkLSTMCharacterExample.java
An LSTM recurrent network in Spark.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-spark-examples/dl4j-spark/src/main/java/org/deeplearning4j/rnn/SparkLSTMCharacterExample.java)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/blob/master/dl4j-spark-examples/dl4j-spark/src/main/java/org/deeplearning4j/rnn/SparkLSTMCharacterExample.java)
---
@@ -274,7 +274,7 @@ The learning algorithms and loss functions are executed as ND4J operations.
This is a directory with examples for creating and manipulating NDArrays.
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/tree/master/nd4j-examples/src/main/java/org/nd4j/examples)
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/tree/master/nd4j-examples/src/main/java/org/nd4j/examples)
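To give a flavour of that directory, a short hedged sketch of basic NDArray creation and manipulation with ND4J is shown below; the shapes and values are arbitrary placeholders.

```
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class NdArraySketch {
    public static void main(String[] args) {
        // Create a 2x3 array of zeros and a 2x3 array of ones
        INDArray zeros = Nd4j.zeros(2, 3);
        INDArray ones = Nd4j.ones(2, 3);

        // Elementwise operations return new arrays; the *i variants (addi, ...) work in place
        INDArray sum = zeros.add(ones).add(2.0);

        // Matrix multiply: (2x3) x (3x2) -> 2x2
        INDArray product = sum.mmul(ones.transpose());

        System.out.println(sum);
        System.out.println(product);
    }
}
```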
---
@@ -282,4 +282,4 @@ This is a directory with examples for creating and manipulating NDArrays.
Deep learning algorithms have learned to play Space Invaders and Doom using reinforcement learning. DeepLearning4J/RL4J examples of Reinforcement Learning are available here:
-[Show me the code](http://github.com/eclipse/deeplearning4j-examples/tree/master/rl4j-examples)
\ No newline at end of file
+[Show me the code](https://github.com/eclipse/deeplearning4j-examples/tree/master/rl4j-examples)
\ No newline at end of file
diff --git a/docs/deeplearning4j/templates/quickstart.md b/docs/deeplearning4j/templates/quickstart.md
index bcc042f07..25f4216ff 100644
--- a/docs/deeplearning4j/templates/quickstart.md
+++ b/docs/deeplearning4j/templates/quickstart.md
@@ -179,7 +179,7 @@ Congratulations! You just trained your first neural network with Deeplearning4j.
**Q:** **SPARK ISSUES** I am running the examples and having issues with the Spark-based examples, such as distributed training or DataVec transform options.
-**A:** You may be missing some dependencies that Spark requires. See this [Stack Overflow discussion](http://stackoverflow.com/a/38735202/3892515) for a discussion of potential dependency issues. Windows users may need the winutils.exe from Hadoop.
+**A:** You may be missing some dependencies that Spark requires. See this [Stack Overflow discussion](https://stackoverflow.com/a/38735202/3892515) for a discussion of potential dependency issues. Windows users may need the winutils.exe from Hadoop.
Download winutils.exe from https://github.com/steveloughran/winutils and place it at null/bin/winutils.exe (or create a Hadoop folder containing bin/winutils.exe and point HADOOP_HOME at it)
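One commonly used workaround is to point Hadoop at the folder that contains `bin/winutils.exe` before Spark starts; the snippet below is a sketch under the assumption that the file was placed at `C:\hadoop\bin\winutils.exe` (the path is a placeholder, not a requirement).

```
public class HadoopHomeWorkaround {
    public static void main(String[] args) {
        // Assumption: winutils.exe lives at C:\hadoop\bin\winutils.exe.
        // Setting hadoop.home.dir (or the HADOOP_HOME environment variable)
        // before Spark initializes avoids the "null/bin/winutils.exe" lookup.
        System.setProperty("hadoop.home.dir", "C:\\hadoop");
        // ...then launch the Spark-based example as usual
    }
}
```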
diff --git a/libnd4j/include/helpers/files.h b/libnd4j/include/helpers/files.h
index fa87d4e3e..c49cedbb7 100644
--- a/libnd4j/include/helpers/files.h
+++ b/libnd4j/include/helpers/files.h
@@ -16,7 +16,7 @@
//
// Methods to lookup files in $PATH
-// adopted from http://stackoverflow.com/questions/2718915/check-if-file-exists-including-on-path
+// adopted from https://stackoverflow.com/questions/2718915/check-if-file-exists-including-on-path
//
#ifndef LIBND4J_FILES_H
diff --git a/libnd4j/include/ops/declarable/headers/nn.h b/libnd4j/include/ops/declarable/headers/nn.h
index 9f9b0e40a..810733680 100644
--- a/libnd4j/include/ops/declarable/headers/nn.h
+++ b/libnd4j/include/ops/declarable/headers/nn.h
@@ -137,7 +137,7 @@ namespace nd4j {
#endif
/**
- * This operation performs batch normalization of layer, it is based on following article http://arxiv.org/abs/1502.03167.
+ * This operation performs batch normalization of a layer; it is based on the following article: https://arxiv.org/abs/1502.03167.
* Expected arguments:
* x: input 4D array of shape [bS,iH,iW,iD] (data format = NHWC) or [bS,iD,iH,iW] (data format = NCHW), where
* bS - batch size
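For reference, the per-mini-batch transform described in the cited paper is, in standard notation (learnable scale gamma, shift beta, and a small epsilon for numerical stability):

```
\mu_B      = \frac{1}{m} \sum_{i=1}^{m} x_i
\sigma_B^2 = \frac{1}{m} \sum_{i=1}^{m} (x_i - \mu_B)^2
\hat{x}_i  = \frac{x_i - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}}
y_i        = \gamma \hat{x}_i + \beta
```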
diff --git a/libnd4j/include/ops/declarable/helpers/cpu/gru.cpp b/libnd4j/include/ops/declarable/helpers/cpu/gru.cpp
index 9799e609d..579ab2612 100644
--- a/libnd4j/include/ops/declarable/helpers/cpu/gru.cpp
+++ b/libnd4j/include/ops/declarable/helpers/cpu/gru.cpp
@@ -19,7 +19,7 @@
//
// implementation of gated Recurrent Unit cell
-// (cf. http://arxiv.org/abs/1406.1078).
+// (cf. https://arxiv.org/abs/1406.1078).
// Kyunghyun Cho, Bart van Merrienboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, Yoshua Bengio
// "Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation"
diff --git a/libnd4j/include/ops/declarable/helpers/cuda/gru.cu b/libnd4j/include/ops/declarable/helpers/cuda/gru.cu
index 8e7b62a91..cbbdf1439 100644
--- a/libnd4j/include/ops/declarable/helpers/cuda/gru.cu
+++ b/libnd4j/include/ops/declarable/helpers/cuda/gru.cu
@@ -19,7 +19,7 @@
//
// implementation of gated Recurrent Unit cell
-// (cf. http://arxiv.org/abs/1406.1078).
+// (cf. https://arxiv.org/abs/1406.1078).
// Kyunghyun Cho, Bart van Merrienboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, Yoshua Bengio
// "Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation"
diff --git a/libnd4j/tests_cpu/layers_tests/testinclude.h b/libnd4j/tests_cpu/layers_tests/testinclude.h
index f27623cff..79607cdc9 100644
--- a/libnd4j/tests_cpu/layers_tests/testinclude.h
+++ b/libnd4j/tests_cpu/layers_tests/testinclude.h
@@ -24,7 +24,7 @@
#include
#include
-//http://stackoverflow.com/questions/228005/alternative-to-itoa-for-converting-integer-to-string-c
+//https://stackoverflow.com/questions/228005/alternative-to-itoa-for-converting-integer-to-string-c
FORCEINLINE std::string int_array_to_string(Nd4jLong int_array[], Nd4jLong size_of_array) {
std::string returnstring = "[";
for (int temp = 0; temp < size_of_array; temp++) {
diff --git a/nd4j/README.md b/nd4j/README.md
index f26adea75..1d41d4403 100644
--- a/nd4j/README.md
+++ b/nd4j/README.md
@@ -41,12 +41,12 @@ To install ND4J, there are a couple of approaches, and more information can be f
#### Install from Maven Central
-1. Search for nd4j in the [Maven Central Repository](http://mvnrepository.com/search?q=nd4j) to find the available nd4j jars.
+1. Search for nd4j in the [Maven Central Repository](https://search.maven.org/search?q=nd4j) to find the available nd4j jars.
2. Include the appropriate dependency in your pom.xml.
#### Clone from the GitHub Repo
-https://deeplearning4j.org/buildinglocally
+https://deeplearning4j.org/docs/latest/deeplearning4j-build-from-source
## Contribute
1. Check for open issues, or open a new issue to start a discussion around a feature idea or a bug.
diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/pom.xml b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/pom.xml
index 21924f80a..b4a374baf 100644
--- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/pom.xml
+++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/pom.xml
@@ -192,12 +192,6 @@
-
- org.objenesis
- objenesis
- ${objenesis.version}
-
-
@@ -206,22 +200,6 @@
${oshi.version}
-
- junit
- junit
- test
-
-
- ch.qos.logback
- logback-classic
- test
-
-
- ch.qos.logback
- logback-core
- test
-
-
org.slf4jslf4j-api
diff --git a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/ops/SDNN.java b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/ops/SDNN.java
index 668a7a4a9..7b1cc5768 100644
--- a/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/ops/SDNN.java
+++ b/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/autodiff/samediff/ops/SDNN.java
@@ -69,7 +69,7 @@ public class SDNN extends SDOps {
/**
* Neural network batch normalization operation.
- * For details, see http://arxiv.org/abs/1502.03167
+ * For details, see https://arxiv.org/abs/1502.03167
*
* @param name Name of the output variable
* @param input Input variable.
@@ -139,7 +139,7 @@ public class SDNN extends SDOps {
* out = a * (exp(x) - 1) if x <= 0
* with constant a = 1.0
*