Update docs links to new website URLs [WIP] (#452)

* Update docs links to new website URLs

Signed-off-by: Alex Black <blacka101@gmail.com>

* One more link

Signed-off-by: Alex Black <blacka101@gmail.com>
Branch: master
Author: Alex Black, 2020-05-12 13:02:19 +10:00 (committed by GitHub)
Parent: e2cd461578
Commit: 872a511042
18 changed files with 22 additions and 22 deletions


@@ -21,7 +21,7 @@ package org.deeplearning4j.nn.modelimport.keras.exceptions;
* Indicates that the user is attempting to import a Keras model configuration that
* is malformed or invalid in some other way.
*
* See <a href="https://deeplearning4j.org/docs/latest/keras-import-overview">https://deeplearning4j.org/docs/latest/keras-import-overview</a> for more information.
* See <a href="https://deeplearning4j.konduit.ai/keras-import/overview">https://deeplearning4j.konduit.ai/keras-import/overview</a> for more information.
*
* @author dave@skymind.io
*/
@@ -40,6 +40,6 @@ public class InvalidKerasConfigurationException extends Exception {
}
private static String appendDocumentationURL(String message) {
return message + ". For more information, see http://deeplearning4j.org/docs/latest/keras-import-overview";
return message + ". For more information, see https://deeplearning4j.konduit.ai/keras-import/overview";
}
}


@@ -21,7 +21,7 @@ package org.deeplearning4j.nn.modelimport.keras.exceptions;
* Indicates that the user is attempting to import a Keras model configuration that
* is not currently supported.
*
* See <a href="https://deeplearning4j.org/docs/latest/keras-import-overview">https://deeplearning4j.org/docs/latest/keras-import-overview</a>
* See <a href="https://deeplearning4j.konduit.ai/keras-import/overview">https://deeplearning4j.konduit.ai/keras-import/overview</a>
* for more information and file an issue at <a href="https://github.com/eclipse/deeplearning4j/issues">https://github.com/eclipse/deeplearning4j/issues</a>.
*
* @author dave@skymind.io


@@ -103,7 +103,7 @@ public class KerasEmbedding extends KerasLayer {
"on Embedding layers. Zero Masking for the Embedding layer only works with unidirectional LSTM for now."
+ " If you want to have this behaviour for your imported model " +
"in DL4J, apply masking as a pre-processing step to your input." +
"See http://deeplearning4j.org/docs/latest/deeplearning4j-nn-recurrent#masking for more on this.");
"See https://deeplearning4j.konduit.ai/models/recurrent#masking-one-to-many-many-to-one-and-sequence-classification for more on this.");
IWeightInit init = getWeightInitFromConfig(layerConfig, conf.getLAYER_FIELD_EMBEDDING_INIT(),
enforceTrainingConfig, conf, kerasMajorVersion);


@@ -17,10 +17,10 @@
package org.deeplearning4j.nn.conf;
/**
- * Workspace mode to use. See <a href="https://deeplearning4j.org/docs/latest/deeplearning4j-config-workspaces">https://deeplearning4j.org/docs/latest/deeplearning4j-config-workspaces</a><br>
+ * Workspace mode to use. See <a href="https://deeplearning4j.konduit.ai/config/config-memory/config-workspaces">https://deeplearning4j.konduit.ai/config/config-memory/config-workspaces</a><br>
* <br>
* NONE: No workspaces will be used for the network. Highest memory use, least performance.<br>
- * ENABLED: Use workspaces.<br>
+ * ENABLED: Use workspaces. This is the default and should almost always be used<br>
* SINGLE: Deprecated. Now equivalent to ENABLED, which should be used instead.<br>
* SEPARATE: Deprecated. Now equivalent to ENABLED, which should be used instead.<br>
*
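
For readers following the workspace documentation link above, here is a minimal sketch of how a workspace mode is typically selected. The trainingWorkspaceMode/inferenceWorkspaceMode builder methods are assumed from the standard DL4J configuration API and are not part of this change; ENABLED is already the default.

    import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
    import org.deeplearning4j.nn.conf.WorkspaceMode;

    public class WorkspaceModeSketch {
        public static void main(String[] args) {
            // ENABLED is the default; shown here only to make the setting explicit.
            NeuralNetConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                    .trainingWorkspaceMode(WorkspaceMode.ENABLED)    // workspaces during training
                    .inferenceWorkspaceMode(WorkspaceMode.ENABLED);  // workspaces during inference
        }
    }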


@@ -38,7 +38,7 @@ import java.util.Map;
/**
* LSTM recurrent neural network layer without peephole connections. Supports CuDNN acceleration - see <a
* href="https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn">https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn</a> for details
* href="https://deeplearning4j.konduit.ai/config/backends/config-cudnn">https://deeplearning4j.konduit.ai/config/backends/config-cudnn</a> for details
*
* @author Alex Black
* @see GravesLSTM GravesLSTM class for an alternative LSTM (with peephole connections)
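
To place the LSTM Javadoc above in context, a short configuration sketch follows. Layer sizes, activations, and the helper class name are illustrative placeholders, not taken from this commit.

    import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
    import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
    import org.deeplearning4j.nn.conf.layers.LSTM;
    import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
    import org.nd4j.linalg.activations.Activation;
    import org.nd4j.linalg.lossfunctions.LossFunctions;

    public class LstmConfigSketch {
        // Sketch: a small recurrent network using the LSTM layer documented above.
        public static MultiLayerConfiguration build(int nIn, int nHidden, int nOut) {
            return new NeuralNetConfiguration.Builder()
                    .list()
                    .layer(new LSTM.Builder().nIn(nIn).nOut(nHidden).activation(Activation.TANH).build())
                    .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .nIn(nHidden).nOut(nOut).activation(Activation.SOFTMAX).build())
                    .build();
        }
    }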


@@ -1540,8 +1540,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork {
* (not) clearing the layer input arrays.<br>
* Note: this method should NOT be used with clearInputs = true, unless you know what you are doing. Specifically:
* when using clearInputs=false, in combination with workspaces, the layer input fields may leak outside of the
- * workspaces in which they were defined - potentially causing a crash. See <a href="https://deeplearning4j.org/docs/latest/deeplearning4j-config-workspaces">
- * https://deeplearning4j.org/docs/latest/deeplearning4j-config-workspaces</a>
+ * workspaces in which they were defined - potentially causing a crash. See <a href="https://deeplearning4j.konduit.ai/config/config-memory/config-workspaces">
+ * https://deeplearning4j.konduit.ai/config/config-memory/config-workspaces</a>
* for more details
*
* @param input An array of ComputationGraph inputs


@@ -86,7 +86,7 @@ public class ConvolutionLayer extends BaseLayer<org.deeplearning4j.nn.conf.layer
} else {
OneTimeLogger.info(log, "cuDNN not found: "
+ "use cuDNN for better GPU performance by including the deeplearning4j-cuda module. "
+ "For more information, please refer to: https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn", t);
+ "For more information, please refer to: https://deeplearning4j.konduit.ai/config/backends/config-cudnn", t);
}
}
} else if("CPU".equalsIgnoreCase(backend)){


@@ -78,7 +78,7 @@ public class SubsamplingLayer extends AbstractLayer<org.deeplearning4j.nn.conf.l
} else {
OneTimeLogger.info(log, "cuDNN not found: "
+ "use cuDNN for better GPU performance by including the deeplearning4j-cuda module. "
+ "For more information, please refer to: https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn", t);
+ "For more information, please refer to: https://deeplearning4j.konduit.ai/config/backends/config-cudnn", t);
}
}
} else if("CPU".equalsIgnoreCase(backend) ){


@@ -86,7 +86,7 @@ public class BatchNormalization extends BaseLayer<org.deeplearning4j.nn.conf.lay
} else {
OneTimeLogger.info(log, "cuDNN not found: "
+ "use cuDNN for better GPU performance by including the deeplearning4j-cuda module. "
+ "For more information, please refer to: https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn", t);
+ "For more information, please refer to: https://deeplearning4j.konduit.ai/config/backends/config-cudnn", t);
}
}
} else if("CPU".equalsIgnoreCase(backend)){


@@ -96,7 +96,7 @@ public class LocalResponseNormalization
} else {
OneTimeLogger.info(log, "cuDNN not found: "
+ "use cuDNN for better GPU performance by including the deeplearning4j-cuda module. "
+ "For more information, please refer to: https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn", t);
+ "For more information, please refer to: https://deeplearning4j.konduit.ai/config/backends/config-cudnn", t);
}
}
}


@@ -32,7 +32,7 @@ import java.util.Map;
/**
*
- * RNN tutorial: https://deeplearning4j.org/docs/latest/deeplearning4j-nn-recurrent
+ * RNN tutorial: https://deeplearning4j.konduit.ai/models/recurrent
* READ THIS FIRST
*
* Bidirectional LSTM layer implementation.


@@ -71,7 +71,7 @@ public class LSTM extends BaseRecurrentLayer<org.deeplearning4j.nn.conf.layers.L
} else {
OneTimeLogger.info(log, "cuDNN not found: "
+ "use cuDNN for better GPU performance by including the deeplearning4j-cuda module. "
+ "For more information, please refer to: https://deeplearning4j.org/docs/latest/deeplearning4j-config-cudnn", t);
+ "For more information, please refer to: https://deeplearning4j.konduit.ai/config/backends/config-cudnn", t);
}
}
}


@@ -52,8 +52,8 @@ import static org.nd4j.linalg.indexing.NDArrayIndex.*;
/**
*
- * RNN tutorial: <a href="https://deeplearning4j.org/docs/latest/deeplearning4j-nn-recurrent">https://deeplearning4j.org/docs/latest/deeplearning4j-nn-recurrent</a>
- * READ THIS FIRST if you want to understand what the heck is happening here.
+ * RNN tutorial: <a href="https://deeplearning4j.konduit.ai/models/recurrent">https://deeplearning4j.konduit.ai/models/recurrent</a>
+ * READ THIS FIRST if you want to understand this code.
*
* Shared code for the standard "forwards" LSTM RNN and the bidirectional LSTM RNN
* This was extracted from GravesLSTM and refactored into static helper functions. The general reasoning for this was


@@ -826,7 +826,7 @@ public class ParallelWrapper implements AutoCloseable {
/**
* This method allows you to specify training mode for this instance of PW.<br>
* 1) AVERAGING - stands for parameters averaging. Each X epochs weights and updaters state will be averaged across all models<br>
- * 2) SHARED_GRADIENTS - stands for gradients sharing - more details available here: <a href="https://deeplearning4j.org/docs/latest/deeplearning4j-scaleout-intro">https://deeplearning4j.org/docs/latest/deeplearning4j-scaleout-intro</a><br>
+ * 2) SHARED_GRADIENTS - stands for gradients sharing - more details available here: <a href="https://deeplearning4j.konduit.ai/distributed-deep-learning/intro">https://deeplearning4j.konduit.ai/distributed-deep-learning/intro</a><br>
* 3) CUSTOM - this method allows you to specify custom gradients accumulator, this giving you better control of configuration params for training.<br>
*
* @param mode
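
A brief usage sketch for the training-mode setter documented above. The nested TrainingMode enum location and the surrounding builder calls are assumed from the public ParallelWrapper API, and the model argument is a placeholder.

    import org.deeplearning4j.nn.api.Model;
    import org.deeplearning4j.parallelism.ParallelWrapper;

    public class TrainingModeSketch {
        // Sketch: pick the parameter-synchronization strategy for parallel training.
        public static ParallelWrapper averagingWrapper(Model model) {
            return new ParallelWrapper.Builder(model)
                    .trainingMode(ParallelWrapper.TrainingMode.AVERAGING) // or SHARED_GRADIENTS / CUSTOM
                    .build();
        }
    }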


@@ -71,7 +71,7 @@ public class SparkUtils {
+ "for ND4J INDArrays.\nWhen using Kryo, An appropriate Kryo registrator must be used to avoid"
+ " serialization issues (NullPointerException) with off-heap data in INDArrays.\n"
+ "Use nd4j-kryo_2.10 or _2.11 artifact, with sparkConf.set(\"spark.kryo.registrator\", \"org.nd4j.kryo.Nd4jRegistrator\");\n"
+ "See https://deeplearning4j.org/docs/latest/deeplearning4j-scaleout-howto#kryo for more details";
+ "See https://deeplearning4j.konduit.ai/distributed-deep-learning/howto#how-to-use-kryo-serialization-with-dl-4-j-and-nd-4-j for more details";
private static String sparkExecutorId;
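
The Kryo warning above translates into the following configuration sketch. The registrator class name is taken directly from the message; the serializer key is standard Spark configuration.

    import org.apache.spark.SparkConf;

    public class KryoConfigSketch {
        public static void main(String[] args) {
            // Register the ND4J Kryo registrator so off-heap INDArray data serializes correctly.
            SparkConf sparkConf = new SparkConf()
                    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                    .set("spark.kryo.registrator", "org.nd4j.kryo.Nd4jRegistrator");
        }
    }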


@@ -108,7 +108,7 @@ public class TestSparkDl4jMultiLayer extends BaseSparkTest {
.activation(Activation.SOFTMAX).nIn(100).nOut(10).build())
.build();
- //Configuration for Spark training: see https://deeplearning4j.org/docs/latest/deeplearning4j-scaleout-howto for explanation of these configuration options
+ //Configuration for Spark training: see https://deeplearning4j.konduit.ai/distributed-deep-learning/howto for explanation of these configuration options
TrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(batchSizePerWorker)
.averagingFrequency(2)
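
For readers following the Spark training link above, a minimal sketch of how such a TrainingMaster is typically combined with a network configuration. Class and method names are assumed from the DL4J Spark API and are not part of this test.

    import org.apache.spark.api.java.JavaRDD;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
    import org.deeplearning4j.spark.api.TrainingMaster;
    import org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer;
    import org.nd4j.linalg.dataset.DataSet;

    public class SparkTrainingSketch {
        // Sketch: wrap the configuration and TrainingMaster, then fit on an RDD of DataSets.
        public static SparkDl4jMultiLayer train(JavaSparkContext sc, MultiLayerConfiguration conf,
                                                TrainingMaster tm, JavaRDD<DataSet> trainData) {
            SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, conf, tm);
            sparkNet.fit(trainData);
            return sparkNet;
        }
    }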


@@ -195,7 +195,7 @@ public class CharacterIterator implements DataSetIterator {
// dimension 0 = number of examples in minibatch
// dimension 1 = size of each vector (i.e., number of characters)
// dimension 2 = length of each time series/example
- //Why 'f' order here? See https://deeplearning4j.org/docs/latest/deeplearning4j-nn-recurrent data section "Alternative: Implementing a custom DataSetIterator"
+ //Why 'f' order here? See https://deeplearning4j.konduit.ai/models/recurrent data section "Alternative: Implementing a custom DataSetIterator"
INDArray input = Nd4j.create(new int[]{currMinibatchSize, validCharacters.length, exampleLength}, 'f');
INDArray labels = Nd4j.create(new int[]{currMinibatchSize, validCharacters.length, exampleLength}, 'f');


@@ -61,7 +61,7 @@ import java.util.*;
@Slf4j
public abstract class DefaultOpExecutioner implements OpExecutioner {
- private static final String SCOPE_PANIC_MSG = "For more details, see the ND4J User Guide: deeplearning4j.org/docs/latest/nd4j-overview#workspaces-panic";
+ private static final String SCOPE_PANIC_MSG = "For more details, see the ND4J User Guide: https://deeplearning4j.konduit.ai/nd4j/overview#workspaces-scope-panic";
protected ProfilingMode profilingMode = ProfilingMode.SCOPE_PANIC;