From d592e0285280f296992f0f12f306754f1eca7b3b Mon Sep 17 00:00:00 2001
From: brian
Date: Thu, 23 Mar 2023 17:39:00 +0100
Subject: [PATCH] Refactoring and separation of IModel / Layer

---
 .../TupleStreamDataSetIteratorTest.java | 2 +-
 .../ModelTupleStreamIntegrationTest.java | 4 +-
 .../solr/handler/ModelTupleStreamTest.java | 6 +-
 .../solr/ltr/model/ScoringModelTest.java | 6 +-
 .../remote/JsonModelServerTest.java | 10 +-
 .../pw/SharedTrainingWrapper.java | 2 +-
 .../training/SharedTrainingMaster.java | 2 +-
 .../training/SharedTrainingWorker.java | 4 +-
 .../spark/parameterserver/BaseSparkTest.java | 6 +-
 .../train/GradientSharingTrainingTest.java | 12 +-
 .../spark/api/worker/NetBroadcastTuple.java | 10 +-
 ...eVaeReconstructionProbWithKeyFunction.java | 2 +-
 .../score/BaseVaeScoreWithKeyFunction.java | 2 +-
 .../impl/evaluation/EvaluationRunner.java | 4 +-
 ...VaeReconstructionErrorWithKeyFunction.java | 2 +-
 ...GVaeReconstructionProbWithKeyFunction.java | 2 +-
 .../impl/multilayer/SparkDl4jMultiLayer.java | 16 +-
 .../scoring/FeedForwardWithKeyFunction.java | 6 +-
 .../scoring/ScoreExamplesFunction.java | 4 +-
 .../scoring/ScoreExamplesWithKeyFunction.java | 6 +-
 .../scoring/ScoreFlatMapFunction.java | 4 +-
 ...VaeReconstructionErrorWithKeyFunction.java | 6 +-
 .../VaeReconstructionProbWithKeyFunction.java | 6 +-
 .../ParameterAveragingTrainingMaster.java | 6 +-
 .../deeplearning4j/spark/BaseSparkTest.java | 6 +-
 .../spark/TestEarlyStoppingSpark.java | 12 +-
 .../TestEarlyStoppingSparkCompGraph.java | 10 +-
 .../org/deeplearning4j/spark/TestKryo.java | 6 +-
 .../spark/datavec/TestPreProcessedData.java | 8 +-
 .../spark/impl/TestKryoWarning.java | 6 +-
 .../impl/customlayer/TestCustomLayer.java | 6 +-
 .../impl/graph/TestSparkComputationGraph.java | 10 +-
 .../spark/impl/misc/TestFrozenLayers.java | 12 +-
 .../impl/multilayer/TestMiscFunctions.java | 12 +-
 .../multilayer/TestSparkDl4jMultiLayer.java | 4 +-
 ...arameterAveragingSparkVsSingleMachine.java | 16 +-
 ...TestSparkMultiLayerParameterAveraging.java | 52 +-
 .../stats/TestTrainingStatsCollection.java | 4 +-
 .../spark/ui/TestListeners.java | 4 +-
 .../network/MultiLayerNetworkHandler.java | 6 +-
 .../ActorCriticFactoryCompGraphStdConv.java | 2 +-
 .../ActorCriticFactoryCompGraphStdDense.java | 2 +-
 .../ActorCriticFactorySeparateStdDense.java | 10 +-
 .../rl4j/network/ac/ActorCriticSeparate.java | 10 +-
 .../deeplearning4j/rl4j/network/dqn/DQN.java | 6 +-
 .../rl4j/network/dqn/DQNFactoryStdConv.java | 8 +-
 .../rl4j/network/dqn/DQNFactoryStdDense.java | 6 +-
 .../org/deeplearning4j/rl4j/NStepRnn.java | 2 +-
 .../deeplearning4j/rl4j/RobotLakeExample.java | 2 +-
 .../org/deeplearning4j/rl4j/TMazeExample.java | 2 +-
 .../network/MultiLayerNetworkHandlerTest.java | 8 +-
 .../rl4j/policy/PolicyTest.java | 4 +-
 README.md | 6 +-
 .../src/test/java/net/brutex/gan/App.java | 139 +-
 .../src/test/java/net/brutex/gan/GAN.java | 29 +-
 .../net/brutex/gan/MnistDCGANExample.java | 14 +-
 .../java/net/brutex/gan/MnistSimpleGAN.java | 9 +-
 .../test/java/net/brutex/spark/BrianTest.java | 9 +-
 .../java/net/brutex/spark/BrianTest2.java | 7 +-
 .../java/net/brutex/spark/TestServer.java | 20 +-
 .../java/net/brutex/spark/TestServer2.java | 12 +-
 .../IntegrationTestBaselineGenerator.java | 22 +-
 .../integration/IntegrationTestRunner.java | 92 +-
 .../deeplearning4j/integration/TestCase.java | 4 +-
 .../deeplearning4j/integration/TestUtils.java | 16 +-
 .../testcases/dl4j/CNN1DTestCases.java | 4 +-
 .../testcases/dl4j/CNN2DTestCases.java | 20 +-
 .../testcases/dl4j/CNN3DTestCases.java | 7 +-
.../testcases/dl4j/MLPTestCases.java | 11 +- .../testcases/dl4j/RNNTestCases.java | 18 +- .../testcases/dl4j/UnsupervisedTestCases.java | 4 +- build.gradle | 19 +- .../activations/impl/ActivationIdentity.java | 2 +- .../linalg/workspace/BaseWorkspaceMgr.java | 2 +- .../nd4j/linalg/workspace/WorkspaceMgr.java | 10 +- cavis-dnn/cavis-dnn-core/build.gradle | 1 + .../net/brutex/ai/dnn/core/util/ANSI.java | 52 + .../listener/SystemInfoFilePrintListener.java | 16 +- .../listener/SystemInfoPrintListener.java | 16 +- .../core/util/ModelGuesser.java | 14 +- .../LayerHelperValidationUtil.java | 29 +- .../java/org/deeplearning4j/RandomTests.java | 8 +- .../java/org/deeplearning4j/TestUtils.java | 34 +- .../iterator/DataSetIteratorTest.java | 18 +- .../earlystopping/TestEarlyStopping.java | 99 +- .../TestEarlyStoppingCompGraph.java | 34 +- .../org/deeplearning4j/eval/EvalTest.java | 38 +- .../eval/EvaluationToolsTests.java | 7 +- .../java/org/deeplearning4j/eval/ROCTest.java | 6 +- .../eval/RegressionEvalTest.java | 5 +- .../exceptions/TestInvalidConfigurations.java | 41 +- .../exceptions/TestInvalidInput.java | 29 +- .../gradientcheck/AttentionLayerTest.java | 25 +- .../gradientcheck/BNGradientCheckTest.java | 101 +- .../gradientcheck/CNN1DGradientCheckTest.java | 45 +- .../gradientcheck/CNN3DGradientCheckTest.java | 46 +- .../gradientcheck/CNNGradientCheckTest.java | 131 +- .../CapsnetGradientCheckTest.java | 10 +- .../gradientcheck/DropoutGradientCheck.java | 13 +- .../GlobalPoolingGradientCheckTests.java | 21 +- .../gradientcheck/GradientCheckTests.java | 71 +- .../GradientCheckTestsComputationGraph.java | 108 +- .../GradientCheckTestsMasking.java | 19 +- .../gradientcheck/LRNGradientCheckTests.java | 8 +- .../gradientcheck/LSTMGradientCheckTests.java | 52 +- .../LossFunctionGradientCheck.java | 11 +- .../NoBiasGradientCheckTests.java | 15 +- .../OutputLayerGradientChecks.java | 19 +- .../gradientcheck/RnnGradientChecks.java | 13 +- .../UtilLayerGradientChecks.java | 15 +- .../gradientcheck/VaeGradientCheckTests.java | 19 +- .../gradientcheck/YoloGradientCheckTests.java | 8 +- .../ComputationGraphConfigurationTest.java | 30 +- .../org/deeplearning4j/nn/conf/JsonTest.java | 6 +- .../MultiLayerNeuralNetConfigurationTest.java | 727 +- .../MultiNeuralNetConfLayerBuilderTest.java | 16 +- .../nn/conf/NeuralNetConfigurationTest.java | 62 +- .../nn/conf/constraints/TestConstraints.java | 43 +- .../nn/conf/dropout/TestDropout.java | 39 +- .../nn/conf/graph/ElementWiseVertexTest.java | 18 +- .../nn/conf/graph/ShiftVertexTest.java | 8 +- .../nn/conf/layers/LayerBuilderTest.java | 16 +- .../nn/conf/layers/LayerConfigTest.java | 198 +- .../layers/LayerConfigValidationTest.java | 51 +- .../conf/preprocessor/CNNProcessorTest.java | 43 +- .../preprocessor/CustomPreprocessorTest.java | 15 +- .../conf/preprocessor/TestPreProcessors.java | 47 +- .../nn/conf/weightnoise/TestWeightNoise.java | 31 +- .../deeplearning4j/nn/dtypes/DTypeTests.java | 130 +- .../nn/graph/ComputationGraphTestRNN.java | 46 +- .../nn/graph/TestCompGraphCNN.java | 8 +- .../nn/graph/TestCompGraphUnsupervised.java | 26 +- .../nn/graph/TestComputationGraphNetwork.java | 216 +- .../nn/graph/TestSetGetParameters.java | 18 +- .../nn/graph/TestVariableLengthTSCG.java | 24 +- .../nn/graph/graphnodes/TestGraphNodes.java | 10 +- .../nn/layers/ActivationLayerTest.java | 59 +- .../nn/layers/AutoEncoderTest.java | 2 +- ...t.java => BaseLayerConfigurationTest.java} | 19 +- .../nn/layers/CacheModeTest.java | 32 +- .../nn/layers/CenterLossOutputLayerTest.java 
| 8 +- .../nn/layers/DropoutLayerTest.java | 31 +- .../nn/layers/FrozenLayerTest.java | 132 +- .../layers/FrozenLayerWithBackpropTest.java | 112 +- .../nn/layers/OutputLayerTest.java | 71 +- .../nn/layers/RepeatVectorTest.java | 6 +- .../deeplearning4j/nn/layers/SeedTest.java | 14 +- .../deeplearning4j/nn/layers/TestDropout.java | 10 +- .../nn/layers/capsule/CapsNetMNISTTest.java | 5 +- .../nn/layers/capsule/CapsuleLayerTest.java | 5 +- .../capsule/CapsuleStrengthLayerTest.java | 5 +- .../layers/capsule/PrimaryCapsulesTest.java | 5 +- .../convolution/ConvDataFormatTests.java | 34 +- .../layers/convolution/Convolution3DTest.java | 8 +- .../ConvolutionLayerSetupTest.java | 190 +- .../convolution/ConvolutionLayerTest.java | 134 +- .../LocallyConnectedLayerTest.java | 16 +- .../layers/convolution/SpaceToDepthTest.java | 6 +- .../convolution/SubsamplingLayerTest.java | 38 +- .../convolution/TestConvolutionModes.java | 39 +- .../layers/convolution/Upsampling1DTest.java | 6 +- .../layers/convolution/Upsampling2DTest.java | 6 +- .../layers/custom/TestCustomActivation.java | 13 +- .../nn/layers/custom/TestCustomLayers.java | 41 +- .../custom/testclasses/CustomLayer.java | 10 +- .../custom/testclasses/CustomLayerImpl.java | 3 +- .../custom/testclasses/CustomOutputLayer.java | 11 +- .../testclasses/CustomOutputLayerImpl.java | 3 +- .../layers/feedforward/dense/DenseTest.java | 13 +- .../embedding/EmbeddingLayerTest.java | 106 +- .../normalization/BatchNormalizationTest.java | 55 +- .../normalization/LocalResponseTest.java | 11 +- .../objdetect/TestYolo2OutputLayer.java | 13 +- .../nn/layers/ocnn/OCNNOutputLayerTest.java | 31 +- .../pooling/GlobalPoolingMaskingTests.java | 17 +- .../layers/recurrent/BidirectionalTest.java | 42 +- .../GravesBidirectionalLSTMTest.java | 61 +- .../nn/layers/recurrent/GravesLSTMTest.java | 27 +- .../layers/recurrent/MaskZeroLayerTest.java | 6 +- .../layers/recurrent/RnnDataFormatTests.java | 18 +- .../recurrent/TestLastTimeStepLayer.java | 4 +- .../recurrent/TestRecurrentWeightInit.java | 2 +- .../nn/layers/recurrent/TestRnnLayers.java | 29 +- .../nn/layers/recurrent/TestSimpleRnn.java | 5 +- .../layers/recurrent/TestTimeDistributed.java | 19 +- .../samediff/SameDiffCustomLayerTests.java | 7 +- .../nn/layers/samediff/TestSameDiffConv.java | 17 +- .../nn/layers/samediff/TestSameDiffDense.java | 59 +- .../samediff/TestSameDiffDenseVertex.java | 14 +- .../layers/samediff/TestSameDiffLambda.java | 16 +- .../layers/samediff/TestSameDiffOutput.java | 17 +- .../testlayers/MinimalSameDiffDense.java | 2 +- .../samediff/testlayers/SameDiffConv.java | 7 +- .../samediff/testlayers/SameDiffDense.java | 7 +- .../testlayers/SameDiffDenseVertex.java | 4 +- .../testlayers/SameDiffMSEOutputLayer.java | 2 +- .../nn/layers/variational/TestVAE.java | 58 +- .../nn/misc/CloseNetworkTests.java | 11 +- .../deeplearning4j/nn/misc/LargeNetTest.java | 12 +- .../deeplearning4j/nn/misc/TestLrChanges.java | 64 +- .../nn/misc/TestMemoryReports.java | 21 +- .../nn/misc/TestNetConversion.java | 17 +- .../nn/misc/WorkspaceTests.java | 68 +- .../nn/mkldnn/ValidateMKLDNN.java | 15 +- .../nn/multilayer/BackPropMLPTest.java | 13 +- .../nn/multilayer/MultiLayerTest.java | 2745 +++--- .../nn/multilayer/MultiLayerTestRNN.java | 72 +- .../nn/multilayer/TestMasking.java | 23 +- .../nn/multilayer/TestSetGetParameters.java | 47 +- .../nn/multilayer/TestVariableLengthTS.java | 38 +- .../rl/TestMultiModelGradientApplication.java | 37 +- .../nn/transferlearning/TestFrozenLayers.java | 15 +- 
.../TestTransferLearningModelSerializer.java | 17 +- .../TransferLearningCompGraphTest.java | 116 +- .../TransferLearningComplex.java | 34 +- .../TransferLearningHelperTest.java | 78 +- .../TransferLearningMLNTest.java | 256 +- .../nn/updater/TestGradientNormalization.java | 30 +- .../nn/updater/TestUpdaters.java | 142 +- .../nn/updater/custom/TestCustomUpdater.java | 25 +- .../nn/weights/WeightInitIdentityTest.java | 6 +- .../solver/BackTrackLineSearchTest.java | 49 +- .../optimize/solver/TestOptimizers.java | 391 +- .../listener/TestCheckpointListener.java | 17 +- .../listener/TestFailureListener.java | 13 +- .../optimizer/listener/TestListeners.java | 47 +- .../parallelism/RandomTests.java | 19 +- .../listener/TestSystemInfoPrintListener.java | 5 +- .../regressiontest/MiscRegressionTests.java | 9 +- .../regressiontest/RegressionTest050.java | 32 +- .../regressiontest/RegressionTest060.java | 44 +- .../regressiontest/RegressionTest071.java | 44 +- .../regressiontest/RegressionTest080.java | 44 +- .../regressiontest/RegressionTest100a.java | 30 +- .../regressiontest/RegressionTest100b3.java | 36 +- .../regressiontest/RegressionTest100b4.java | 76 +- .../regressiontest/RegressionTest100b6.java | 76 +- .../customlayer100a/CustomLayer.java | 17 +- .../customlayer100a/CustomLayerImpl.java | 18 +- .../CompareTrainingImplementations.java | 9 +- .../util/CrashReportingUtilTest.java | 23 +- .../deeplearning4j/util/ModelGuesserTest.java | 31 +- .../util/ModelSerializerTest.java | 47 +- .../util/ModelValidatorTests.java | 5 +- .../cuda/recurrent/CudnnLSTMHelper.java | 2 +- .../nn/modelimport/keras/KerasLayer.java | 29 +- .../nn/modelimport/keras/KerasModel.java | 18 +- .../modelimport/keras/KerasModelImport.java | 10 +- .../keras/KerasSequentialModel.java | 40 +- .../keras/config/KerasLayerConfiguration.java | 2 +- .../modelimport/keras/layers/TFOpLayer.java | 18 +- .../keras/layers/TFOpLayerImpl.java | 3 +- .../keras/layers/core/KerasDense.java | 4 +- .../keras/layers/recurrent/KerasLSTM.java | 12 +- .../layers/recurrent/KerasSimpleRnn.java | 12 +- .../layers/wrappers/KerasBidirectional.java | 15 +- .../keras/utils/KerasLayerUtils.java | 8 +- .../keras/utils/KerasModelUtils.java | 6 +- .../nn/modelimport/keras/KerasTestUtils.java | 6 +- .../configurations/FullModelComparisons.java | 9 +- .../Keras1ModelConfigurationTest.java | 6 +- .../Keras2ModelConfigurationTest.java | 11 +- .../KerasInitilizationTest.java | 2 +- .../configurations/KerasModelImportTest.java | 13 +- .../keras/e2e/KerasCustomLayerTest.java | 3 +- .../keras/e2e/KerasModelEndToEndTest.java | 14 +- .../advanced/activation/KerasPReLUTest.java | 2 +- .../KerasAtrousConvolution1DTest.java | 2 +- .../KerasAtrousConvolution2DTest.java | 2 +- .../convolution/KerasConvolution1DTest.java | 2 +- .../convolution/KerasConvolution2DTest.java | 2 +- .../convolution/KerasConvolution3DTest.java | 2 +- .../convolution/KerasDeconvolution2DTest.java | 2 +- .../KerasDepthwiseConvolution2DTest.java | 2 +- .../KerasSeparableConvolution2DTest.java | 2 +- .../keras/layers/core/KerasDenseTest.java | 2 +- .../keras/layers/recurrent/KerasLSTMTest.java | 3 +- .../layers/recurrent/KerasSimpleRnnTest.java | 2 +- .../models/word2vec/Word2VecTestsSmall.java | 8 +- cavis-dnn/cavis-dnn-nn-api/build.gradle | 27 + .../java/net/brutex/ai/dnn/api/Layer.java | 40 + .../brutex/ai/dnn/api/LayerConfiguration.java | 51 + .../net/brutex/ai/dnn/api/NeuralNetwork.java | 69 + .../dnn/api/NeuralNetworkConfiguration.java | 43 + cavis-dnn/cavis-dnn-nn/build.gradle | 5 +- 
.../java/net/brutex/ai/dnn/api/Animal.java | 68 + .../ai/dnn/api/IActivationFunction.java | 57 + .../java/net/brutex/ai/dnn/api/ILayer.java | 46 + .../ai/dnn/api/ILayerConfiguration.java | 27 + .../java/net/brutex/ai/dnn/api/IModel.java | 301 + .../dnn/api/INeuralNetworkConfiguration.java | 39 + .../java/net/brutex/ai/dnn/api/IUnit.java | 47 + .../java/net/brutex/ai/dnn/api/LayerType.java | 52 + .../main/java/net/brutex/ai/dnn/api/NN.java | 43 + .../ai/dnn/conf/layer/Layer_Descriptions.md | 31 + .../dnn/networks/ArtificialNeuralNetwork.java | 153 + .../EarlyStoppingConfiguration.java | 6 +- .../EarlyStoppingModelSaver.java | 4 +- .../earlystopping/EarlyStoppingResult.java | 4 +- .../listener/EarlyStoppingListener.java | 4 +- .../saver/InMemoryModelSaver.java | 4 +- .../scorecalc/AutoencoderScoreCalculator.java | 12 +- .../ClassificationScoreCalculator.java | 4 +- .../scorecalc/DataSetLossCalculator.java | 10 +- .../scorecalc/ROCScoreCalculator.java | 4 +- .../scorecalc/RegressionScoreCalculator.java | 4 +- .../scorecalc/ScoreCalculator.java | 4 +- .../VAEReconErrorScoreCalculator.java | 12 +- .../VAEReconProbScoreCalculator.java | 12 +- .../base/BaseIEvaluationScoreCalculator.java | 4 +- .../scorecalc/base/BaseScoreCalculator.java | 4 +- .../trainer/BaseEarlyStoppingTrainer.java | 18 +- .../trainer/EarlyStoppingTrainer.java | 5 +- .../trainer/IEarlyStoppingTrainer.java | 4 +- .../gradientcheck/GradientCheckUtil.java | 80 +- .../nn/adapters/YoloModelAdapter.java | 4 +- .../nn/api/AbstractParamInitializer.java | 39 + .../org/deeplearning4j/nn/api/Classifier.java | 3 +- .../{Trainable.java => ITrainableLayer.java} | 38 +- ...va => ITraininableLayerConfiguration.java} | 7 +- .../java/org/deeplearning4j/nn/api/Layer.java | 391 +- .../java/org/deeplearning4j/nn/api/Model.java | 237 - .../deeplearning4j/nn/api/ModelAdapter.java | 5 +- .../deeplearning4j/nn/api/NeuralNetwork.java | 104 - .../nn/api/ParamInitializer.java | 34 +- .../org/deeplearning4j/nn/api/Updater.java | 6 +- .../nn/api/layers/LayerConstraint.java | 2 +- .../nn/api/layers/RecurrentLayer.java | 8 +- .../conf/ComputationGraphConfiguration.java | 97 +- .../nn/conf/MultiLayerConfiguration.java | 769 -- .../NeuralNetBaseBuilderConfiguration.java | 950 ++ .../nn/conf/NeuralNetConfiguration.java | 2213 +++-- .../nn/conf/constraint/BaseConstraint.java | 4 +- .../nn/conf/constraint/MaxNormConstraint.java | 4 +- .../conf/constraint/MinMaxNormConstraint.java | 6 +- .../conf/constraint/UnitNormConstraint.java | 4 +- .../nn/conf/graph/LayerVertex.java | 45 +- .../nn/conf/layers/ActivationLayer.java | 23 +- .../nn/conf/layers/AutoEncoder.java | 12 +- ...Layer.java => BaseLayerConfiguration.java} | 73 +- .../nn/conf/layers/BaseUpsamplingLayer.java | 4 +- .../nn/conf/layers/BatchNormalization.java | 13 +- .../nn/conf/layers/CapsuleLayer.java | 6 +- .../nn/conf/layers/CapsuleStrengthLayer.java | 2 +- .../nn/conf/layers/CenterLossOutputLayer.java | 10 +- .../nn/conf/layers/Cnn3DLossLayer.java | 12 +- .../nn/conf/layers/CnnLossLayer.java | 12 +- .../nn/conf/layers/Convolution1DLayer.java | 11 +- .../nn/conf/layers/Convolution3D.java | 10 +- .../nn/conf/layers/ConvolutionLayer.java | 17 +- .../nn/conf/layers/Deconvolution2D.java | 11 +- .../nn/conf/layers/Deconvolution3D.java | 12 +- .../nn/conf/layers/DenseLayer.java | 21 +- .../conf/layers/DepthwiseConvolution2D.java | 10 +- .../nn/conf/layers/DropoutLayer.java | 15 +- .../nn/conf/layers/EmbeddingLayer.java | 11 +- .../conf/layers/EmbeddingSequenceLayer.java | 14 +- 
.../nn/conf/layers/FeedForwardLayer.java | 8 +- .../nn/conf/layers/GlobalPoolingLayer.java | 12 +- .../conf/layers/GravesBidirectionalLSTM.java | 12 +- .../nn/conf/layers/GravesLSTM.java | 16 +- .../deeplearning4j/nn/conf/layers/LSTM.java | 11 +- .../{Layer.java => LayerConfiguration.java} | 114 +- .../nn/conf/layers/LayerValidation.java | 16 +- .../layers/LocalResponseNormalization.java | 29 +- .../nn/conf/layers/LocallyConnected1D.java | 7 +- .../nn/conf/layers/LocallyConnected2D.java | 7 +- .../nn/conf/layers/LossLayer.java | 10 +- .../nn/conf/layers/NoParamLayer.java | 26 +- .../nn/conf/layers/OutputLayer.java | 10 +- .../nn/conf/layers/PReLULayer.java | 13 +- .../nn/conf/layers/PrimaryCapsules.java | 4 +- .../conf/layers/RecurrentAttentionLayer.java | 4 +- .../nn/conf/layers/RnnLossLayer.java | 13 +- .../nn/conf/layers/RnnOutputLayer.java | 9 +- .../conf/layers/SeparableConvolution2D.java | 11 +- .../nn/conf/layers/SpaceToBatchLayer.java | 12 +- .../nn/conf/layers/SpaceToDepthLayer.java | 12 +- .../nn/conf/layers/Subsampling1DLayer.java | 10 +- .../nn/conf/layers/Subsampling3DLayer.java | 12 +- .../nn/conf/layers/SubsamplingLayer.java | 13 +- .../nn/conf/layers/Upsampling1D.java | 11 +- .../nn/conf/layers/Upsampling2D.java | 10 +- .../nn/conf/layers/Upsampling3D.java | 12 +- .../nn/conf/layers/ZeroPadding1DLayer.java | 12 +- .../nn/conf/layers/ZeroPadding3DLayer.java | 12 +- .../nn/conf/layers/ZeroPaddingLayer.java | 12 +- .../conf/layers/convolutional/Cropping1D.java | 14 +- .../conf/layers/convolutional/Cropping2D.java | 14 +- .../conf/layers/convolutional/Cropping3D.java | 14 +- .../misc/ElementWiseMultiplicationLayer.java | 14 +- .../nn/conf/layers/misc/FrozenLayer.java | 72 +- .../layers/misc/FrozenLayerWithBackprop.java | 36 +- .../nn/conf/layers/misc/RepeatVector.java | 11 +- .../layers/objdetect/Yolo2OutputLayer.java | 27 +- .../conf/layers/recurrent/Bidirectional.java | 44 +- .../conf/layers/recurrent/LastTimeStep.java | 13 +- .../nn/conf/layers/recurrent/SimpleRnn.java | 10 +- .../layers/recurrent/TimeDistributed.java | 16 +- .../samediff/AbstractSameDiffLayer.java | 23 +- .../layers/samediff/SameDiffLambdaLayer.java | 2 +- .../layers/samediff/SameDiffLambdaVertex.java | 2 +- .../conf/layers/samediff/SameDiffLayer.java | 10 +- .../layers/samediff/SameDiffOutputLayer.java | 9 +- .../conf/layers/samediff/SameDiffVertex.java | 23 +- .../nn/conf/layers/util/MaskLayer.java | 8 +- .../nn/conf/layers/util/MaskZeroLayer.java | 18 +- .../variational/VariationalAutoencoder.java | 11 +- .../conf/layers/wrapper/BaseWrapperLayer.java | 105 - .../BaseWrapperLayerConfiguration.java | 196 + .../nn/conf/memory/NetworkMemoryReport.java | 2 +- .../nn/conf/misc/DummyConfig.java | 4 +- .../nn/conf/ocnn/OCNNOutputLayer.java | 16 +- .../conf/serde/BaseNetConfigDeserializer.java | 71 +- ...utationGraphConfigurationDeserializer.java | 45 +- .../nn/conf/serde/JsonMappers.java | 6 +- ...> NeuralNetConfigurationDeserializer.java} | 52 +- .../conf/serde/legacy/LegacyJsonFormat.java | 2 +- .../nn/conf/weightnoise/DropConnect.java | 6 +- .../nn/conf/weightnoise/IWeightNoise.java | 2 +- .../nn/conf/weightnoise/WeightNoise.java | 6 +- .../nn/graph/ComputationGraph.java | 415 +- .../nn/graph/vertex/BaseGraphVertex.java | 24 +- .../nn/graph/vertex/BaseWrapperVertex.java | 14 +- .../nn/graph/vertex/GraphVertex.java | 10 +- .../nn/graph/vertex/impl/FrozenVertex.java | 30 +- .../nn/graph/vertex/impl/LayerVertex.java | 22 +- .../impl/rnn/DuplicateToTimeSeriesVertex.java | 4 +- 
.../vertex/impl/rnn/LastTimeStepVertex.java | 4 +- .../impl/rnn/ReverseTimeSeriesVertex.java | 4 +- .../nn/layers/AbstractLayer.java | 945 +- .../nn/layers/ActivationLayer.java | 30 +- .../deeplearning4j/nn/layers/BaseLayer.java | 975 +- .../nn/layers/BaseOutputLayer.java | 26 +- .../nn/layers/BasePretrainNetwork.java | 39 +- .../nn/layers/DropoutLayer.java | 10 +- .../deeplearning4j/nn/layers/FrozenLayer.java | 18 +- .../nn/layers/FrozenLayerWithBackprop.java | 14 +- .../deeplearning4j/nn/layers/LossLayer.java | 23 +- .../deeplearning4j/nn/layers/OutputLayer.java | 3 +- .../nn/layers/RepeatVector.java | 16 +- .../nn/layers/convolution/Cnn3DLossLayer.java | 62 +- .../nn/layers/convolution/CnnLossLayer.java | 30 +- .../convolution/Convolution1DLayer.java | 36 +- .../convolution/Convolution3DLayer.java | 18 +- .../layers/convolution/ConvolutionLayer.java | 90 +- .../layers/convolution/Cropping1DLayer.java | 12 +- .../layers/convolution/Cropping2DLayer.java | 11 +- .../layers/convolution/Cropping3DLayer.java | 7 +- .../convolution/Deconvolution2DLayer.java | 42 +- .../convolution/Deconvolution3DLayer.java | 54 +- .../DepthwiseConvolution2DLayer.java | 42 +- .../SeparableConvolution2DLayer.java | 46 +- .../nn/layers/convolution/SpaceToBatch.java | 24 +- .../nn/layers/convolution/SpaceToDepth.java | 18 +- .../convolution/ZeroPadding1DLayer.java | 7 +- .../convolution/ZeroPadding3DLayer.java | 7 +- .../layers/convolution/ZeroPaddingLayer.java | 16 +- .../subsampling/Subsampling1DLayer.java | 10 +- .../subsampling/Subsampling3DLayer.java | 38 +- .../subsampling/SubsamplingLayer.java | 80 +- .../convolution/upsampling/Upsampling1D.java | 8 +- .../convolution/upsampling/Upsampling2D.java | 16 +- .../convolution/upsampling/Upsampling3D.java | 16 +- .../nn/layers/feedforward/PReLU.java | 6 +- .../feedforward/autoencoder/AutoEncoder.java | 14 +- .../layers/feedforward/dense/DenseLayer.java | 12 +- .../ElementWiseMultiplicationLayer.java | 8 +- .../feedforward/embedding/EmbeddingLayer.java | 12 +- .../embedding/EmbeddingSequenceLayer.java | 34 +- .../nn/layers/mkldnn/MKLDNNLSTMHelper.java | 2 +- .../normalization/BatchNormalization.java | 87 +- .../LocalResponseNormalization.java | 43 +- .../nn/layers/objdetect/Yolo2OutputLayer.java | 46 +- .../nn/layers/ocnn/OCNNOutputLayer.java | 36 +- .../nn/layers/ocnn/OCNNParamInitializer.java | 35 +- .../nn/layers/pooling/GlobalPoolingLayer.java | 27 +- .../layers/recurrent/BaseRecurrentLayer.java | 8 +- .../layers/recurrent/BidirectionalLayer.java | 189 +- .../recurrent/GravesBidirectionalLSTM.java | 30 +- .../nn/layers/recurrent/GravesLSTM.java | 12 +- .../nn/layers/recurrent/LSTM.java | 17 +- .../nn/layers/recurrent/LSTMHelpers.java | 9 +- .../layers/recurrent/LastTimeStepLayer.java | 4 +- .../nn/layers/recurrent/RnnLossLayer.java | 30 +- .../nn/layers/recurrent/RnnOutputLayer.java | 24 +- .../nn/layers/recurrent/SimpleRnn.java | 14 +- .../layers/samediff/SameDiffGraphVertex.java | 8 +- .../nn/layers/samediff/SameDiffLayer.java | 30 +- .../layers/samediff/SameDiffOutputLayer.java | 32 +- .../training/CenterLossOutputLayer.java | 38 +- .../nn/layers/util/MaskLayer.java | 3 +- .../variational/VariationalAutoencoder.java | 355 +- .../nn/layers/wrapper/BaseWrapperLayer.java | 576 +- .../nn/multilayer/MultiLayerNetwork.java | 8482 +++++++++-------- .../BatchNormalizationParamInitializer.java | 50 +- .../params/BidirectionalParamInitializer.java | 48 +- .../nn/params/CenterLossParamInitializer.java | 18 +- .../params/Convolution3DParamInitializer.java | 25 +- 
.../params/ConvolutionParamInitializer.java | 52 +- .../Deconvolution3DParamInitializer.java | 26 +- .../params/DeconvolutionParamInitializer.java | 12 +- .../nn/params/DefaultParamInitializer.java | 74 +- .../DepthwiseConvolutionParamInitializer.java | 44 +- .../params/ElementWiseParamInitializer.java | 16 +- .../nn/params/EmptyParamInitializer.java | 26 +- .../params/FrozenLayerParamInitializer.java | 120 +- ...ozenLayerWithBackpropParamInitializer.java | 42 +- ...avesBidirectionalLSTMParamInitializer.java | 40 +- .../nn/params/GravesLSTMParamInitializer.java | 36 +- .../nn/params/LSTMParamInitializer.java | 43 +- .../nn/params/PReLUParamInitializer.java | 41 +- .../nn/params/PretrainParamInitializer.java | 17 +- .../nn/params/SameDiffParamInitializer.java | 30 +- .../SeparableConvolutionParamInitializer.java | 50 +- .../nn/params/SimpleRnnParamInitializer.java | 39 +- ...ariationalAutoencoderParamInitializer.java | 27 +- .../params/WrapperLayerParamInitializer.java | 64 +- .../FineTuneConfiguration.java | 1479 +-- .../nn/transferlearning/TransferLearning.java | 181 +- .../TransferLearningHelper.java | 27 +- .../nn/updater/BaseMultiLayerUpdater.java | 57 +- .../nn/updater/LayerUpdater.java | 12 +- .../nn/updater/MultiLayerUpdater.java | 10 +- .../nn/updater/UpdaterBlock.java | 21 +- .../nn/updater/UpdaterCreator.java | 13 +- .../nn/updater/UpdaterUtils.java | 10 +- .../graph/ComputationGraphUpdater.java | 21 +- .../org/deeplearning4j/optimize/Solver.java | 8 +- .../optimize/api/BaseTrainingListener.java | 16 +- .../optimize/api/ConvexOptimizer.java | 4 +- .../optimize/api/IterationListener.java | 4 +- .../optimize/api/TrainingListener.java | 28 +- .../listeners/CheckpointListener.java | 28 +- .../CollectScoresIterationListener.java | 6 +- .../listeners/CollectScoresListener.java | 6 +- .../ComposableIterationListener.java | 4 +- .../listeners/EvaluativeListener.java | 12 +- .../listeners/FailureTestingListener.java | 34 +- .../listeners/PerformanceListener.java | 8 +- .../listeners/ScoreIterationListener.java | 8 +- .../listeners/ScoreToChartListener.java | 6 +- .../listeners/SleepyTrainingListener.java | 16 +- .../listeners/TimeIterationListener.java | 6 +- .../callbacks/EvaluationCallback.java | 4 +- .../callbacks/ModelSavingCallback.java | 6 +- .../optimize/solvers/BackTrackLineSearch.java | 15 +- .../optimize/solvers/BaseOptimizer.java | 39 +- .../optimize/solvers/ConjugateGradient.java | 4 +- .../optimize/solvers/LBFGS.java | 6 +- .../optimize/solvers/LineGradientDescent.java | 4 +- .../solvers/StochasticGradientDescent.java | 10 +- .../EncodedGradientsAccumulator.java | 6 +- .../util/Convolution1DUtils.java | 7 +- .../deeplearning4j/util/ConvolutionUtils.java | 18 +- .../util/CrashReportingUtil.java | 62 +- .../util/DL4JModelValidator.java | 12 +- .../deeplearning4j/util/ModelSerializer.java | 36 +- .../org/deeplearning4j/util/NetworkUtils.java | 67 +- .../deeplearning4j/util/OutputLayerUtil.java | 17 +- .../deeplearning4j/util/TimeSeriesUtils.java | 6 +- .../main/resources/simplelogger.properties | 25 + .../java/net/brutex/ai/dnn/api/dnnTest.java | 142 + .../brutex/ai/dnn/conf/layer/FFLayerTest.java | 36 + .../nn/layers/HelperUtilsTest.java | 2 +- .../ParameterServerTrainer.java | 14 +- .../ParameterServerTrainerContext.java | 10 +- .../ParameterServerParallelWrapperTest.java | 8 +- .../EarlyStoppingParallelTrainer.java | 18 +- .../parallelism/InplaceParallelInference.java | 31 +- .../parallelism/ParallelInference.java | 36 +- .../parallelism/ParallelWrapper.java | 21 +- 
.../factory/DefaultTrainerContext.java | 10 +- .../factory/SymmetricTrainerContext.java | 13 +- .../parallelism/factory/TrainerContext.java | 10 +- .../parallelism/main/ParallelWrapperMain.java | 4 +- .../parallelism/trainer/DefaultTrainer.java | 36 +- .../parallelism/trainer/SymmetricTrainer.java | 4 +- .../parallelism/trainer/Trainer.java | 10 +- .../InplaceParallelInferenceTest.java | 14 +- .../parallelism/ParallelInferenceTest.java | 29 +- .../parallelism/ParallelWrapperTest.java | 11 +- .../parallelism/TestListeners.java | 35 +- .../TestParallelEarlyStopping.java | 11 +- .../TestParallelEarlyStoppingUI.java | 5 +- .../factory/DefaultTrainerContextTest.java | 11 +- .../factory/SymmetricTrainerContextTest.java | 8 +- .../main/ParallelWrapperMainTest.java | 8 +- .../spark/api/TrainingHook.java | 10 +- .../spark/api/worker/NetBroadcastTuple.java | 10 +- .../BaseSparkEarlyStoppingTrainer.java | 4 +- ...eVaeReconstructionProbWithKeyFunction.java | 2 +- .../score/BaseVaeScoreWithKeyFunction.java | 2 +- .../impl/evaluation/EvaluationRunner.java | 10 +- .../impl/graph/SparkComputationGraph.java | 20 +- ...VaeReconstructionErrorWithKeyFunction.java | 4 +- ...GVaeReconstructionProbWithKeyFunction.java | 4 +- .../impl/multilayer/SparkDl4jMultiLayer.java | 28 +- .../scoring/FeedForwardWithKeyFunction.java | 6 +- .../scoring/ScoreExamplesFunction.java | 5 +- .../scoring/ScoreExamplesWithKeyFunction.java | 6 +- .../scoring/ScoreFlatMapFunction.java | 4 +- ...VaeReconstructionErrorWithKeyFunction.java | 11 +- .../VaeReconstructionProbWithKeyFunction.java | 8 +- .../ParameterAveragingTrainingMaster.java | 14 +- .../ParameterAveragingTrainingWorker.java | 12 +- .../deeplearning4j/spark/BaseSparkTest.java | 7 +- .../spark/TestEarlyStoppingSpark.java | 21 +- .../TestEarlyStoppingSparkCompGraph.java | 20 +- .../org/deeplearning4j/spark/TestKryo.java | 13 +- .../spark/datavec/TestPreProcessedData.java | 7 +- .../spark/impl/TestKryoWarning.java | 5 +- .../impl/customlayer/TestCustomLayer.java | 5 +- .../impl/customlayer/layer/CustomLayer.java | 10 +- .../customlayer/layer/CustomLayerImpl.java | 3 +- .../impl/graph/TestSparkComputationGraph.java | 17 +- .../spark/impl/misc/TestFrozenLayers.java | 31 +- .../impl/multilayer/TestMiscFunctions.java | 15 +- .../multilayer/TestSparkDl4jMultiLayer.java | 4 +- ...arameterAveragingSparkVsSingleMachine.java | 72 +- ...TestSparkMultiLayerParameterAveraging.java | 101 +- .../stats/TestTrainingStatsCollection.java | 4 +- .../spark/ui/TestListeners.java | 3 +- .../ParameterServerTrainingHook.java | 10 +- .../pw/SharedTrainingWrapper.java | 24 +- .../training/SharedTrainingMaster.java | 10 +- .../training/SharedTrainingWorker.java | 4 +- .../spark/parameterserver/BaseSparkTest.java | 7 +- .../train/GradientSharingTrainingTest.java | 30 +- .../deeplearning4j/plot/BarnesHutTsne.java | 227 +- .../cpu/nativecpu/ops/CpuOpContext.java | 3 +- .../nd4j/jita/workspace/CudaWorkspace.java | 768 +- .../workspace/CudaWorkspaceDeallocator.java | 2 +- .../ops/executioner/CudaExecutioner.java | 2 +- .../ops/executioner/CudaOpContext.java | 3 +- .../ConvolutionalIterationListener.java | 14 +- .../org/deeplearning4j/ui/ManualTests.java | 29 +- .../ui/weights/TestConvolutionalListener.java | 11 +- .../ui/model/stats/BaseStatsListener.java | 71 +- .../ui/model/stats/impl/SbeStatsReport.java | 4 +- .../ui/stats/TestStatsListener.java | 11 +- .../ui/stats/TestTransferStatsCollection.java | 5 +- .../ui/module/train/TrainModule.java | 33 +- .../ui/module/train/TrainModuleUtils.java | 42 +- 
.../main/resources/templates/SameDiffUI.html | 2 +- .../templates/TrainingModel.html.ftl | 6 +- .../deeplearning4j/ui/TestRemoteReceiver.java | 7 +- .../org/deeplearning4j/ui/TestVertxUI.java | 24 +- .../deeplearning4j/ui/TestVertxUIManual.java | 13 +- .../ui/TestVertxUIMultiSession.java | 13 +- .../deeplearning4j/zoo/InstantiableModel.java | 6 +- .../java/org/deeplearning4j/zoo/ZooModel.java | 6 +- .../org/deeplearning4j/zoo/model/AlexNet.java | 21 +- .../deeplearning4j/zoo/model/Darknet19.java | 14 +- .../zoo/model/FaceNetNN4Small2.java | 9 +- .../zoo/model/InceptionResNetV1.java | 12 +- .../org/deeplearning4j/zoo/model/LeNet.java | 15 +- .../org/deeplearning4j/zoo/model/NASNet.java | 8 +- .../deeplearning4j/zoo/model/ResNet50.java | 11 +- .../deeplearning4j/zoo/model/SimpleCNN.java | 17 +- .../deeplearning4j/zoo/model/SqueezeNet.java | 12 +- .../zoo/model/TextGenerationLSTM.java | 15 +- .../deeplearning4j/zoo/model/TinyYOLO.java | 9 +- .../org/deeplearning4j/zoo/model/UNet.java | 9 +- .../org/deeplearning4j/zoo/model/VGG16.java | 8 +- .../org/deeplearning4j/zoo/model/VGG19.java | 10 +- .../deeplearning4j/zoo/model/Xception.java | 11 +- .../org/deeplearning4j/zoo/model/YOLO2.java | 9 +- .../org/deeplearning4j/zoo/TestImageNet.java | 6 +- .../deeplearning4j/zoo/TestInstantiation.java | 9 +- .../org/deeplearning4j/zoo/TestUtils.java | 8 +- settings.gradle | 2 + 672 files changed, 20808 insertions(+), 17478 deletions(-) create mode 100644 cavis-dnn/cavis-dnn-core/src/main/java/net/brutex/ai/dnn/core/util/ANSI.java rename cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/{BaseLayerTest.java => BaseLayerConfigurationTest.java} (82%) create mode 100644 cavis-dnn/cavis-dnn-nn-api/build.gradle create mode 100644 cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/Layer.java create mode 100644 cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/LayerConfiguration.java create mode 100644 cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetwork.java create mode 100644 cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetworkConfiguration.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/Animal.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IActivationFunction.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayer.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayerConfiguration.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IModel.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/INeuralNetworkConfiguration.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IUnit.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/LayerType.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/NN.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/conf/layer/Layer_Descriptions.md create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/networks/ArtificialNeuralNetwork.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/AbstractParamInitializer.java rename cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/{Trainable.java => ITrainableLayer.java} (70%) rename cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/{TrainingConfig.java => 
ITraininableLayerConfiguration.java} (91%) delete mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Model.java delete mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/NeuralNetwork.java delete mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/MultiLayerConfiguration.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetBaseBuilderConfiguration.java rename cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/{BaseLayer.java => BaseLayerConfiguration.java} (89%) rename cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/{Layer.java => LayerConfiguration.java} (79%) delete mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayer.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayerConfiguration.java rename cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/{MultiLayerConfigurationDeserializer.java => NeuralNetConfigurationDeserializer.java} (79%) create mode 100644 cavis-dnn/cavis-dnn-nn/src/main/resources/simplelogger.properties create mode 100644 cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/api/dnnTest.java create mode 100644 cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/conf/layer/FFLayerTest.java diff --git a/.old/deeplearning4j/deeplearning4j-dataimport-solrj/src/test/java/org/deeplearning4j/nn/dataimport/solr/client/solrj/io/stream/TupleStreamDataSetIteratorTest.java b/.old/deeplearning4j/deeplearning4j-dataimport-solrj/src/test/java/org/deeplearning4j/nn/dataimport/solr/client/solrj/io/stream/TupleStreamDataSetIteratorTest.java index 5c21d354a..67ad09bd1 100644 --- a/.old/deeplearning4j/deeplearning4j-dataimport-solrj/src/test/java/org/deeplearning4j/nn/dataimport/solr/client/solrj/io/stream/TupleStreamDataSetIteratorTest.java +++ b/.old/deeplearning4j/deeplearning4j-dataimport-solrj/src/test/java/org/deeplearning4j/nn/dataimport/solr/client/solrj/io/stream/TupleStreamDataSetIteratorTest.java @@ -205,7 +205,7 @@ public class TupleStreamDataSetIteratorTest extends SolrCloudTestCase { public void modelFitTest() throws Exception { final MultiLayerNetwork model = new MultiLayerNetwork( - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .list( new OutputLayer.Builder(LossFunction.MSE) .nIn(3) diff --git a/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamIntegrationTest.java b/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamIntegrationTest.java index 7c0505605..c2c260fdd 100644 --- a/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamIntegrationTest.java +++ b/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamIntegrationTest.java @@ -35,7 +35,7 @@ import org.apache.solr.client.solrj.request.UpdateRequest; import org.apache.solr.cloud.SolrCloudTestCase; import org.apache.solr.common.params.ModifiableSolrParams; import org.deeplearning4j.nn.api.Model; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -153,7 +153,7 @@ public class ModelTupleStreamIntegrationTest extends SolrCloudTestCase { final int numInputs = 3; final int numOutputs = 2; - final MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list( new OutputLayer.Builder() .nIn(numInputs) diff --git a/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamTest.java b/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamTest.java index a638fa14a..c6a05607b 100644 --- a/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamTest.java +++ b/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/handler/ModelTupleStreamTest.java @@ -43,7 +43,7 @@ import org.apache.solr.core.SolrResourceLoader; import org.apache.solr.handler.SolrDefaultStreamFactory; import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -242,7 +242,7 @@ public class ModelTupleStreamTest { protected Model buildMultiLayerNetworkModel(int numInputs, int numOutputs) throws Exception { - final MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list( new OutputLayer.Builder() .nIn(numInputs) @@ -274,7 +274,7 @@ public class ModelTupleStreamTest { protected Model buildComputationGraphModel(int numInputs, int numOutputs) throws Exception { - final ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + final ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("inputLayer") .addLayer("outputLayer", diff --git a/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/ltr/model/ScoringModelTest.java b/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/ltr/model/ScoringModelTest.java index 7f77c6c0c..1986511bb 100644 --- a/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/ltr/model/ScoringModelTest.java +++ b/.old/deeplearning4j/deeplearning4j-modelexport-solr/src/test/java/org/deeplearning4j/nn/modelexport/solr/ltr/model/ScoringModelTest.java @@ -42,7 +42,7 @@ import org.apache.solr.ltr.norm.Normalizer; import org.apache.solr.request.SolrQueryRequest; import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -192,7 +192,7 @@ public class ScoringModelTest { protected Model buildMultiLayerNetworkModel(int numFeatures) throws Exception { - final 
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list( new OutputLayer.Builder().nIn(numFeatures).nOut(1).lossFunction(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).build() ) @@ -217,7 +217,7 @@ public class ScoringModelTest { protected Model buildComputationGraphModel(int numFeatures) throws Exception { - final ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + final ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("inputLayer") .addLayer("outputLayer", diff --git a/.old/deeplearning4j/deeplearning4j-remote/deeplearning4j-json-server/src/test/java/org/deeplearning4j/remote/JsonModelServerTest.java b/.old/deeplearning4j/deeplearning4j-remote/deeplearning4j-json-server/src/test/java/org/deeplearning4j/remote/JsonModelServerTest.java index 1de161c2e..dd75472c6 100644 --- a/.old/deeplearning4j/deeplearning4j-remote/deeplearning4j-json-server/src/test/java/org/deeplearning4j/remote/JsonModelServerTest.java +++ b/.old/deeplearning4j/deeplearning4j-remote/deeplearning4j-json-server/src/test/java/org/deeplearning4j/remote/JsonModelServerTest.java @@ -23,7 +23,7 @@ import lombok.extern.slf4j.Slf4j; import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.layers.*; @@ -70,7 +70,7 @@ public class JsonModelServerTest extends BaseDL4JTest { private static final MultiLayerNetwork model; static { - val conf = new NeuralNetConfiguration.Builder() + val conf = NeuralNetConfiguration.builder() .seed(119) .updater(new Adam(0.119f)) .weightInit(WeightInit.XAVIER) @@ -541,7 +541,7 @@ public class JsonModelServerTest extends BaseDL4JTest { @Test public void testMlnMnist() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new DenseLayer.Builder().nIn(784).nOut(10).build()) .layer(new LossLayer.Builder().activation(Activation.SOFTMAX).build()) @@ -597,7 +597,7 @@ public class JsonModelServerTest extends BaseDL4JTest { @Test public void testCompGraph() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("input1", "input2") .addLayer("L1", new DenseLayer.Builder().nIn(3).nOut(4).build(), "input1") @@ -652,7 +652,7 @@ public class JsonModelServerTest extends BaseDL4JTest { @Test public void testCompGraph_1() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.01)) .graphBuilder() .addInputs("input") diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java index f3f2cee80..23bc7566d 100644 --- 
a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java @@ -425,7 +425,7 @@ public class SharedTrainingWrapper { .setTrainingWorkspaceMode(trainingConfiguration.getWorkspaceMode()); ((ComputationGraph) originalModel).setGradientsAccumulator(accumulator); } else if (model instanceof MultiLayerNetwork) { - ((MultiLayerNetwork) originalModel).getLayerWiseConfigurations() + ((MultiLayerNetwork) originalModel).getConfiguration() .setTrainingWorkspaceMode(trainingConfiguration.getWorkspaceMode()); ((MultiLayerNetwork) originalModel).setGradientsAccumulator(accumulator); } diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java index 2a17ab3e1..d02cb4234 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java @@ -261,7 +261,7 @@ public class SharedTrainingMaster extends BaseTrainingMaster extends BaseVa /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param useLogProbability If true: use log probability. False: use raw probability. 
* @param batchSize Batch size to use when scoring * @param numSamples Number of samples to use when calling {@link VariationalAutoencoder#reconstructionLogProbability(INDArray, int)} diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java index 4140b8a53..cfcc93b78 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java @@ -45,7 +45,7 @@ public abstract class BaseVaeScoreWithKeyFunction implements PairFlatMapFunct /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use when scoring */ public BaseVaeScoreWithKeyFunction(Broadcast params, Broadcast jsonConfig, int batchSize) { diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java index 8550c6e3c..426682d69 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java @@ -27,7 +27,7 @@ import org.deeplearning4j.datasets.iterator.IteratorDataSetIterator; import org.deeplearning4j.datasets.iterator.IteratorMultiDataSetIterator; import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.common.base.Preconditions; @@ -131,7 +131,7 @@ public class EvaluationRunner { cg.init(deviceLocalParams.get(), false); m = cg; } else { - MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(json.getValue()); + NeuralNetConfiguration conf = NeuralNetConfiguration.fromJson(json.getValue()); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(deviceLocalParams.get(), false); m = net; diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java index d8aadc3f1..e13b5f9b6 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java @@ -33,7 +33,7 @@ public class 
CGVaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWith /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use when scoring */ public CGVaeReconstructionErrorWithKeyFunction(Broadcast params, Broadcast jsonConfig, diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java index 57c568239..e9455092c 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java @@ -33,7 +33,7 @@ public class CGVaeReconstructionProbWithKeyFunction extends BaseVaeReconstruc /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param useLogProbability If true: use log probability. False: use raw probability. * @param batchSize Batch size to use when scoring * @param numSamples Number of samples to use when calling {@link VariationalAutoencoder#reconstructionLogProbability(INDArray, int)} diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java index be7780f2f..054520c70 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java @@ -35,7 +35,7 @@ import org.datavec.spark.util.BroadcastHadoopConfigHolder; import org.deeplearning4j.core.loader.DataSetLoader; import org.deeplearning4j.core.loader.MultiDataSetLoader; import org.deeplearning4j.core.loader.impl.SerializedDataSetLoader; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.spark.api.TrainingMaster; @@ -80,7 +80,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { public static final int DEFAULT_ROC_THRESHOLD_STEPS = 32; public static final int DEFAULT_EVAL_WORKERS = 4; private transient JavaSparkContext sc; - private MultiLayerConfiguration conf; + private NeuralNetConfiguration conf; private MultiLayerNetwork network; private double lastScore; private int defaultEvaluationWorkers = DEFAULT_EVAL_WORKERS; @@ -104,7 +104,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @param sparkContext the spark context to use * @param conf the configuration of the network */ - public SparkDl4jMultiLayer(SparkContext sparkContext, MultiLayerConfiguration conf, + public SparkDl4jMultiLayer(SparkContext sparkContext, NeuralNetConfiguration conf, TrainingMaster 
trainingMaster) { this(new JavaSparkContext(sparkContext), initNetwork(conf), trainingMaster); } @@ -115,14 +115,14 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @param sc the spark context to use * @param conf the configuration of the network */ - public SparkDl4jMultiLayer(JavaSparkContext sc, MultiLayerConfiguration conf, TrainingMaster trainingMaster) { + public SparkDl4jMultiLayer(JavaSparkContext sc, NeuralNetConfiguration conf, TrainingMaster trainingMaster) { this(sc.sc(), conf, trainingMaster); } public SparkDl4jMultiLayer(JavaSparkContext javaSparkContext, MultiLayerNetwork network, TrainingMaster trainingMaster) { sc = javaSparkContext; - this.conf = network.getLayerWiseConfigurations().clone(); + this.conf = network.getConfiguration().clone(); this.network = network; if (!network.isInitCalled()) network.init(); @@ -132,7 +132,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { SparkUtils.checkKryoConfiguration(javaSparkContext, log); } - private static MultiLayerNetwork initNetwork(MultiLayerConfiguration conf) { + private static MultiLayerNetwork initNetwork(NeuralNetConfiguration conf) { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); return net; @@ -315,8 +315,8 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @return the multi layer network that was fitDataSet */ public MultiLayerNetwork fitLabeledPoint(JavaRDD rdd) { - int nLayers = network.getLayerWiseConfigurations().getConfs().size(); - FeedForwardLayer ffl = (FeedForwardLayer) network.getLayerWiseConfigurations().getConf(nLayers - 1).getLayer(); + int nLayers = network.getConfiguration().getConfs().size(); + FeedForwardLayer ffl = (FeedForwardLayer) network.getConfiguration().getConf(nLayers - 1).getLayer(); JavaRDD ds = MLLibUtil.fromLabeledPoint(sc, rdd, ffl.getNOut()); return fit(ds); } diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java index 510f2e4d4..c064c81d0 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.api.java.function.PairFlatMapFunction; import org.apache.spark.broadcast.Broadcast; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.DataSetUtil; @@ -49,7 +49,7 @@ public class FeedForwardWithKeyFunction /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use for forward pass (use > 1 for efficiency) */ public FeedForwardWithKeyFunction(Broadcast params, Broadcast jsonConfig, int batchSize) { @@ -65,7 +65,7 @@ public class FeedForwardWithKeyFunction return Collections.emptyIterator(); } - MultiLayerNetwork network = new 
MultiLayerNetwork(MultiLayerConfiguration.fromJson(jsonConfig.getValue())); + MultiLayerNetwork network = new MultiLayerNetwork(NeuralNetConfiguration.fromJson(jsonConfig.getValue())); network.init(); INDArray val = params.value().unsafeDuplication(); if (val.length() != network.numParams(false)) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java index 6c3878da5..a8990125d 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java @@ -23,7 +23,7 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.api.java.function.DoubleFlatMapFunction; import org.apache.spark.api.java.function.FlatMapFunction; import org.apache.spark.broadcast.Broadcast; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.DataSet; @@ -60,7 +60,7 @@ public class ScoreExamplesFunction implements DoubleFlatMapFunction implements PairFlatMapFunction implements PairFlatMapFunction, DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate - MultiLayerNetwork network = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(json)); + MultiLayerNetwork network = new MultiLayerNetwork(NeuralNetConfiguration.fromJson(json)); network.init(); INDArray val = params.value().unsafeDuplication(); //.value() object will be shared by all executors on each machine -> OK, as params are not modified by score function if (val.length() != network.numParams(false)) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java index 3f7c5ba6c..95c0c721e 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.broadcast.Broadcast; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.spark.impl.common.score.BaseVaeScoreWithKeyFunction; @@ -36,7 +36,7 @@ public class VaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWithKe /** * @param params MultiLayerNetwork parameters - * @param jsonConfig 
MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use when scoring */ public VaeReconstructionErrorWithKeyFunction(Broadcast params, Broadcast jsonConfig, @@ -47,7 +47,7 @@ public class VaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWithKe @Override public VariationalAutoencoder getVaeLayer() { MultiLayerNetwork network = - new MultiLayerNetwork(MultiLayerConfiguration.fromJson((String) jsonConfig.getValue())); + new MultiLayerNetwork(NeuralNetConfiguration.fromJson((String) jsonConfig.getValue())); network.init(); INDArray val = ((INDArray) params.value()).unsafeDuplication(); if (val.length() != network.numParams(false)) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java index d9dd8a155..18890d020 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.broadcast.Broadcast; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.spark.impl.common.score.BaseVaeReconstructionProbWithKeyFunction; @@ -34,7 +34,7 @@ public class VaeReconstructionProbWithKeyFunction extends BaseVaeReconstructi /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param useLogProbability If true: use log probability. False: use raw probability. 
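For reference, the executor-side pattern shared by these Spark scoring helpers after the refactor is always the same: deserialize the broadcast JSON into a NeuralNetConfiguration, build and initialise a MultiLayerNetwork, check the broadcast parameter vector against numParams, and bind it. The sketch below is illustrative only; the class and method names (ExecutorSideRestore, restore) are invented, while fromJson, init, numParams and setParameters are the calls used in the surrounding hunks.

    import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.api.ndarray.INDArray;

    /** Illustrative helper: rebuild a network on a Spark executor from broadcast state. */
    public final class ExecutorSideRestore {

        static MultiLayerNetwork restore(String jsonConfig, INDArray params) {
            // JSON now round-trips through NeuralNetConfiguration instead of MultiLayerConfiguration
            MultiLayerNetwork network = new MultiLayerNetwork(NeuralNetConfiguration.fromJson(jsonConfig));
            network.init();
            if (params.length() != network.numParams(false)) {
                throw new IllegalStateException("Broadcast parameter vector does not match the configuration");
            }
            network.setParameters(params);
            return network;
        }
    }
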
* @param batchSize Batch size to use when scoring * @param numSamples Number of samples to use when calling {@link VariationalAutoencoder#reconstructionLogProbability(INDArray, int)} @@ -47,7 +47,7 @@ public class VaeReconstructionProbWithKeyFunction extends BaseVaeReconstructi @Override public VariationalAutoencoder getVaeLayer() { MultiLayerNetwork network = - new MultiLayerNetwork(MultiLayerConfiguration.fromJson((String) jsonConfig.getValue())); + new MultiLayerNetwork(NeuralNetConfiguration.fromJson((String) jsonConfig.getValue())); network.init(); INDArray val = ((INDArray) params.value()).unsafeDuplication(); if (val.length() != network.numParams(false)) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java index 8d8532e0b..411422884 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java @@ -41,7 +41,7 @@ import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.StatsStorageRouterProvider; import org.deeplearning4j.core.storage.StorageMetaData; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.TrainingListener; @@ -274,7 +274,7 @@ public class ParameterAveragingTrainingMaster @Override public ParameterAveragingTrainingWorker getWorkerInstance(SparkDl4jMultiLayer network) { - NetBroadcastTuple tuple = new NetBroadcastTuple(network.getNetwork().getLayerWiseConfigurations(), + NetBroadcastTuple tuple = new NetBroadcastTuple(network.getNetwork().getConfiguration(), network.getNetwork().params(), network.getNetwork().getUpdater().getStateViewArray()); if (collectTrainingStats) @@ -726,7 +726,7 @@ public class ParameterAveragingTrainingMaster if (params != null) { //Params may be null for edge case (empty RDD) if (network != null) { - MultiLayerConfiguration conf = network.getNetwork().getLayerWiseConfigurations(); + NeuralNetConfiguration conf = network.getNetwork().getConfiguration(); int numUpdates = averagingFrequency; conf.setIterationCount(conf.getIterationCount() + numUpdates); } else { diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/BaseSparkTest.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/BaseSparkTest.java index e00f8d6d3..686560ffc 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/BaseSparkTest.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/BaseSparkTest.java @@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.datavec.spark.util.SerializableHadoopConfig; import org.deeplearning4j.BaseDL4JTest; -import 
org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.spark.impl.multilayer.SparkDl4jMultiLayer; import org.deeplearning4j.spark.impl.paramavg.ParameterAveragingTrainingMaster; @@ -129,8 +129,8 @@ public abstract class BaseSparkTest extends BaseDL4JTest implements Serializable return 4; } - protected MultiLayerConfiguration getBasicConf() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + protected NeuralNetConfiguration getBasicConf() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .updater(new Nesterovs(0.1, 0.9)).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) .activation(Activation.TANH).build()) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSpark.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSpark.java index ed8de3623..7154808f6 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSpark.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSpark.java @@ -35,7 +35,7 @@ import org.deeplearning4j.earlystopping.termination.MaxTimeIterationTerminationC import org.deeplearning4j.earlystopping.termination.ScoreImprovementEpochTerminationCondition; import org.deeplearning4j.earlystopping.trainer.IEarlyStoppingTrainer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -68,7 +68,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) @@ -123,7 +123,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(10.0)) //Intentionally huge LR .weightInit(WeightInit.XAVIER).list() @@ -163,7 +163,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) @@ -209,7 +209,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new 
NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) @@ -246,7 +246,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java index 3de17a742..76fa0e65b 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java @@ -71,7 +71,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { //Spark tests don't run on windows return; } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3) @@ -124,7 +124,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(2.0)) //Intentionally huge LR .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") @@ -165,7 +165,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder() .addInputs("in") @@ -213,7 +213,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder() .addInputs("in") @@ -253,7 +253,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { //Spark tests don't run on windows return; } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() 
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestKryo.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestKryo.java index 33023d605..47f1807d0 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestKryo.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/TestKryo.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark; import org.apache.spark.serializer.SerializerInstance; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; import org.deeplearning4j.nn.conf.graph.*; @@ -68,14 +68,14 @@ public class TestKryo extends BaseSparkKryoTest { Map m = new HashMap<>(); m.put(0, 0.5); m.put(10, 0.1); - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder() .updater(new Nadam(new MapSchedule(ScheduleType.ITERATION,m))).list().layer(0, new OutputLayer.Builder().nIn(10).nOut(10).build()) .build(); testSerialization(mlc, si); - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder() .dist(new UniformDistribution(-1, 1)) .updater(new Adam(new MapSchedule(ScheduleType.ITERATION,m))) .graphBuilder() diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java index 714c3ffb6..946f8816f 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java @@ -30,7 +30,7 @@ import org.datavec.api.records.reader.impl.csv.CSVRecordReader; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.spark.BaseSparkTest; @@ -84,7 +84,7 @@ public class TestPreProcessedData extends BaseSparkTest { iter.next().save(f2); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.RMSPROP) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(Updater.RMSPROP) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(3) .activation(Activation.TANH).build()) @@ -134,7 +134,7 @@ public class TestPreProcessedData extends 
BaseSparkTest { iter.next().save(f2); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.RMSPROP) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(Updater.RMSPROP) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(3) @@ -188,7 +188,7 @@ public class TestPreProcessedData extends BaseSparkTest { mds.save(f2); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.RMSPROP) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(Updater.RMSPROP) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(3) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java index ec2195081..6aa102fb4 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java @@ -23,7 +23,7 @@ package org.deeplearning4j.spark.impl; import org.apache.spark.SparkConf; import org.apache.spark.api.java.JavaSparkContext; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.spark.api.TrainingMaster; @@ -40,7 +40,7 @@ public class TestKryoWarning { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new OutputLayer.Builder().nIn(10).nOut(10).build()) .build(); @@ -57,7 +57,7 @@ public class TestKryoWarning { try { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(10).nOut(10).build(), "in").setOutputs("0") .build(); diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java index b3c96333d..1b7bf1052 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.impl.customlayer; import com.sun.jna.Platform; import org.apache.spark.api.java.JavaRDD; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -51,8 +51,8 @@ public class TestCustomLayer extends BaseSparkTest { } //Basic test - checks whether exceptions etc are thrown with custom layers + spark //Custom layers are tested more extensively in dl4j core - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new Sgd(0.1)).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new CustomLayer(3.14159)).layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java index cc6e5f9ec..7a28146fb 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java @@ -77,7 +77,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { public static ComputationGraph getBasicNetIris2Class() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in") .addLayer("l0", new DenseLayer.Builder().nIn(4).nOut(10).build(), "in") .addLayer("l1", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) @@ -104,7 +104,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { while (iter.hasNext()) list.add(iter.next()); - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .updater(new Sgd(0.1)) .graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out", @@ -138,7 +138,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { @Test public void testDistributedScoring() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().l1(0.1).l2(0.1) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().l1(0.1).l2(0.1) .seed(123).updater(new Nesterovs(0.1, 0.9)).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) @@ -217,7 +217,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { //@Ignore("AB 2019/05/23 - Failing on CI only - passing locally. 
Possible precision or threading issue") public void testSeedRepeatability() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(Updater.RMSPROP) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(Updater.RMSPROP) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(4) @@ -414,7 +414,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { JavaRDD rdd = sc.parallelize(l); // simple model - val modelConf = new NeuralNetConfiguration.Builder() + val modelConf = NeuralNetConfiguration.builder() .updater(new Adam(0.01)) .weightInit(WeightInit.XAVIER_UNIFORM) .biasInit(0) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java index 887696af3..87493404e 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java @@ -53,7 +53,7 @@ public class TestFrozenLayers extends BaseSparkTest { @Test public void testSparkFrozenLayers() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.TANH); FineTuneConfiguration finetune = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)).build(); @@ -74,7 +74,7 @@ public class TestFrozenLayers extends BaseSparkTest { MultiLayerNetwork withFrozen = new TransferLearning.Builder(origModel).fineTuneConfiguration(finetune) .setFeatureExtractor(1).build(); - Map m = withFrozen.paramTable(); + Map m = withFrozen.getParamTable(); Map pCopy = new HashMap<>(); for (Map.Entry entry : m.entrySet()) { pCopy.put(entry.getKey(), entry.getValue().dup()); @@ -110,7 +110,7 @@ public class TestFrozenLayers extends BaseSparkTest { MultiLayerNetwork fitted = sNet.getNetwork(); - Map fittedParams = fitted.paramTable(); + Map fittedParams = fitted.getParamTable(); for (Map.Entry entry : fittedParams.entrySet()) { INDArray orig = pCopy.get(entry.getKey()); @@ -136,7 +136,7 @@ public class TestFrozenLayers extends BaseSparkTest { int nIn = 6; int nOut = 3; - ComputationGraph origModel = new ComputationGraph(new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + ComputationGraph origModel = new ComputationGraph(NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.TANH).graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(6).nOut(5).build(), "in") .addLayer("1", new DenseLayer.Builder().nIn(5).nOut(4).build(), "0") @@ -151,7 +151,7 @@ public class TestFrozenLayers extends BaseSparkTest { ComputationGraph withFrozen = new TransferLearning.GraphBuilder(origModel).fineTuneConfiguration(finetune) .setFeatureExtractor("1").build(); - Map m = withFrozen.paramTable(); + Map m = withFrozen.getParamTable(); Map pCopy = new HashMap<>(); for (Map.Entry entry : m.entrySet()) { pCopy.put(entry.getKey(), entry.getValue().dup()); @@ -187,7 
+187,7 @@ public class TestFrozenLayers extends BaseSparkTest { ComputationGraph fitted = sNet.getNetwork(); - Map fittedParams = fitted.paramTable(); + Map fittedParams = fitted.getParamTable(); for (Map.Entry entry : fittedParams.entrySet()) { INDArray orig = pCopy.get(entry.getKey()); diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java index 550ccc9b2..adc3d5508 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java @@ -23,7 +23,7 @@ package org.deeplearning4j.spark.impl.multilayer; import org.apache.spark.api.java.JavaPairRDD; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.variational.GaussianReconstructionDistribution; @@ -57,7 +57,7 @@ public class TestMiscFunctions extends BaseSparkTest { @Test public void testFeedForwardWithKey() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(3).nOut(3) .activation(Activation.SOFTMAX).build()) @@ -107,7 +107,7 @@ public class TestMiscFunctions extends BaseSparkTest { @Test public void testFeedForwardWithKeyInputMask() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .list() .layer( new LSTM.Builder().nIn(4).nOut(3).build()) .layer(new GlobalPoolingLayer(PoolingType.AVG)) @@ -162,7 +162,7 @@ public class TestMiscFunctions extends BaseSparkTest { @Test public void testFeedForwardWithKeyGraph() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in1", "in2") .addLayer("0", new DenseLayer.Builder().nIn(4).nOut(3).build(), "in1") .addLayer("1", new DenseLayer.Builder().nIn(4).nOut(3).build(), "in2").addLayer("2", @@ -220,7 +220,7 @@ public class TestMiscFunctions extends BaseSparkTest { int nIn = 10; - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().list() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .reconstructionDistribution( new GaussianReconstructionDistribution(Activation.IDENTITY)) @@ -259,7 +259,7 @@ public class TestMiscFunctions extends BaseSparkTest { int nIn = 10; - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration mlc = 
NeuralNetConfiguration.builder() .list().layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .reconstructionDistribution(new LossFunctionWrapper( diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java index c64618557..e66e8bb9d 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java @@ -25,7 +25,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.spark.api.java.JavaRDD; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -102,7 +102,7 @@ public class TestSparkDl4jMultiLayer extends BaseSparkTest { //---------------------------------- //Create network configuration and conduct network training - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java index cbe7247bd..e5faa2884 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java @@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -63,9 +63,9 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { } - private static MultiLayerConfiguration getConf(int seed, IUpdater updater) { + private static NeuralNetConfiguration getConf(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).list() 
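The getConf / getConfCNN helpers being rewritten here show the full builder migration in one place: NeuralNetConfiguration.builder() replaces new NeuralNetConfiguration.Builder(), the object returned by .list()...build() is now a NeuralNetConfiguration rather than a MultiLayerConfiguration, and .inputType(...) replaces .setInputType(...) (visible in the CNN variant just below). The condensed sketch that follows is a hedged illustration of that usage, not part of the patch; the helper class name and layer sizes are arbitrary.

    import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
    import org.deeplearning4j.nn.conf.inputs.InputType;
    import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
    import org.deeplearning4j.nn.conf.layers.OutputLayer;
    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.lossfunctions.LossFunctions;

    final class BuilderMigrationSketch {

        static MultiLayerNetwork smallCnn() {
            NeuralNetConfiguration conf = NeuralNetConfiguration.builder()   // was: new NeuralNetConfiguration.Builder()
                    .seed(12345)
                    .list()
                    .layer(0, new ConvolutionLayer.Builder().nOut(3).kernelSize(2, 2).stride(1, 1).build())
                    .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nOut(10).build())
                    .inputType(InputType.convolutional(10, 10, 3))           // was: setInputType(...)
                    .build();                                                // now builds a NeuralNetConfiguration directly
            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();
            return net;
        }
    }
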
.layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1, new OutputLayer.Builder() @@ -74,9 +74,9 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { return conf; } - private static MultiLayerConfiguration getConfCNN(int seed, IUpdater updater) { + private static NeuralNetConfiguration getConfCNN(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).list() .layer(0, new ConvolutionLayer.Builder().nOut(3).kernelSize(2, 2).stride(1, 1).padding(0, 0) @@ -85,13 +85,13 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { .activation(Activation.TANH).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nOut(10) .build()) - .setInputType(InputType.convolutional(10, 10, 3)).build(); + .inputType(InputType.convolutional(10, 10, 3)).build(); return conf; } private static ComputationGraphConfiguration getGraphConf(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).graphBuilder() .addInputs("in") @@ -105,7 +105,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { private static ComputationGraphConfiguration getGraphConfCNN(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).graphBuilder() .addInputs("in") diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java index bc1ced484..8907c2165 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java @@ -37,7 +37,7 @@ import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.BaseLayer; import org.deeplearning4j.nn.conf.layers.BatchNormalization; @@ -127,7 +127,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { .toJavaRDD().map(new TestFn()); DataSet d = new IrisDataSetIterator(150, 150).next(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + 
NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(100).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) @@ -162,8 +162,8 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { .getAbsolutePath()) .toJavaRDD().map(new TestFn()); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(123) .updater(new Adam(1e-6)) .weightInit(WeightInit.XAVIER) .list() @@ -275,7 +275,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) .activation(Activation.TANH).build()) @@ -300,7 +300,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { @Test public void testDistributedScoring() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l1(0.1).l2(0.1) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l1(0.1).l2(0.1) .seed(123).updater(new Nesterovs(0.1, 0.9)).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) .activation(Activation.TANH).build()) @@ -389,7 +389,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { list.add(iter.next()); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -453,7 +453,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -523,7 +523,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -611,7 +611,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new 
org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) @@ -684,7 +684,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(4) @@ -769,7 +769,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { list.add(iter.next()); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -791,13 +791,13 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { JavaRDD rdd = sc.parallelize(list); - assertEquals(0, sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount()); + assertEquals(0, sparkNet.getNetwork().getConfiguration().getIterationCount()); sparkNet.fit(rdd); assertEquals(minibatchesPerWorkerPerEpoch, - sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount()); + sparkNet.getNetwork().getConfiguration().getIterationCount()); sparkNet.fit(rdd); assertEquals(2 * minibatchesPerWorkerPerEpoch, - sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount()); + sparkNet.getNetwork().getConfiguration().getIterationCount()); sparkNet.getTrainingMaster().deleteTempFiles(sc); } @@ -819,7 +819,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { list.add(iter.next()); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) @@ -860,7 +860,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nIn = 8; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new RmsProp()) .weightInit(WeightInit.XAVIER).list() .layer(0, new VariationalAutoencoder.Builder().nIn(8).nOut(10).encoderLayerSizes(12) .decoderLayerSizes(13).reconstructionDistribution( @@ -896,7 +896,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nIn = 8; Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new RmsProp()) .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new VariationalAutoencoder.Builder().nIn(8).nOut(10).encoderLayerSizes(12) .decoderLayerSizes(13).reconstructionDistribution( @@ -936,8 +936,8 @@ public class TestSparkMultiLayerParameterAveraging 
extends BaseSparkTest { int nOut = 2; int layerSize = 10; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().nIn(layerSize).nOut(nOut) .activation(Activation.SOFTMAX).lossFunction( @@ -991,8 +991,8 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nOut = 3; int layerSize = 10; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().nIn(layerSize).nOut(nOut) .activation(Activation.SOFTMAX).lossFunction( @@ -1045,12 +1045,12 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new OutputLayer.Builder().nIn(4).nOut(3).build()) .build(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).build(), "in") @@ -1081,11 +1081,11 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { for(int i=0; i<3; i++ ){ - assertEquals(i, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount()); + assertEquals(i, sn1.getNetwork().getConfiguration().getEpochCount()); assertEquals(i, sn2.getNetwork().getConfiguration().getEpochCount()); sn1.fit(rdd); sn2.fit(rdd); - assertEquals(i+1, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount()); + assertEquals(i+1, sn1.getNetwork().getConfiguration().getEpochCount()); assertEquals(i+1, sn2.getNetwork().getConfiguration().getEpochCount()); } } diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java index f4939e369..fc446048f 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java @@ -26,7 +26,7 @@ import org.apache.spark.SparkConf; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -67,7 +67,7 @@ public class TestTrainingStatsCollection extends BaseSparkTest { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() 
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10).build()) diff --git a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java index 6f79d7595..6d8a9e9bd 100644 --- a/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java +++ b/.old/deeplearning4j/deeplearning4j-scaleout/spark/dl4j-spark/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java @@ -27,7 +27,7 @@ import org.deeplearning4j.core.storage.Persistable; import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -60,7 +60,7 @@ public class TestListeners extends BaseSparkTest { JavaSparkContext sc = getContext(); int nExecutors = numExecutors(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(100).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandler.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandler.java index 58389d74e..37bd5c2a9 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandler.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandler.java @@ -20,7 +20,7 @@ package org.deeplearning4j.rl4j.network; import lombok.Getter; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -41,7 +41,7 @@ public class MultiLayerNetworkHandler implements INetworkHandler { @Getter private final boolean recurrent; - private final MultiLayerConfiguration configuration; + private final NeuralNetConfiguration configuration; private final String labelName; private final String gradientName; private final int inputFeatureIdx; @@ -59,7 +59,7 @@ public class MultiLayerNetworkHandler implements INetworkHandler { int inputFeatureIdx) { this.model = model; recurrent = model.getOutputLayer() instanceof RnnOutputLayer; - configuration = model.getLayerWiseConfigurations(); + configuration = model.getConfiguration(); this.labelName = labelName; this.gradientName = gradientName; this.inputFeatureIdx = inputFeatureIdx; diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdConv.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdConv.java index 
cda26645f..ed8ceacda 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdConv.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdConv.java @@ -59,7 +59,7 @@ public class ActorCriticFactoryCompGraphStdConv implements ActorCriticFactoryCom int w = (((shapeInputs[2] - 8) / 4 + 1) - 4) / 2 + 1; ComputationGraphConfiguration.GraphBuilder confB = - new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam()) .weightInit(WeightInit.XAVIER) diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdDense.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdDense.java index 65e409b83..f05d43f3b 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdDense.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactoryCompGraphStdDense.java @@ -49,7 +49,7 @@ public class ActorCriticFactoryCompGraphStdDense implements ActorCriticFactoryCo nIn *= i; } ComputationGraphConfiguration.GraphBuilder confB = - new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam()) .weightInit(WeightInit.XAVIER) diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactorySeparateStdDense.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactorySeparateStdDense.java index 8f8b739d8..80cb6384b 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactorySeparateStdDense.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticFactorySeparateStdDense.java @@ -24,7 +24,7 @@ import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Value; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -56,7 +56,7 @@ public class ActorCriticFactorySeparateStdDense implements ActorCriticFactorySep for (int i : numInputs) { nIn *= i; } - NeuralNetConfiguration.ListBuilder confB = new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + NeuralNetConfiguration.ListBuilder confB = NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam()) .weightInit(WeightInit.XAVIER) @@ -81,7 +81,7 @@ public class ActorCriticFactorySeparateStdDense implements ActorCriticFactorySep } confB.setInputType(conf.isUseLSTM() ? 
InputType.recurrent(nIn) : InputType.feedForward(nIn)); - MultiLayerConfiguration mlnconf2 = confB.build(); + NeuralNetConfiguration mlnconf2 = confB.build(); MultiLayerNetwork model = new MultiLayerNetwork(mlnconf2); model.init(); if (conf.getListeners() != null) { @@ -90,7 +90,7 @@ public class ActorCriticFactorySeparateStdDense implements ActorCriticFactorySep model.setListeners(new ScoreIterationListener(Constants.NEURAL_NET_ITERATION_LISTENER)); } - NeuralNetConfiguration.ListBuilder confB2 = new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + NeuralNetConfiguration.ListBuilder confB2 = NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam()) .weightInit(WeightInit.XAVIER) @@ -116,7 +116,7 @@ public class ActorCriticFactorySeparateStdDense implements ActorCriticFactorySep } confB2.setInputType(conf.isUseLSTM() ? InputType.recurrent(nIn) : InputType.feedForward(nIn)); - MultiLayerConfiguration mlnconf = confB2.build(); + NeuralNetConfiguration mlnconf = confB2.build(); MultiLayerNetwork model2 = new MultiLayerNetwork(mlnconf); model2.init(); if (conf.getListeners() != null) { diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticSeparate.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticSeparate.java index 9daeb1af8..8ae8f1944 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticSeparate.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/ac/ActorCriticSeparate.java @@ -23,7 +23,7 @@ package org.deeplearning4j.rl4j.network.ac; import lombok.Getter; import org.apache.commons.lang3.NotImplementedException; import org.deeplearning4j.nn.api.NeuralNetwork; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -131,7 +131,7 @@ public class ActorCriticSeparate implements IAct @Override public void applyGradients(Gradients gradients) { int batchSize = (int)gradients.getBatchSize(); - MultiLayerConfiguration valueConf = valueNet.getLayerWiseConfigurations(); + NeuralNetConfiguration valueConf = valueNet.getConfiguration(); int valueIterationCount = valueConf.getIterationCount(); int valueEpochCount = valueConf.getEpochCount(); Gradient valueGradient = gradients.getGradient(CommonGradientNames.ActorCritic.Value); @@ -145,7 +145,7 @@ public class ActorCriticSeparate implements IAct } valueConf.setIterationCount(valueIterationCount + 1); - MultiLayerConfiguration policyConf = policyNet.getLayerWiseConfigurations(); + NeuralNetConfiguration policyConf = policyNet.getConfiguration(); int policyIterationCount = policyConf.getIterationCount(); int policyEpochCount = policyConf.getEpochCount(); Gradient policyGradient = gradients.getGradient(CommonGradientNames.ActorCritic.Policy); @@ -191,7 +191,7 @@ public class ActorCriticSeparate implements IAct @Deprecated public void applyGradient(Gradient[] gradient, int batchSize) { - MultiLayerConfiguration valueConf = valueNet.getLayerWiseConfigurations(); + NeuralNetConfiguration valueConf = valueNet.getConfiguration(); int valueIterationCount = valueConf.getIterationCount(); int valueEpochCount = valueConf.getEpochCount(); 
valueNet.getUpdater().update(valueNet, gradient[0], valueIterationCount, valueEpochCount, batchSize, LayerWorkspaceMgr.noWorkspaces()); @@ -204,7 +204,7 @@ public class ActorCriticSeparate implements IAct } valueConf.setIterationCount(valueIterationCount + 1); - MultiLayerConfiguration policyConf = policyNet.getLayerWiseConfigurations(); + NeuralNetConfiguration policyConf = policyNet.getConfiguration(); int policyIterationCount = policyConf.getIterationCount(); int policyEpochCount = policyConf.getEpochCount(); policyNet.getUpdater().update(policyNet, gradient[1], policyIterationCount, policyEpochCount, batchSize, LayerWorkspaceMgr.noWorkspaces()); diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQN.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQN.java index 8338884a2..c292432b2 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQN.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQN.java @@ -22,7 +22,7 @@ package org.deeplearning4j.rl4j.network.dqn; import org.apache.commons.lang3.NotImplementedException; import org.deeplearning4j.nn.api.NeuralNetwork; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; @@ -157,7 +157,7 @@ public class DQN implements IDQN { public void applyGradients(Gradients gradients) { Gradient qValues = gradients.getGradient(CommonGradientNames.QValues); - MultiLayerConfiguration mlnConf = mln.getLayerWiseConfigurations(); + NeuralNetConfiguration mlnConf = mln.getConfiguration(); int iterationCount = mlnConf.getIterationCount(); int epochCount = mlnConf.getEpochCount(); mln.getUpdater().update(mln, qValues, iterationCount, epochCount, (int)gradients.getBatchSize(), LayerWorkspaceMgr.noWorkspaces()); @@ -172,7 +172,7 @@ public class DQN implements IDQN { } public void applyGradient(Gradient[] gradient, int batchSize) { - MultiLayerConfiguration mlnConf = mln.getLayerWiseConfigurations(); + NeuralNetConfiguration mlnConf = mln.getConfiguration(); int iterationCount = mlnConf.getIterationCount(); int epochCount = mlnConf.getEpochCount(); mln.getUpdater().update(mln, gradient[0], iterationCount, epochCount, batchSize, LayerWorkspaceMgr.noWorkspaces()); diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdConv.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdConv.java index cf683aa35..bb64200bd 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdConv.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdConv.java @@ -24,7 +24,7 @@ import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Value; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -55,7 +55,7 @@ public class DQNFactoryStdConv implements DQNFactory { throw new AssertionError("Impossible to apply convolutional layer on a shape == 1"); - NeuralNetConfiguration.ListBuilder confB = new 
NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + NeuralNetConfiguration.ListBuilder confB = NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .l2(conf.getL2()) .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam()) @@ -71,8 +71,8 @@ public class DQNFactoryStdConv implements DQNFactory { confB.layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).nOut(numOutputs) .build()); - confB.setInputType(InputType.convolutional(shapeInputs[1], shapeInputs[2], shapeInputs[0])); - MultiLayerConfiguration mlnconf = confB.build(); + confB.inputType(InputType.convolutional(shapeInputs[1], shapeInputs[2], shapeInputs[0])); + NeuralNetConfiguration mlnconf = confB.build(); MultiLayerNetwork model = new MultiLayerNetwork(mlnconf); model.init(); if (conf.getListeners() != null) { diff --git a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdDense.java b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdDense.java index d35a5f064..15b33170a 100644 --- a/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdDense.java +++ b/.old/rl4j/rl4j-core/src/main/java/org/deeplearning4j/rl4j/network/dqn/DQNFactoryStdDense.java @@ -24,7 +24,7 @@ import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Value; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -54,7 +54,7 @@ public class DQNFactoryStdDense implements DQNFactory { nIn *= i; } - NeuralNetConfiguration.ListBuilder confB = new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + NeuralNetConfiguration.ListBuilder confB = NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(conf.getUpdater() != null ? 
conf.getUpdater() : new Adam()) .weightInit(WeightInit.XAVIER) @@ -82,7 +82,7 @@ public class DQNFactoryStdDense implements DQNFactory { ); - MultiLayerConfiguration mlnconf = confB.build(); + NeuralNetConfiguration mlnconf = confB.build(); MultiLayerNetwork model = new MultiLayerNetwork(mlnconf); model.init(); if (conf.getListeners() != null) { diff --git a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/NStepRnn.java b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/NStepRnn.java index 5cd403cee..dc23edd6e 100644 --- a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/NStepRnn.java +++ b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/NStepRnn.java @@ -134,7 +134,7 @@ public class NStepRnn { } private static ComputationGraphConfiguration.GraphBuilder buildBaseNetworkConfiguration(int lstmLayerSize, int dl1Size, int dl2Size) { - return new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + return NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Adam()) .weightInit(WeightInit.XAVIER) diff --git a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/RobotLakeExample.java b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/RobotLakeExample.java index 4f95632a0..adbd6a3c5 100644 --- a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/RobotLakeExample.java +++ b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/RobotLakeExample.java @@ -188,7 +188,7 @@ public class RobotLakeExample { } private static ComputationGraphConfiguration.GraphBuilder buildBaseNetworkConfiguration() { - return new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + return NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Adam()) .weightInit(WeightInit.XAVIER) diff --git a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/TMazeExample.java b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/TMazeExample.java index 617e436df..64c971e00 100644 --- a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/TMazeExample.java +++ b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/TMazeExample.java @@ -174,7 +174,7 @@ public class TMazeExample { } private static ComputationGraphConfiguration.GraphBuilder buildBaseNetworkConfiguration() { - return new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED) + return NeuralNetConfiguration.builder().seed(Constants.NEURAL_NET_SEED) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Adam()) .weightInit(WeightInit.XAVIER) diff --git a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandlerTest.java b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandlerTest.java index 0f5b51407..69d305b31 100644 --- a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandlerTest.java +++ b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/network/MultiLayerNetworkHandlerTest.java @@ -21,7 +21,7 @@ package org.deeplearning4j.rl4j.network; import org.deeplearning4j.nn.api.Updater; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer; import 
org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -52,7 +52,7 @@ public class MultiLayerNetworkHandlerTest { private MultiLayerNetwork modelMock; private TrainingListener trainingListenerMock; - private MultiLayerConfiguration configurationMock; + private NeuralNetConfiguration configurationMock; private MultiLayerNetworkHandler sut; @@ -60,10 +60,10 @@ public class MultiLayerNetworkHandlerTest { modelMock = mock(MultiLayerNetwork.class); trainingListenerMock = mock(TrainingListener.class); - configurationMock = mock(MultiLayerConfiguration.class); + configurationMock = mock(NeuralNetConfiguration.class); when(configurationMock.getIterationCount()).thenReturn(123); when(configurationMock.getEpochCount()).thenReturn(234); - when(modelMock.getLayerWiseConfigurations()).thenReturn(configurationMock); + when(modelMock.getConfiguration()).thenReturn(configurationMock); if(setupRecurrent) { when(modelMock.getOutputLayer()).thenReturn(new RnnOutputLayer(null, null)); diff --git a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/policy/PolicyTest.java b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/policy/PolicyTest.java index f74713466..f0ff3f641 100644 --- a/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/policy/PolicyTest.java +++ b/.old/rl4j/rl4j-core/src/test/java/org/deeplearning4j/rl4j/policy/PolicyTest.java @@ -166,9 +166,9 @@ public class PolicyTest { @Test public void testACPolicy() throws Exception { - ComputationGraph cg = new ComputationGraph(new NeuralNetConfiguration.Builder().seed(444).graphBuilder().addInputs("input") + ComputationGraph cg = new ComputationGraph(NeuralNetConfiguration.builder().seed(444).graphBuilder().addInputs("input") .addLayer("output", new OutputLayer.Builder().nOut(1).lossFunction(LossFunctions.LossFunction.XENT).activation(Activation.SIGMOID).build(), "input").setOutputs("output").build()); - MultiLayerNetwork mln = new MultiLayerNetwork(new NeuralNetConfiguration.Builder().seed(555).list() + MultiLayerNetwork mln = new MultiLayerNetwork(NeuralNetConfiguration.builder().seed(555).list() .layer(0, new OutputLayer.Builder().nOut(1).lossFunction(LossFunctions.LossFunction.XENT).activation(Activation.SIGMOID).build()).build()); ACPolicy policy = new ACPolicy(new DummyAC(mln), true, Nd4j.getRandom()); diff --git a/README.md b/README.md index e3eb6ba84..d1e64a639 100644 --- a/README.md +++ b/README.md @@ -48,12 +48,12 @@ Deeplearning4J offers a very high level API for defining even complex neural net you how LeNet, a convolutional neural network, is defined in DL4J. 
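The README hunk that follows is the clearest single illustration of the mechanical migration this patch applies everywhere: `MultiLayerConfiguration` plus `new NeuralNetConfiguration.Builder()....list()....setInputType(...)` becomes a single `NeuralNetConfiguration.builder()....layer(...)....inputType(...)` chain. As a rough sketch only — the layer sizes, seed and listener frequency below are illustrative and not taken from any file in this patch, while the class and method names are exactly the ones the diff itself introduces — the new-style configuration looks like this:

```java
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;

// Illustrative migration sketch; hyperparameters are placeholders, not from the patch.
NeuralNetConfiguration conf = NeuralNetConfiguration.builder()   // was: new NeuralNetConfiguration.Builder()
        .seed(123)
        .layer(0, new DenseLayer.Builder().nIn(4).nOut(10).activation(Activation.RELU).build())
        .layer(1, new OutputLayer.Builder().nIn(10).nOut(3).activation(Activation.SOFTMAX).build())
        .inputType(InputType.feedForward(4))                     // was: .setInputType(...)
        .build();                                                // note: no .list() call any more

MultiLayerNetwork net = new MultiLayerNetwork(conf);
net.init();
net.addTrainingListeners(new ScoreIterationListener(10));        // was: setListeners(...)
INDArray params = net.getModelParams();                          // was: params()
NeuralNetConfiguration cfg = net.getNetConfiguration();          // was: getLayerWiseConfigurations()
```

The same renames recur on the network side throughout the remaining hunks: `params()` becomes `getModelParams()`, `getLayerWiseConfigurations()` becomes `getNetConfiguration()` (or `getComputationGraphConfiguration()` for `ComputationGraph`), `setListeners(...)` becomes `addTrainingListeners(...)`, and layer configuration classes are referenced as `LayerConfiguration` rather than `Layer`.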
```java -MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() +NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) .updater(new Adam(1e-3)) - .list() + .layer(new ConvolutionLayer.Builder(5, 5) .stride(1,1) .nOut(20) @@ -78,7 +78,7 @@ MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() .nOut(outputNum) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(28,28,1)) + .inputType(InputType.convolutionalFlat(28,28,1)) .build(); ``` diff --git a/brutex-extended-tests/src/test/java/net/brutex/gan/App.java b/brutex-extended-tests/src/test/java/net/brutex/gan/App.java index bf4783145..0287d32a9 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/gan/App.java +++ b/brutex-extended-tests/src/test/java/net/brutex/gan/App.java @@ -21,15 +21,21 @@ package net.brutex.gan; +import java.awt.BorderLayout; +import java.awt.Dimension; +import java.awt.GridLayout; +import java.awt.Image; +import java.awt.image.BufferedImage; +import java.io.File; +import java.util.Arrays; import java.util.Random; -import javax.ws.rs.client.ClientBuilder; +import javax.swing.ImageIcon; +import javax.swing.JFrame; +import javax.swing.JLabel; +import javax.swing.JPanel; +import javax.swing.WindowConstants; import lombok.extern.slf4j.Slf4j; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; import org.apache.commons.lang3.ArrayUtils; -import org.datavec.api.Writable; -import org.datavec.api.records.reader.RecordReader; import org.datavec.api.split.FileSplit; import org.datavec.image.loader.NativeImageLoader; import org.datavec.image.recordreader.ImageRecordReader; @@ -37,41 +43,33 @@ import org.datavec.image.transform.ColorConversionTransform; import org.datavec.image.transform.ImageTransform; import org.datavec.image.transform.PipelineImageTransform; import org.datavec.image.transform.ResizeImageTransform; -import org.datavec.image.transform.ScaleImageTransform; import org.datavec.image.transform.ShowImageTransform; import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; -import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.*; +import org.deeplearning4j.nn.conf.layers.ActivationLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.DropoutLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop; +import org.deeplearning4j.nn.conf.weightnoise.WeightNoise; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; +import org.deeplearning4j.nn.weights.WeightInitXavier; import org.deeplearning4j.optimize.listeners.PerformanceListener; import org.deeplearning4j.optimize.listeners.ScoreToChartListener; -import org.glassfish.jersey.client.JerseyClient; -import org.glassfish.jersey.client.JerseyClientBuilder; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.impl.ActivationLReLU; import 
org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; -import org.nd4j.linalg.dataset.api.preprocessor.ImagePreProcessingScaler; -import org.nd4j.linalg.dataset.api.preprocessor.NormalizerMinMaxScaler; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.Adam; import org.nd4j.linalg.learning.config.IUpdater; -import org.nd4j.linalg.lossfunctions.LossFunctions; - - -import javax.swing.*; -import java.awt.*; -import java.awt.image.BufferedImage; -import java.io.File; -import java.util.Arrays; import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; @Slf4j @@ -95,8 +93,8 @@ public class App { private static JPanel panel; private static JPanel panel2; - private static Layer[] genLayers() { - return new Layer[] { + private static LayerConfiguration[] genLayers() { + return new LayerConfiguration[] { new DenseLayer.Builder().nIn(INPUT).nOut(X_DIM*Y_DIM*CHANNELS).weightInit(WeightInit.NORMAL).build(), new ActivationLayer.Builder(new ActivationLReLU(0.2)).build(), new DenseLayer.Builder().nIn(X_DIM*Y_DIM*CHANNELS).nOut(X_DIM*Y_DIM).build(), @@ -106,30 +104,33 @@ public class App { new DenseLayer.Builder().nIn(X_DIM*Y_DIM).nOut(X_DIM*Y_DIM*CHANNELS).activation(Activation.TANH) .build() }; - } + } /** * Returns a network config that takes in a 10x10 random number and produces a 28x28 grayscale image. * * @return config */ - private static MultiLayerConfiguration generator() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + private static NeuralNetConfiguration generator() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(42) .updater(UPDATER) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) .gradientNormalizationThreshold(GRADIENT_THRESHOLD) + //.weightInit(WeightInit.XAVIER) .weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY) - .list(genLayers()) - .setInputType(InputType.convolutional(X_DIM, Y_DIM, CHANNELS)) + .layersFromArray(genLayers()) + .inputType(InputType.convolutional(X_DIM, Y_DIM, CHANNELS)) + // .inputPreProcessor("CNN1", new FeedForwardToCnnPreProcessor(Y_DIM, X_DIM, CHANNELS)) .build(); + ((NeuralNetConfiguration) conf).init(); - return conf; + return conf; } - private static Layer[] disLayers() { - return new Layer[]{ + private static LayerConfiguration[] disLayers() { + return new LayerConfiguration[]{ new DenseLayer.Builder().nOut(X_DIM*Y_DIM*CHANNELS*2).build(), //input is set by setInputType on the network new ActivationLayer.Builder(new ActivationLReLU(0.2)).build(), new DropoutLayer.Builder(1 - 0.5).build(), @@ -146,44 +147,51 @@ public class App { }; } - private static MultiLayerConfiguration discriminator() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + private static NeuralNetConfiguration discriminator() { + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(42) .updater(UPDATER) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) .gradientNormalizationThreshold(GRADIENT_THRESHOLD) .weightInit(WeightInit.XAVIER) + //.weightInitFn(new WeightInitXavier()) + //.activationFn(new ActivationIdentity()) .activation(Activation.IDENTITY) - .list(disLayers()) - .setInputType(InputType.convolutional(X_DIM, Y_DIM, CHANNELS)) + .layersFromArray(disLayers()) + .inputType(InputType.convolutional(X_DIM, Y_DIM, CHANNELS)) .build(); + ((NeuralNetConfiguration) conf).init(); return conf; } - private static MultiLayerConfiguration gan() { - 
Layer[] genLayers = genLayers(); - Layer[] disLayers = Arrays.stream(disLayers()) + private static NeuralNetConfiguration gan() { + LayerConfiguration[] genLayers = genLayers(); + LayerConfiguration[] disLayers = Arrays.stream(disLayers()) .map((layer) -> { - if (layer instanceof DenseLayer || layer instanceof OutputLayer) { - return new FrozenLayerWithBackprop(layer); + if (layer instanceof DenseLayer || layer instanceof OutputLayer) { + return new FrozenLayerWithBackprop(layer); } else { return layer; } - }).toArray(Layer[]::new); - Layer[] layers = ArrayUtils.addAll(genLayers, disLayers); + }).toArray(LayerConfiguration[]::new); + LayerConfiguration[] layers = ArrayUtils.addAll(genLayers, disLayers); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(42) - .updater(UPDATER) - .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) - .gradientNormalizationThreshold(GRADIENT_THRESHOLD) - .weightInit(WeightInit.XAVIER) - .activation(Activation.IDENTITY) - .list(layers) - .setInputType(InputType.convolutional(X_DIM, Y_DIM, CHANNELS)) + .updater( Adam.builder().learningRate(0.0002).beta1(0.5).build() ) + .gradientNormalization( GradientNormalization.RenormalizeL2PerLayer) + .gradientNormalizationThreshold( 100 ) + //.weightInitFn( new WeightInitXavier() ) //this is internal + .weightNoise(new WeightNoise(new NormalDistribution(0.5, 0.5))) + .weightInit( WeightInit.XAVIER) + //.activationFn( new ActivationIdentity()) //this is internal + .activation( Activation.IDENTITY ) + .layersFromArray( layers ) + .inputType( InputType.convolutional(X_DIM, Y_DIM, CHANNELS)) .build(); - +((NeuralNetConfiguration) conf).init(); return conf; } @@ -194,6 +202,8 @@ public class App { } public static void main(String... 
args) throws Exception { + + log.info("\u001B[32m Some \u001B[1m green \u001B[22m text \u001B[0m \u001B[7m Inverted\u001B[0m "); Nd4j.getMemoryManager().setAutoGcWindow(15 * 1000); // MnistDataSetIterator trainData = new MnistDataSetIterator(128, true, 45); @@ -219,16 +229,17 @@ public class App { MultiLayerNetwork gen = new MultiLayerNetwork(generator()); MultiLayerNetwork dis = new MultiLayerNetwork(discriminator()); MultiLayerNetwork gan = new MultiLayerNetwork(gan()); - gen.init(); - dis.init(); - gan.init(); + gen.init(); log.debug("Generator network: {}", gen); + dis.init(); log.debug("Discriminator network: {}", dis); + gan.init(); log.debug("Complete GAN network: {}", gan); + copyParams(gen, dis, gan); - //gen.setListeners(new PerformanceListener(10, true)); - //dis.setListeners(new PerformanceListener(10, true)); - //gan.setListeners(new PerformanceListener(10, true)); - gan.setListeners(new ScoreToChartListener("gan")); + gen.addTrainingListeners(new PerformanceListener(10, true)); + dis.addTrainingListeners(new PerformanceListener(10, true)); + gan.addTrainingListeners(new PerformanceListener(10, true)); + gan.addTrainingListeners(new ScoreToChartListener("gan")); //dis.setListeners(new ScoreToChartListener("dis")); gan.fit(Nd4j.rand(batchSize, CHANNELS, X_DIM, Y_DIM), Nd4j.zeros(batchSize, 1)); @@ -315,23 +326,25 @@ public class App { int genLayerCount = gen.getLayers().length; for (int i = 0; i < gan.getLayers().length; i++) { if (i < genLayerCount) { - gen.getLayer(i).setParams(gan.getLayer(i).params()); + if(gan.getLayer(i).getParams() != null) + gen.getLayer(i).setParams(gan.getLayer(i).getParams()); } else { - dis.getLayer(i - genLayerCount).setParams(gan.getLayer(i).params()); + if(gan.getLayer(i).getParams() != null) + dis.getLayer(i - genLayerCount).setParams(gan.getLayer(i).getParams()); } } } private static void updateGen(MultiLayerNetwork gen, MultiLayerNetwork gan) { for (int i = 0; i < gen.getLayers().length; i++) { - gen.getLayer(i).setParams(gan.getLayer(i).params()); + gen.getLayer(i).setParams(gan.getLayer(i).getParams()); } } private static void updateGan(MultiLayerNetwork gen, MultiLayerNetwork dis, MultiLayerNetwork gan) { int genLayerCount = gen.getLayers().length; for (int i = genLayerCount; i < gan.getLayers().length; i++) { - gan.getLayer(i).setParams(dis.getLayer(i - genLayerCount).params()); + gan.getLayer(i).setParams(dis.getLayer(i - genLayerCount).getParams()); } } diff --git a/brutex-extended-tests/src/test/java/net/brutex/gan/GAN.java b/brutex-extended-tests/src/test/java/net/brutex/gan/GAN.java index 25473fc9e..41eb277d7 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/gan/GAN.java +++ b/brutex-extended-tests/src/test/java/net/brutex/gan/GAN.java @@ -25,6 +25,7 @@ import org.apache.commons.lang3.ArrayUtils; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.BaseTrainingListener; import org.nd4j.evaluation.classification.Evaluation; @@ -114,15 +115,15 @@ public class GAN { public void setGeneratorListeners(BaseTrainingListener[] listeners) { - generator.setListeners(listeners); + generator.addTrainingListeners(listeners); } public void setDiscriminatorListeners(BaseTrainingListener[] listeners) { - discriminator.setListeners(listeners); + discriminator.addTrainingListeners(listeners); } public void 
setGanListeners(BaseTrainingListener[] listeners) { - gan.setListeners(listeners); + gan.addTrainingListeners(listeners); } public void fit(DataSetIterator realData, int numEpochs) { @@ -199,13 +200,13 @@ public class GAN { Layer[] disLayers = ganDiscriminator.getLayers(); Layer[] layers = ArrayUtils.addAll(genLayers, disLayers); - MultiLayerConfiguration genConf = generator.getLayerWiseConfigurations(); - MultiLayerConfiguration disConf = ganDiscriminator.getLayerWiseConfigurations(); - org.deeplearning4j.nn.conf.layers.Layer[] confLayers = new org.deeplearning4j.nn.conf.layers.Layer[layers.length]; + NeuralNetConfiguration genConf = generator.getNetConfiguration(); + NeuralNetConfiguration disConf = ganDiscriminator.getNetConfiguration(); + LayerConfiguration[] confLayers = new LayerConfiguration[layers.length]; Map preProcessors = new HashMap<>(); for (int i = 0; i < layers.length; i++) { - confLayers[i] = layers[i].conf().getLayer(); + confLayers[i] = layers[i].getLayerConfiguration(); if (i < numGenLayers) { preProcessors.put(i, genConf.getInputPreProcess(i)); } else { @@ -213,7 +214,7 @@ public class GAN { } } - MultiLayerConfiguration ganConf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration ganConf = NeuralNetConfiguration.builder() .seed(seed) .updater(updater) .biasUpdater(biasUpdater) @@ -224,7 +225,7 @@ public class GAN { .trainingWorkspaceMode(trainingWorkSpaceMode) .inferenceWorkspaceMode(inferenceWorkspaceMode) .cacheMode(cacheMode) - .list(confLayers) + .layersFromArray(confLayers) .inputPreProcessors(preProcessors) .build(); gan = new MultiLayerNetwork(ganConf); @@ -238,9 +239,9 @@ public class GAN { int genLayerCount = generator.getLayers().length; for (int i = 0; i < gan.getLayers().length; i++) { if (i < genLayerCount) { - generator.getLayer(i).setParams(gan.getLayer(i).params()); + generator.getLayer(i).setParams(gan.getLayer(i).getParams()); } else { - discriminator.getLayer(i - genLayerCount).setParams(gan.getLayer(i).params()); + discriminator.getLayer(i - genLayerCount).setParams(gan.getLayer(i).getParams()); } } } @@ -251,7 +252,7 @@ public class GAN { */ private void updateGeneratorFromGan() { for (int i = 0; i < generator.getLayers().length; i++) { - generator.getLayer(i).setParams(gan.getLayer(i).params()); + generator.getLayer(i).setParams(gan.getLayer(i).getParams()); } } @@ -262,12 +263,12 @@ public class GAN { private void updateGanWithDiscriminator() { int genLayerCount = generator.getLayers().length; for (int i = genLayerCount; i < gan.getLayers().length; i++) { - gan.getLayer(i).setParams(discriminator.getLayer(i - genLayerCount).params()); + gan.getLayer(i).setParams(discriminator.getLayer(i - genLayerCount).getParams()); } } /** - * GAN builder, used as a starting point for creating a MultiLayerConfiguration or + * GAN builder, used as a starting point for creating a NeuralNetConfiguration or * ComputationGraphConfiguration.
*/ public static class Builder implements Cloneable { diff --git a/brutex-extended-tests/src/test/java/net/brutex/gan/MnistDCGANExample.java b/brutex-extended-tests/src/test/java/net/brutex/gan/MnistDCGANExample.java index d0e5bb73d..4dd171fea 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/gan/MnistDCGANExample.java +++ b/brutex-extended-tests/src/test/java/net/brutex/gan/MnistDCGANExample.java @@ -100,7 +100,7 @@ public class MnistDCGANExample { public static void main(String[] args) throws Exception { Supplier genSupplier = () -> { - return new MultiLayerNetwork(new NeuralNetConfiguration.Builder().list() + return new MultiLayerNetwork(NeuralNetConfiguration.builder() .layer(0, new DenseLayer.Builder().nIn(latentDim).nOut(width / 2 * height / 2 * 128) .activation(Activation.LEAKYRELU).weightInit(WeightInit.NORMAL).build()) .layer(1, new Convolution2D.Builder().nIn(128).nOut(128).kernelSize(5, 5) @@ -119,16 +119,16 @@ public class MnistDCGANExample { .inputPreProcessor(1, new FeedForwardToCnnPreProcessor(height / 2, width / 2, 128)) .inputPreProcessor(6, new CnnToFeedForwardPreProcessor(height, width, channels)) - .setInputType(InputType.feedForward(latentDim)) + .inputType(InputType.feedForward(latentDim)) .build()); }; GAN.DiscriminatorProvider discriminatorProvider = (updater) -> { - return new MultiLayerNetwork(new NeuralNetConfiguration.Builder() + return new MultiLayerNetwork(NeuralNetConfiguration.builder() .updater(new RmsProp.Builder().learningRate(0.0008).rmsDecay(1e-8).build()) //.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue) //.gradientNormalizationThreshold(100.0) - .list() + .layer(0, new Convolution2D.Builder().nIn(channels).nOut(64).kernelSize(3, 3) .activation(Activation.LEAKYRELU).build()) .layer(1, new Convolution2D.Builder().nIn(64).nOut(64).kernelSize(3, 3).stride(2, 2) @@ -142,7 +142,7 @@ public class MnistDCGANExample { .layer(6, new LossLayer.Builder().lossFunction(LossFunctions.LossFunction.XENT).build()) .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(height, width, channels)) .inputPreProcessor(4, new CnnToFeedForwardPreProcessor(2, 2, 64)) - .setInputType(InputType.convolutionalFlat(height, width, channels)) + .inputType(InputType.convolutionalFlat(height, width, channels)) .build()); }; @@ -155,8 +155,8 @@ public class MnistDCGANExample { .updater(new RmsProp.Builder().learningRate(0.0008).rmsDecay(1e-8).build()) .build(); - gan.getGenerator().setListeners(new PerformanceListener(1, true)); - gan.getDiscriminator().setListeners(new PerformanceListener(1, true)); + gan.getGenerator().addTrainingListeners(new PerformanceListener(1, true)); + gan.getDiscriminator().addTrainingListeners(new PerformanceListener(1, true)); Nd4j.getMemoryManager().setAutoGcWindow(15 * 1000); diff --git a/brutex-extended-tests/src/test/java/net/brutex/gan/MnistSimpleGAN.java b/brutex-extended-tests/src/test/java/net/brutex/gan/MnistSimpleGAN.java index 037a0be9d..be3014f3c 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/gan/MnistSimpleGAN.java +++ b/brutex-extended-tests/src/test/java/net/brutex/gan/MnistSimpleGAN.java @@ -23,7 +23,6 @@ package net.brutex.gan; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.ActivationLayer; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -57,12 
+56,12 @@ public class MnistSimpleGAN { public static MultiLayerNetwork getGenerator() { - MultiLayerConfiguration genConf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration genConf = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) .gradientNormalizationThreshold(100) - .list() + .layer(new DenseLayer.Builder().nIn(100).nOut(256).weightInit(WeightInit.NORMAL).build()) .layer(new ActivationLayer.Builder(new ActivationLReLU(0.2)).build()) .layer(new DenseLayer.Builder().nIn(256).nOut(512).build()) @@ -76,14 +75,14 @@ public class MnistSimpleGAN { public static MultiLayerNetwork getDiscriminator(IUpdater updater) { - MultiLayerConfiguration discConf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration discConf = NeuralNetConfiguration.builder() .seed(42) .updater(updater) .weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) .gradientNormalizationThreshold(100) - .list() + .layer(new DenseLayer.Builder().nIn(784).nOut(1024).updater(updater).build()) .layer(new ActivationLayer.Builder(new ActivationLReLU(0.2)).build()) .layer(new DropoutLayer.Builder(1 - 0.5).build()) diff --git a/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest.java b/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest.java index efb54aa29..75965d7b5 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest.java +++ b/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest.java @@ -35,7 +35,6 @@ import org.apache.spark.sql.RowFactory; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.types.DataTypes; import org.apache.spark.sql.types.Metadata; -import org.apache.spark.sql.types.StringType; import org.apache.spark.sql.types.StructField; import org.apache.spark.sql.types.StructType; import org.datavec.api.records.reader.impl.csv.CSVRecordReader; @@ -43,12 +42,10 @@ import org.datavec.api.transform.TransformProcess; import org.datavec.api.transform.filter.FilterInvalidValues; import org.datavec.api.transform.schema.Schema; import org.datavec.api.Writable; -import org.datavec.spark.transform.Normalization; import org.datavec.spark.transform.SparkTransformExecutor; import org.datavec.spark.transform.misc.StringToWritablesFunction; import org.deeplearning4j.datasets.iterator.impl.EmnistDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.EmnistDataSetIterator.Set; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -287,15 +284,15 @@ public class BrianTest extends BaseSparkSessionTest { //Define Network - MultiLayerConfiguration multiLayerConfiguration = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration multiLayerConfiguration = NeuralNetConfiguration.builder() .seed(123) .updater(new Nesterovs(0.1, 0.9)) - .list() + .layer(0, new DenseLayer.Builder().nIn(5).nOut(20).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).l2(0.001).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(20).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) - //.layer(2, new DenseLayer.Builder().nIn(9).nOut(9).weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) + //.layer(2, new 
DenseLayerConfiguration.Builder().nIn(9).nOut(9).weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.XENT).nIn(20).nOut(4) .weightInit(WeightInit.XAVIER).activation(Activation.SIGMOID).build()) .build(); diff --git a/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest2.java b/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest2.java index 4e340c69a..9195933ff 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest2.java +++ b/brutex-extended-tests/src/test/java/net/brutex/spark/BrianTest2.java @@ -37,7 +37,6 @@ import org.datavec.api.transform.schema.Schema; import org.datavec.api.Writable; import org.datavec.spark.transform.SparkTransformExecutor; import org.datavec.spark.transform.misc.StringToWritablesFunction; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -295,13 +294,13 @@ public class BrianTest2 /*extends BaseDL4JTest*/ { */ //Define Network - MultiLayerConfiguration multiLayerConfiguration = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration multiLayerConfiguration = NeuralNetConfiguration.builder() .seed(123) .updater(new Nesterovs(0.1, 0.9)) - .list() + .layer(0, new DenseLayer.Builder().nIn(5).nOut(20).weightInit(WeightInit.XAVIER).activation(Activation.RELU).l2(0.001).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(20).weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) - //.layer(2, new DenseLayer.Builder().nIn(9).nOut(9).weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) + //.layer(2, new DenseLayerConfiguration.Builder().nIn(9).nOut(9).weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.XENT).nIn(20).nOut(4).weightInit(WeightInit.XAVIER).activation(Activation.SIGMOID).build()) .build(); diff --git a/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer.java b/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer.java index 353195da4..0cf2e5676 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer.java +++ b/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer.java @@ -21,7 +21,6 @@ package net.brutex.spark; -import lombok.extern.log4j.Log4j2; //import net.brutex.ai.performance.storage.PostgresStatsStorage; import lombok.extern.slf4j.Slf4j; import org.datavec.api.records.reader.RecordReader; @@ -29,22 +28,17 @@ import org.datavec.api.records.reader.impl.collection.ListStringRecordReader; import org.datavec.api.records.reader.impl.csv.CSVRecordReader; import org.datavec.api.split.FileSplit; import org.datavec.api.split.ListStringSplit; -import org.deeplearning4j.core.storage.StatsStorage; -import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; + import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.DenseLayer; + import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.LSTM; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor; import org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor; 
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; -import org.deeplearning4j.optimize.listeners.ScoreIterationListener; -import org.deeplearning4j.ui.api.UIServer; -import org.deeplearning4j.ui.model.stats.StatsListener; -import org.deeplearning4j.ui.model.storage.FileStatsStorage; -import org.junit.jupiter.api.AfterAll; + import org.deeplearning4j.ui.api.UIServer; + import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.nd4j.evaluation.classification.Evaluation; @@ -86,16 +80,16 @@ public class TestServer { int i = 2000; int numClasses = 10; int numBatchSize = 100; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(1234) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs.Builder().learningRate(0.15).build()) .activation(Activation.RELU) .l2(0) - .list() + //.layer(0, new ConvolutionLayer.Builder().nIn(1).kernelSize(1, 5).stride(1,1).padding(0,2).nOut(1).name("1st Filter").updater(new Adam.Builder().learningRate(0.2).build()).build()) //.layer(1, new ConvolutionLayer.Builder().nIn(1).kernelSize(1, 2).stride(1,2).padding(0,0).nOut(1).name("2nd Filter").updater(new Adam.Builder().learningRate(0.1).build()).build()) - // .layer(1, new DenseLayer.Builder().nIn(10).nOut(64).activation(Activation.RELU).build()) + // .layer(1, new DenseLayerConfiguration.Builder().nIn(10).nOut(64).activation(Activation.RELU).build()) .layer(0, new DenseLayer.Builder().nIn(10).nOut(100).activation(Activation.RELU).l2(0.003).build()) .layer(1, new LSTM.Builder().nIn(100).nOut(100).activation(Activation.TANH).build()) .layer(2, new LSTM.Builder().nIn(100).nOut(100).activation(Activation.TANH).build()) diff --git a/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer2.java b/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer2.java index d6ac22e11..db8a74ae7 100644 --- a/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer2.java +++ b/brutex-extended-tests/src/test/java/net/brutex/spark/TestServer2.java @@ -21,7 +21,6 @@ package net.brutex.spark; -import lombok.extern.log4j.Log4j2; //import net.brutex.ai.performance.storage.PostgresStatsStorage; import lombok.extern.slf4j.Slf4j; import org.datavec.api.records.reader.RecordReader; @@ -32,9 +31,8 @@ import org.datavec.api.split.ListStringSplit; import org.datavec.image.recordreader.ImageRecordReader; import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.DenseLayer; + import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.LSTM; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor; @@ -122,16 +120,16 @@ public class TestServer2 { int i = 2000; int numClasses = 10; int numBatchSize = 100; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(1234) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs.Builder().learningRate(0.15).build()) .activation(Activation.RELU) .l2(0) - .list() + //.layer(0, new ConvolutionLayer.Builder().nIn(1).kernelSize(1, 5).stride(1,1).padding(0,2).nOut(1).name("1st 
Filter").updater(new Adam.Builder().learningRate(0.2).build()).build()) //.layer(1, new ConvolutionLayer.Builder().nIn(1).kernelSize(1, 2).stride(1,2).padding(0,0).nOut(1).name("2nd Filter").updater(new Adam.Builder().learningRate(0.1).build()).build()) - // .layer(1, new DenseLayer.Builder().nIn(10).nOut(64).activation(Activation.RELU).build()) + // .layer(1, new DenseLayerConfiguration.Builder().nIn(10).nOut(64).activation(Activation.RELU).build()) .layer(0, new DenseLayer.Builder().nIn(10).nOut(100).activation(Activation.RELU).l2(0.003).build()) .layer(1, new LSTM.Builder().nIn(100).nOut(100).activation(Activation.TANH).build()) .layer(2, new LSTM.Builder().nIn(100).nOut(100).activation(Activation.TANH).build()) @@ -207,7 +205,7 @@ public class TestServer2 { //PostgresStatsStorage psqlStore = new PostgresStatsStorage(); int listenerFrequency = 2; //net.setListeners(new StatsListener(psqlStore, listenerFrequency), new StatsListener(statsStorage, listenerFrequency), new ScoreIterationListener(200)); - net.setListeners(new StatsListener(statsStorage, listenerFrequency), new ScoreIterationListener(200)); + net.addTrainingListeners(new StatsListener(statsStorage, listenerFrequency), new ScoreIterationListener(200)); //Attach the StatsStorage instance to the UI: this allows the contents of the StatsStorage to be visualized diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestBaselineGenerator.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestBaselineGenerator.java index 7c4bcc9ac..8775bfc2e 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestBaselineGenerator.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestBaselineGenerator.java @@ -21,14 +21,14 @@ package org.deeplearning4j.integration; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; import org.deeplearning4j.datasets.iterator.MultiDataSetWrapperIterator; import org.deeplearning4j.integration.testcases.samediff.SameDiffCNNCases; import org.deeplearning4j.integration.testcases.samediff.SameDiffMLPTestCases; import org.deeplearning4j.integration.testcases.samediff.SameDiffRNNTestCases; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.listeners.CollectScoresListener; @@ -135,12 +135,12 @@ public class IntegrationTestBaselineGenerator { MultiLayerNetwork mln = null; ComputationGraph cg = null; SameDiff sd = null; - Model m = null; + IModel m = null; if (tc.getTestType() == TestCase.TestType.RANDOM_INIT) { Object config = tc.getConfiguration(); String json = null; - if (config instanceof MultiLayerConfiguration) { - MultiLayerConfiguration mlc = (MultiLayerConfiguration) config; + if (config instanceof NeuralNetConfiguration) { + NeuralNetConfiguration mlc = (NeuralNetConfiguration) config; json = mlc.toJson(); mln = new MultiLayerNetwork(mlc); mln.init(); @@ -284,21 +284,21 @@ public class IntegrationTestBaselineGenerator { INDArray paramsPostTraining; if (modelType == ModelType.MLN) { int[] layersToTrain = tc.getUnsupervisedTrainLayersMLN(); - Preconditions.checkState(layersToTrain != null, "Layer 
indices must not be null"); + Preconditions.checkState(layersToTrain != null, "ILayer indices must not be null"); DataSetIterator dsi = new MultiDataSetWrapperIterator(iter); for (int i : layersToTrain) { mln.pretrainLayer(i, dsi); } - paramsPostTraining = mln.params(); + paramsPostTraining = mln.getModelParams(); } else if (modelType == ModelType.CG) { String[] layersToTrain = tc.getUnsupervisedTrainLayersCG(); - Preconditions.checkState(layersToTrain != null, "Layer names must not be null"); + Preconditions.checkState(layersToTrain != null, "ILayer names must not be null"); for (String i : layersToTrain) { cg.pretrainLayer(i, iter); } - paramsPostTraining = cg.params(); + paramsPostTraining = cg.getModelParams(); } else { throw new UnsupportedOperationException("SameDiff not supported for unsupervised training tests"); } @@ -314,7 +314,7 @@ public class IntegrationTestBaselineGenerator { CollectScoresListener l = new CollectScoresListener(1); if (modelType != ModelType.SAMEDIFF) - m.setListeners(l); + m.addTrainingListeners(l); History h = null; if (modelType == ModelType.MLN) { @@ -349,7 +349,7 @@ public class IntegrationTestBaselineGenerator { } } else { File p = new File(testBaseDir, IntegrationTestRunner.PARAMS_POST_TRAIN_FILENAME); - IntegrationTestRunner.write(m.params(), p); + IntegrationTestRunner.write(m.getModelParams(), p); } } } diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestRunner.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestRunner.java index fbc0d60a3..786c6d6b9 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestRunner.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/IntegrationTestRunner.java @@ -25,18 +25,18 @@ import com.google.common.collect.ImmutableSet; import com.google.common.reflect.ClassPath; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.common.config.DL4JClassLoading; import org.deeplearning4j.datasets.iterator.MultiDataSetWrapperIterator; import org.deeplearning4j.integration.util.CountingMultiDataSetIterator; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.BackpropType; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.LayerVertex; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.vertex.GraphVertex; import org.deeplearning4j.nn.layers.BaseOutputLayer; @@ -177,22 +177,22 @@ public class IntegrationTestRunner { MultiLayerNetwork mln = null; ComputationGraph cg = null; SameDiff sd = null; - Model m = null; + IModel m = null; if (tc.getTestType() == TestCase.TestType.RANDOM_INIT) { log.info("Checking RANDOM_INIT test case: saved model vs. 
initialized model"); //Checking randomly initialized model: File savedModel = new File(testBaseDir, IntegrationTestRunner.RANDOM_INIT_UNTRAINED_MODEL_FILENAME); Object config = tc.getConfiguration(); - if (config instanceof MultiLayerConfiguration) { - MultiLayerConfiguration mlc = (MultiLayerConfiguration) config; + if (config instanceof NeuralNetConfiguration) { + NeuralNetConfiguration mlc = (NeuralNetConfiguration) config; mln = new MultiLayerNetwork(mlc); mln.init(); m = mln; MultiLayerNetwork loaded = MultiLayerNetwork.load(savedModel, true); - assertEquals(loaded.getLayerWiseConfigurations(), mln.getLayerWiseConfigurations(), "Configs not equal"); - assertEquals( loaded.params(), mln.params(), "Params not equal"); - assertEquals( loaded.paramTable(), mln.paramTable(), "Param table not equal"); + assertEquals(loaded.getNetConfiguration(), mln.getNetConfiguration(), "Configs not equal"); + assertEquals( loaded.getModelParams(), mln.getModelParams(), "Params not equal"); + assertEquals( loaded.getParamTable(), mln.getParamTable(), "Param table not equal"); } else if(config instanceof ComputationGraphConfiguration ){ ComputationGraphConfiguration cgc = (ComputationGraphConfiguration) config; cg = new ComputationGraph(cgc); @@ -200,9 +200,9 @@ public class IntegrationTestRunner { m = cg; ComputationGraph loaded = ComputationGraph.load(savedModel, true); - assertEquals(loaded.getConfiguration(), cg.getConfiguration(), "Configs not equal" ); - assertEquals( loaded.params(), cg.params(), "Params not equal"); - assertEquals(loaded.paramTable(), cg.paramTable(), "Param table not equal"); + assertEquals(loaded.getComputationGraphConfiguration(), cg.getComputationGraphConfiguration(), "Configs not equal" ); + assertEquals( loaded.getModelParams(), cg.getModelParams(), "Params not equal"); + assertEquals(loaded.getParamTable(), cg.getParamTable(), "Param table not equal"); } else if(config instanceof SameDiff){ sd = (SameDiff)config; SameDiff loaded = SameDiff.load(savedModel, true); @@ -383,22 +383,22 @@ public class IntegrationTestRunner { org.deeplearning4j.nn.api.Layer[] layers; if(modelType == ModelType.MLN){ int[] layersToTrain = tc.getUnsupervisedTrainLayersMLN(); - Preconditions.checkState(layersToTrain != null, "Layer indices must not be null"); + Preconditions.checkState(layersToTrain != null, "ILayer indices must not be null"); DataSetIterator dsi = new MultiDataSetWrapperIterator(iter); for( int i : layersToTrain){ mln.pretrainLayer(i, dsi); } - paramsPostTraining = mln.params(); + paramsPostTraining = mln.getModelParams(); layers = mln.getLayers(); } else if(modelType == ModelType.CG) { String[] layersToTrain = tc.getUnsupervisedTrainLayersCG(); - Preconditions.checkState(layersToTrain != null, "Layer names must not be null"); + Preconditions.checkState(layersToTrain != null, "ILayer names must not be null"); for( String i : layersToTrain){ cg.pretrainLayer(i, iter); } - paramsPostTraining = cg.params(); + paramsPostTraining = cg.getModelParams(); layers = cg.getLayers(); } else { throw new UnsupportedOperationException("Unsupported layerwise pretraining not supported for SameDiff models"); @@ -426,11 +426,11 @@ public class IntegrationTestRunner { boolean isTbptt; int tbpttLength; if(modelType == ModelType.MLN){ - isTbptt = mln.getLayerWiseConfigurations().getBackpropType() == BackpropType.TruncatedBPTT; - tbpttLength = mln.getLayerWiseConfigurations().getTbpttFwdLength(); + isTbptt = mln.getNetConfiguration().getBackpropType() == BackpropType.TruncatedBPTT; + tbpttLength = 
mln.getNetConfiguration().getTbpttFwdLength(); } else if(modelType == ModelType.CG) { - isTbptt = cg.getConfiguration().getBackpropType() == BackpropType.TruncatedBPTT; - tbpttLength = cg.getConfiguration().getTbpttFwdLength(); + isTbptt = cg.getComputationGraphConfiguration().getBackpropType() == BackpropType.TruncatedBPTT; + tbpttLength = cg.getComputationGraphConfiguration().getTbpttFwdLength(); } else { isTbptt = false; tbpttLength = 0; @@ -439,7 +439,7 @@ public class IntegrationTestRunner { CountingMultiDataSetIterator countingIter = new CountingMultiDataSetIterator(trainData, isTbptt, tbpttLength); CollectScoresListener l = new CollectScoresListener(1); if(modelType != ModelType.SAMEDIFF) { - m.setListeners(l); + m.addTrainingListeners(l); } int iterBefore; @@ -458,11 +458,11 @@ public class IntegrationTestRunner { epochAfter = mln.getEpochCount(); layers = mln.getLayers(); } else if(modelType == ModelType.CG){ - iterBefore = cg.getConfiguration().getIterationCount(); - epochBefore = cg.getConfiguration().getEpochCount(); + iterBefore = cg.getComputationGraphConfiguration().getIterationCount(); + epochBefore = cg.getComputationGraphConfiguration().getEpochCount(); cg.fit(countingIter); - iterAfter = cg.getConfiguration().getIterationCount(); - epochAfter = cg.getConfiguration().getEpochCount(); + iterAfter = cg.getComputationGraphConfiguration().getIterationCount(); + epochAfter = cg.getComputationGraphConfiguration().getEpochCount(); layers = cg.getLayers(); } else { iterBefore = sd.getTrainingConfig().getIterationCount(); @@ -519,10 +519,10 @@ public class IntegrationTestRunner { if(modelType != ModelType.SAMEDIFF) { File p = new File(testBaseDir, IntegrationTestRunner.PARAMS_POST_TRAIN_FILENAME); INDArray paramsExp = read(p); - INDArray z = exceedsRelError(m.params(), paramsExp, tc.getMaxRelativeErrorParamsPostTraining(), tc.getMinAbsErrorParamsPostTraining()); + INDArray z = exceedsRelError(m.getModelParams(), paramsExp, tc.getMaxRelativeErrorParamsPostTraining(), tc.getMinAbsErrorParamsPostTraining()); int count = z.sumNumber().intValue(); if (count > 0) { - logFailedParams(20, "Parameter", layers, z, paramsExp, m.params()); + logFailedParams(20, "Parameter", layers, z, paramsExp, m.getModelParams()); } assertEquals( 0, count, "Number of params exceeded max relative error"); } else { @@ -606,13 +606,13 @@ public class IntegrationTestRunner { if (modelType == ModelType.MLN) { ModelSerializer.writeModel(m, f, true); MultiLayerNetwork restored = MultiLayerNetwork.load(f, true); - assertEquals(mln.getLayerWiseConfigurations(), restored.getLayerWiseConfigurations()); - assertEquals(mln.params(), restored.params()); + assertEquals(mln.getNetConfiguration(), restored.getNetConfiguration()); + assertEquals(mln.getModelParams(), restored.getModelParams()); } else if(modelType == ModelType.CG){ ModelSerializer.writeModel(m, f, true); ComputationGraph restored = ComputationGraph.load(f, true); - assertEquals(cg.getConfiguration(), restored.getConfiguration()); - assertEquals(cg.params(), restored.params()); + assertEquals(cg.getComputationGraphConfiguration(), restored.getComputationGraphConfiguration()); + assertEquals(cg.getModelParams(), restored.getModelParams()); } else { sd.save(f, true); SameDiff restored = SameDiff.load(f, true); @@ -722,7 +722,7 @@ public class IntegrationTestRunner { } //Work out which layers, vertices etc we have seen - so we can (at the end of all tests) log our integration test coverage - private static void collectCoverageInformation(Model m){ + private 
static void collectCoverageInformation(IModel m){ boolean isMLN = (m instanceof MultiLayerNetwork); MultiLayerNetwork mln = (isMLN ? (MultiLayerNetwork)m : null); ComputationGraph cg = (!isMLN ? (ComputationGraph)m : null); @@ -735,17 +735,17 @@ public class IntegrationTestRunner { layers = cg.getLayers(); } for (org.deeplearning4j.nn.api.Layer l : layers) { - Layer lConf = l.conf().getLayer(); + LayerConfiguration lConf = l.getLayerConfiguration(); layerConfClassesSeen.put(lConf.getClass(), layerConfClassesSeen.getOrDefault(lConf.getClass(), 0) + 1); } //Collect preprocessor coverage information: Collection preProcessors; if (isMLN) { - preProcessors = mln.getLayerWiseConfigurations().getInputPreProcessors().values(); + preProcessors = mln.getNetConfiguration().getInputPreProcessors().values(); } else { preProcessors = new ArrayList<>(); - for (org.deeplearning4j.nn.conf.graph.GraphVertex gv : cg.getConfiguration().getVertices().values()) { + for (org.deeplearning4j.nn.conf.graph.GraphVertex gv : cg.getComputationGraphConfiguration().getVertices().values()) { if (gv instanceof LayerVertex) { InputPreProcessor pp = ((LayerVertex) gv).getPreProcessor(); if (pp != null) { @@ -760,14 +760,14 @@ public class IntegrationTestRunner { //Collect vertex coverage information if (!isMLN) { - for (org.deeplearning4j.nn.conf.graph.GraphVertex gv : cg.getConfiguration().getVertices().values()) { + for (org.deeplearning4j.nn.conf.graph.GraphVertex gv : cg.getComputationGraphConfiguration().getVertices().values()) { vertexConfClassesSeen.put(gv.getClass(), vertexConfClassesSeen.getOrDefault(gv.getClass(), 0) + 1); } } } - private static void checkLayerClearance(Model m) { + private static void checkLayerClearance(IModel m) { //Check that the input fields for all layers have been cleared org.deeplearning4j.nn.api.Layer[] layers; if (m instanceof MultiLayerNetwork) { @@ -801,7 +801,7 @@ public class IntegrationTestRunner { } } - private static void validateLayerIterCounts(Model m, int expEpoch, int expIter){ + private static void validateLayerIterCounts(IModel m, int expEpoch, int expIter){ //Check that the iteration and epoch counts - on the layers - are synced org.deeplearning4j.nn.api.Layer[] layers; if (m instanceof MultiLayerNetwork) { @@ -817,7 +817,7 @@ public class IntegrationTestRunner { } - private static Map getFrozenLayerParamCopies(Model m){ + private static Map getFrozenLayerParamCopies(IModel m){ Map out = new LinkedHashMap<>(); org.deeplearning4j.nn.api.Layer[] layers; if (m instanceof MultiLayerNetwork) { @@ -832,9 +832,9 @@ public class IntegrationTestRunner { if(m instanceof MultiLayerNetwork){ paramPrefix = l.getIndex() + "_"; } else { - paramPrefix = l.conf().getLayer().getLayerName() + "_"; + paramPrefix = l.getLayerConfiguration().getLayerName() + "_"; } - Map paramTable = l.paramTable(); + Map paramTable = l.getParamTable(); for(Map.Entry e : paramTable.entrySet()){ out.put(paramPrefix + e.getKey(), e.getValue().dup()); } @@ -854,7 +854,7 @@ public class IntegrationTestRunner { return out; } - public static void checkFrozenParams(Map copiesBeforeTraining, Model m){ + public static void checkFrozenParams(Map copiesBeforeTraining, IModel m){ for(Map.Entry e : copiesBeforeTraining.entrySet()){ INDArray actual = m.getParam(e.getKey()); assertEquals(e.getValue(), actual, e.getKey()); @@ -872,14 +872,14 @@ public class IntegrationTestRunner { log.info("||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"); - log.info("Layer coverage - 
classes seen:"); + log.info("ILayer coverage - classes seen:"); for (Class c : layerClasses) { if (layerConfClassesSeen.containsKey(c)) { log.info("Class seen {} times in tests: {}", layerConfClassesSeen.get(c), c.getName()); } } - log.info("Layer classes NOT seen in any tests:"); + log.info("ILayer classes NOT seen in any tests:"); for (Class c : layerClasses) { if (!layerConfClassesSeen.containsKey(c)) { log.info("Class NOT seen in any tests: {}", c.getName()); @@ -939,7 +939,7 @@ public class IntegrationTestRunner { } private static boolean isLayerConfig(Class c) { - return Layer.class.isAssignableFrom(c); + return LayerConfiguration.class.isAssignableFrom(c); } private static boolean isPreprocessorConfig(Class c) { @@ -1088,7 +1088,7 @@ public class IntegrationTestRunner { if(pSoFar + n < i){ pSoFar += n; } else { - for(Map.Entry e : l.paramTable().entrySet()){ + for(Map.Entry e : l.getParamTable().entrySet()){ pSoFar += e.getValue().length(); if(pSoFar >= i){ pName = e.getKey(); diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestCase.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestCase.java index b2d76f04a..41afafa4e 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestCase.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestCase.java @@ -21,7 +21,7 @@ package org.deeplearning4j.integration; import lombok.Data; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.autodiff.samediff.SameDiff; import org.nd4j.evaluation.IEvaluation; import org.nd4j.linalg.api.ndarray.INDArray; @@ -89,7 +89,7 @@ public abstract class TestCase { /** * Required for pretrained models (testType == TestType.PRETRAINED) */ - public Model getPretrainedModel() throws Exception { + public IModel getPretrainedModel() throws Exception { throw new RuntimeException("Implementations must override this method if used"); } diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestUtils.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestUtils.java index 5c16cc908..60e314d71 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestUtils.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/TestUtils.java @@ -22,7 +22,7 @@ package org.deeplearning4j.integration; import org.apache.commons.compress.utils.IOUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.util.ModelSerializer; @@ -48,15 +48,15 @@ public class TestUtils { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); restored = ModelSerializer.restoreMultiLayerNetwork(bais, true); - assertEquals(net.getLayerWiseConfigurations(), restored.getLayerWiseConfigurations()); - assertEquals(net.params(), restored.params()); + assertEquals(net.getNetConfiguration(), restored.getNetConfiguration()); + assertEquals(net.getModelParams(), restored.getModelParams()); } catch (IOException e){ //Should never happen throw new RuntimeException(e); } - //Also check the MultiLayerConfiguration is serializable (required by Spark etc) - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); + //Also check the NeuralNetConfiguration is 
serializable (required by Spark etc) + NeuralNetConfiguration conf = net.getNetConfiguration(); serializeDeserializeJava(conf); return restored; @@ -73,15 +73,15 @@ public class TestUtils { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); restored = ModelSerializer.restoreComputationGraph(bais, true); - assertEquals(net.getConfiguration(), restored.getConfiguration()); - assertEquals(net.params(), restored.params()); + assertEquals(net.getComputationGraphConfiguration(), restored.getComputationGraphConfiguration()); + assertEquals(net.getModelParams(), restored.getModelParams()); } catch (IOException e){ //Should never happen throw new RuntimeException(e); } //Also check the ComputationGraphConfiguration is serializable (required by Spark etc) - ComputationGraphConfiguration conf = net.getConfiguration(); + ComputationGraphConfiguration conf = net.getComputationGraphConfiguration(); serializeDeserializeJava(conf); return restored; diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN1DTestCases.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN1DTestCases.java index d65a0a9cc..ec116ca31 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN1DTestCases.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN1DTestCases.java @@ -80,12 +80,12 @@ public class CNN1DTestCases { CharacterIterator iter = CharacterIterator.getShakespeareIterator(miniBatchSize,exampleLength); int nOut = iter.totalOutcomes(); - return new NeuralNetConfiguration.Builder() + return ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Adam(0.01)) - .convolutionMode(ConvolutionMode.Same) + .convolutionMode(ConvolutionMode.Same)) .graphBuilder() .addInputs("in") .layer("0", new Convolution1DLayer.Builder().nOut(32).activation(Activation.TANH).kernelSize(3).stride(1).build(), "in") diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN2DTestCases.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN2DTestCases.java index 3b351e277..4b7b3f7a3 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN2DTestCases.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN2DTestCases.java @@ -32,7 +32,7 @@ import org.deeplearning4j.datasets.fetchers.DataSetType; import org.deeplearning4j.datasets.iterator.EarlyTerminationDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.TinyImageNetDataSetIterator; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -98,13 +98,13 @@ public class CNN2DTestCases { int outputNum = 10; // The number of possible outcomes int seed = 123; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)) - .list() + .layer(0, new ConvolutionLayer.Builder(5, 5) //nIn and nOut specify depth. 
nIn here is the nChannels and nOut is the number of filters to be applied .nIn(nChannels) @@ -132,7 +132,7 @@ public class CNN2DTestCases { .nOut(outputNum) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) //See note below + .inputType(InputType.convolutionalFlat(28, 28, 1)) //See note below .build(); return conf; @@ -207,7 +207,7 @@ public class CNN2DTestCases { } @Override - public Model getPretrainedModel() throws Exception { + public IModel getPretrainedModel() throws Exception { VGG16 vgg16 = VGG16.builder() .seed(12345) .build(); @@ -294,7 +294,7 @@ public class CNN2DTestCases { } @Override - public Model getPretrainedModel() throws Exception { + public IModel getPretrainedModel() throws Exception { int nClasses = 10; int nBoxes = 5; double lambdaNoObj = 0.5; @@ -403,20 +403,20 @@ public class CNN2DTestCases { } @Override - public Model getPretrainedModel() throws Exception { + public IModel getPretrainedModel() throws Exception { Map lrSchedule = new HashMap<>(); lrSchedule.put(0, 0.01); lrSchedule.put(1000, 0.005); lrSchedule.put(3000, 0.001); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .l2(0.0005) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)) - .list() + .layer(0, new ConvolutionLayer.Builder(5, 5) //nIn and nOut specify depth. nIn here is the nChannels and nOut is the number of filters to be applied .nIn(1) @@ -446,7 +446,7 @@ public class CNN2DTestCases { .nOut(10) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) //See note below + .inputType(InputType.convolutionalFlat(28, 28, 1)) //See note below .build(); diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN3DTestCases.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN3DTestCases.java index f856d5159..157116ba9 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN3DTestCases.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/CNN3DTestCases.java @@ -24,7 +24,6 @@ import org.deeplearning4j.datasets.iterator.impl.SingletonMultiDataSetIterator; import org.deeplearning4j.integration.ModelType; import org.deeplearning4j.integration.TestCase; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.Convolution3D; @@ -76,13 +75,13 @@ public class CNN3DTestCases { int outputNum = 10; // The number of possible outcomes int seed = 123; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)) .convolutionMode(ConvolutionMode.Same) - .list() + .layer(new Convolution3D.Builder(3,3,3) .dataFormat(Convolution3D.DataFormat.NCDHW) .nIn(nChannels) @@ -98,7 +97,7 @@ public class CNN3DTestCases { .nOut(outputNum) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutional3D(8,8,8,nChannels)) + .inputType(InputType.convolutional3D(8,8,8,nChannels)) .build(); return conf; diff --git 
a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/MLPTestCases.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/MLPTestCases.java index 9a58e5138..69e9fa4cd 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/MLPTestCases.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/MLPTestCases.java @@ -28,7 +28,6 @@ import org.datavec.api.split.FileSplit; import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; import org.deeplearning4j.datasets.iterator.EarlyTerminationDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -93,7 +92,7 @@ public class MLPTestCases { @Override public Object getConfiguration() { - return new NeuralNetConfiguration.Builder() + return NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .updater(new Adam(new MapSchedule.Builder(ScheduleType.ITERATION) @@ -104,13 +103,13 @@ public class MLPTestCases { .add(14, 1e-2) .build())) .l1(1e-3).l2(1e-3) - .list() + .layer(new DenseLayer.Builder().activation(Activation.TANH).nOut(64).build()) .layer(new OutputLayer.Builder().nOut(10) .lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(28,28,1)) + .inputType(InputType.convolutionalFlat(28,28,1)) .build(); } @@ -198,11 +197,11 @@ public class MLPTestCases { int numHiddenNodes = 20; //log.info("Build model...."); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(seed) .updater(new Nesterovs(learningRate, 0.9)) - .list() + .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes) .weightInit(WeightInit.XAVIER) .activation(Activation.RELU) diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/RNNTestCases.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/RNNTestCases.java index a2cf437fe..edb312c0f 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/RNNTestCases.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/RNNTestCases.java @@ -112,20 +112,20 @@ public class RNNTestCases { int lstmLayerSize = 200; //Number of units in each GravesLSTM layer int tbpttLength = 50; //Length for truncated backpropagation through time. 
i.e., do parameter updates ever 50 characters - return new NeuralNetConfiguration.Builder() + return NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .l2(0.001) .weightInit(WeightInit.XAVIER) .updater(new Adam(1e-3)) - .list() + .layer(0, new LSTM.Builder().nIn(iter.inputColumns()).nOut(lstmLayerSize) .activation(Activation.TANH).build()) .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize) .activation(Activation.TANH).build()) .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX) //MCXENT + softmax for classification .nIn(lstmLayerSize).nOut(nOut).build()) - .backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(tbpttLength).tBPTTBackwardLength(tbpttLength) + .backpropType(BackpropType.TruncatedBPTT).tbpttFwdLength(tbpttLength).tbpttBackLength(tbpttLength) .build(); } @@ -195,19 +195,19 @@ public class RNNTestCases { @Override public Object getConfiguration() throws Exception { - return new NeuralNetConfiguration.Builder() + return NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .updater(new Adam(5e-2)) .l1(1e-3).l2(1e-3) - .list() + .layer(0, new LSTM.Builder().activation(Activation.TANH).nOut(10).build()) .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build()) .layer(new OutputLayer.Builder().nOut(6) .lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.recurrent(1)) + .inputType(InputType.recurrent(1)) .build(); } @@ -316,19 +316,19 @@ public class RNNTestCases { @Override public Object getConfiguration() throws Exception { - return new NeuralNetConfiguration.Builder() + return NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .updater(new Adam(5e-2)) .l1(1e-3).l2(1e-3) - .list() + .layer(0, new Bidirectional(new LSTM.Builder().activation(Activation.TANH).nOut(10).build())) .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build()) .layer(new OutputLayer.Builder().nOut(6) .lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.recurrent(1)) + .inputType(InputType.recurrent(1)) .build(); } diff --git a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/UnsupervisedTestCases.java b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/UnsupervisedTestCases.java index 574e3be2d..84b60ffd6 100644 --- a/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/UnsupervisedTestCases.java +++ b/brutex-extended-tests/src/test/java/org/deeplearning4j/integration/testcases/dl4j/UnsupervisedTestCases.java @@ -72,13 +72,13 @@ public class UnsupervisedTestCases { @Override public Object getConfiguration() { - return new NeuralNetConfiguration.Builder() + return NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .updater(new Adam(1e-3)) .weightInit(WeightInit.XAVIER) .l2(1e-4) - .list() + .layer(0, new VariationalAutoencoder.Builder() .activation(Activation.TANH) .encoderLayerSizes(256, 256) //2 encoder layers, each of size 256 diff --git a/build.gradle b/build.gradle index 3a59a0cf4..a3a070c2b 100644 --- a/build.gradle +++ b/build.gradle @@ -44,6 +44,7 @@ ext { scalaVersion = "2.12" logger.quiet("Scala main version is set to {}", scalaVersion) + logger.quiet("Running java {}", JavaVersion.current()) } configurations.all { @@ -64,10 +65,10 @@ allprojects { Project proj -> 
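// Illustrative sketch (not a hunk of this patch): the builder-style configuration the test
// cases above migrate to. Only calls visible in those hunks are used -- NeuralNetConfiguration.builder()
// instead of new NeuralNetConfiguration.Builder(), no .list(), .inputType(...) instead of
// .setInputType(...), and tbpttFwdLength()/tbpttBackLength() instead of the old tBPTT* setters.
// Import paths follow the stock DL4J layout and are assumed unchanged in this fork.
import org.deeplearning4j.nn.conf.BackpropType;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class BuilderMigrationSketch {
    public static NeuralNetConfiguration smallRnn(int nIn, int nOut) {
        return NeuralNetConfiguration.builder()                  // was: new NeuralNetConfiguration.Builder()
                .seed(12345)
                .updater(new Adam(1e-3))
                                                                 // was: .list()
                .layer(0, new LSTM.Builder().nIn(nIn).nOut(200).activation(Activation.TANH).build())
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(200).nOut(nOut).build())
                .backpropType(BackpropType.TruncatedBPTT)
                .tbpttFwdLength(50).tbpttBackLength(50)          // was: tBPTTForwardLength / tBPTTBackwardLength
                .inputType(InputType.recurrent(nIn))             // was: setInputType(...)
                .build();
    }
}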
plugins.withType(JavaPlugin) { - sourceCompatibility = 11 - targetCompatibility = 1.8 + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 tasks.withType(JavaCompile) { - options.release = 8 + options.release = 11 } dependencies { @@ -85,7 +86,6 @@ allprojects { Project proj -> testImplementation 'org.junit.jupiter:junit-jupiter-engine' testImplementation 'org.junit.jupiter:junit-jupiter-api' testImplementation 'org.junit.jupiter:junit-jupiter-params' - implementation "org.slf4j:slf4j-api" implementation "org.slf4j:slf4j-simple" @@ -107,14 +107,17 @@ allprojects { Project proj -> } plugins.withType(MavenPublishPlugin) { + publishing { publications { - mavenJava(MavenPublication) { - /* Need to verify the property exists, as some + if(! proj.name.contains("cavis-full")) { + mavenJava(MavenPublication) { + /* Need to verify the property exists, as some modules may not declare it (i.e. the java-platform plugin) */ - if (components.hasProperty("java") && !proj.name.equals("cavis-native-lib")) { - from components.java + if (components.hasProperty("java")) { + from components.java + } } } } diff --git a/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationIdentity.java b/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationIdentity.java index 46124c636..0a2c48fee 100644 --- a/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationIdentity.java +++ b/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/activations/impl/ActivationIdentity.java @@ -26,7 +26,7 @@ import org.nd4j.common.primitives.Pair; import org.nd4j.linalg.activations.BaseActivationFunction; import org.nd4j.linalg.api.ndarray.INDArray; -/** +/** The ActivationIdentity activation function, just returns the input as is. * f(x) = x */ @EqualsAndHashCode(callSuper = false) diff --git a/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/BaseWorkspaceMgr.java b/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/BaseWorkspaceMgr.java index 9baf97578..a0f45a6d1 100644 --- a/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/BaseWorkspaceMgr.java +++ b/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/BaseWorkspaceMgr.java @@ -195,7 +195,7 @@ public abstract class BaseWorkspaceMgr> implements WorkspaceMg } @Override - public INDArray validateArrayLocation(@NonNull T arrayType, @NonNull INDArray array, boolean migrateIfInvalid, boolean exceptionIfDetached) { + public INDArray validateArrayLocation(T arrayType, INDArray array, boolean migrateIfInvalid, boolean exceptionIfDetached) { validateConfig(arrayType); if(scopeOutOfWs.contains(arrayType)){ diff --git a/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/WorkspaceMgr.java b/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/WorkspaceMgr.java index 90b77f449..f096a1f6f 100644 --- a/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/WorkspaceMgr.java +++ b/cavis-dnn/cavis-dnn-api/src/main/java/org/nd4j/linalg/workspace/WorkspaceMgr.java @@ -67,7 +67,7 @@ public interface WorkspaceMgr> { /** * Set arrays to be scoped out (not in any workspace) for the specified array type. - * This means that create, dup, leverage etc methods will return result arrays that are not attached to any workspace + * This means that create, dup, leverage etc. 
methods will return result arrays that are not attached to any workspace * * @param arrayType Array type to set scoped out for */ @@ -120,7 +120,7 @@ public interface WorkspaceMgr> { boolean isWorkspaceOpen(T arrayType); /** - * Assert thath the workspace for the specified array type is open. + * Assert that the workspace for the specified array type is open. * For array types that are set to scoped out, this will be treated as a no-op * @param arrayType Array type to check * @param msg May be null. If non-null: include this in the exception @@ -129,7 +129,7 @@ public interface WorkspaceMgr> { void assertOpen(T arrayType, String msg) throws ND4JWorkspaceException; /** - * Assert thath the workspace for the specified array type is not open. + * Assert that the workspace for the specified array type is not open. * For array types that are set to scoped out, this will be treated as a no-op * @param arrayType Array type to check * @param msg May be null. If non-null: include this in the exception @@ -193,7 +193,7 @@ public interface WorkspaceMgr> { /** * Create an uninitialized array in the specified array type's workspace (or detached if none is specified). - * Equivalent to {@link org.nd4j.linalg.factory.Nd4j#createUninitialized(int)} (int...)}, other than the array location + * Equivalent to {@link org.nd4j.linalg.factory.Nd4j#createUninitialized(int...)}, other than the array location * @param arrayType Array type * @param dataType Data type of the created array * @param shape Shape @@ -231,7 +231,7 @@ public interface WorkspaceMgr> { /** * Cast the specified array to the specified datatype.
- * If the array is already the correct type, the bahaviour depends on the 'dupIfCorrectType' argument.
+ * If the array is already the correct type, the behaviour depends on the 'dupIfCorrectType' argument.
* dupIfCorrectType = false && toCast.dataType() == dataType: return input array as-is (unless workspace is wrong)
* dupIfCorrectType = true && toCast.dataType() == dataType: duplicate the array into the specified workspace
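// Illustrative sketch (not a hunk of this patch) of the WorkspaceMgr calls whose javadoc is
// corrected above. It assumes the stock org.deeplearning4j.nn.workspace.LayerWorkspaceMgr and
// ArrayType classes are still available in this fork; only methods whose signatures appear in
// the WorkspaceMgr/BaseWorkspaceMgr hunks are invoked.
import org.deeplearning4j.nn.workspace.ArrayType;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class WorkspaceMgrSketch {
    public static void main(String[] args) throws Exception {
        LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(); // every array type scoped out (detached)
        INDArray act = Nd4j.rand(DataType.FLOAT, 2, 3);

        // Per the javadoc fixed above, assertOpen is a no-op for scoped-out array types:
        mgr.assertOpen(ArrayType.ACTIVATIONS, "treated as open because ACTIVATIONS is scoped out");
        boolean open = mgr.isWorkspaceOpen(ArrayType.ACTIVATIONS);

        // validateArrayLocation(arrayType, array, migrateIfInvalid, exceptionIfDetached),
        // signature as shown in the BaseWorkspaceMgr hunk above:
        INDArray checked = mgr.validateArrayLocation(ArrayType.ACTIVATIONS, act, false, false);
        System.out.println("workspace open: " + open + ", same instance returned: " + (checked == act));
    }
}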
* @param arrayType Array type diff --git a/cavis-dnn/cavis-dnn-core/build.gradle b/cavis-dnn/cavis-dnn-core/build.gradle index 18c322532..e40b8482f 100644 --- a/cavis-dnn/cavis-dnn-core/build.gradle +++ b/cavis-dnn/cavis-dnn-core/build.gradle @@ -19,6 +19,7 @@ dependencies { testImplementation projects.cavisNative.cavisNativeCommon testImplementation projects.cavisNd4j.cavisNd4jCommonTests testImplementation projects.cavisDnn.cavisDnnCommonTests + testImplementation projects.cavisDnn.cavisDnnNn implementation "org.apache.commons:commons-lang3" diff --git a/cavis-dnn/cavis-dnn-core/src/main/java/net/brutex/ai/dnn/core/util/ANSI.java b/cavis-dnn/cavis-dnn-core/src/main/java/net/brutex/ai/dnn/core/util/ANSI.java new file mode 100644 index 000000000..bd2247445 --- /dev/null +++ b/cavis-dnn/cavis-dnn-core/src/main/java/net/brutex/ai/dnn/core/util/ANSI.java @@ -0,0 +1,52 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.core.util; + +/** + * ANSI colour codes + */ +public enum ANSI { + BLACK("\u001B[30m"), + RED("\u001B[31m"), + GREEN("\u001B[32m"), + YELLOW("\u001B[33m"), + BLUE("\u001B[34m"), + PURPLE("\u001B[35m"), + CYAN("\u001B[36m"), + WHITE("\u001B[37m"), + + ANSI_RESET("\u001B[0m"), + + BLACK_BACKGROUND("\u001B[40m"), + RED_BACKGROUND("\u001B[41m"), + GREEN_BACKGROUND("\u001B[42m"), + YELLOW_BACKGROUND("\u001B[43m"), + BLUE_BACKGROUND("\u001B[44m"), + PURPLE_BACKGROUND("\u001B[45m"), + CYAN_BACKGROUND("\u001B[46m"), + WHITE_BACKGROUND("\u001B[47m"); + + String code; + ANSI(String code) { + this.code = code; + } +} diff --git a/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoFilePrintListener.java b/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoFilePrintListener.java index 88f8a2bd8..d9e3d7b6f 100644 --- a/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoFilePrintListener.java +++ b/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoFilePrintListener.java @@ -23,8 +23,8 @@ package org.deeplearning4j.core.listener; import lombok.NonNull; import lombok.Builder; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.ndarray.INDArray; import oshi.json.SystemInfo; @@ -56,12 +56,12 @@ public class SystemInfoFilePrintListener implements TrainingListener { } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { } @Override - public 
void onEpochStart(Model model) { + public void onEpochStart(IModel model) { if(!printOnEpochStart || printFileTarget == null) return; @@ -70,7 +70,7 @@ public class SystemInfoFilePrintListener implements TrainingListener { } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { if(!printOnEpochEnd || printFileTarget == null) return; @@ -79,7 +79,7 @@ public class SystemInfoFilePrintListener implements TrainingListener { } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { if(!printOnBackwardPass || printFileTarget == null) return; @@ -88,7 +88,7 @@ public class SystemInfoFilePrintListener implements TrainingListener { } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { if(!printOnForwardPass || printFileTarget == null) return; @@ -97,7 +97,7 @@ public class SystemInfoFilePrintListener implements TrainingListener { } @Override - public void onGradientCalculation(Model model) { + public void onGradientCalculation(IModel model) { if(!printOnGradientCalculation || printFileTarget == null) return; @@ -107,7 +107,7 @@ public class SystemInfoFilePrintListener implements TrainingListener { } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { if(!printOnBackwardPass || printFileTarget == null) return; diff --git a/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoPrintListener.java b/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoPrintListener.java index 5b115d542..e4bdfcda6 100644 --- a/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoPrintListener.java +++ b/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/listener/SystemInfoPrintListener.java @@ -22,7 +22,7 @@ package org.deeplearning4j.core.listener; import lombok.Builder; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.ndarray.INDArray; import oshi.json.SystemInfo; @@ -49,12 +49,12 @@ public class SystemInfoPrintListener implements TrainingListener { private static final String SYSTEM_INFO = "System info on epoch end: "; @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { } @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { if(!printOnEpochStart) return; @@ -64,7 +64,7 @@ public class SystemInfoPrintListener implements TrainingListener { } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { if(!printOnEpochEnd) return; @@ -74,7 +74,7 @@ public class SystemInfoPrintListener implements TrainingListener { } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { if(!printOnBackwardPass) return; @@ -84,7 +84,7 @@ public class SystemInfoPrintListener implements TrainingListener { } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { if(!printOnForwardPass) return; @@ -94,7 +94,7 @@ public class SystemInfoPrintListener implements TrainingListener { } @Override - public void onGradientCalculation(Model model) { + public 
void onGradientCalculation(IModel model) { if(!printOnGradientCalculation) return; @@ -104,7 +104,7 @@ public class SystemInfoPrintListener implements TrainingListener { } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { if(!printOnBackwardPass) return; SystemInfo systemInfo = new SystemInfo(); diff --git a/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/util/ModelGuesser.java b/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/util/ModelGuesser.java index 70b250978..3ab6eec8f 100644 --- a/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/util/ModelGuesser.java +++ b/cavis-dnn/cavis-dnn-core/src/main/java/org/deeplearning4j/core/util/ModelGuesser.java @@ -21,13 +21,13 @@ package org.deeplearning4j.core.util; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.deeplearning4j.common.util.DL4JFileUtils; import org.deeplearning4j.common.config.DL4JSystemProperties; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.modelimport.keras.KerasModelImport; import org.deeplearning4j.util.ModelSerializer; import org.nd4j.linalg.dataset.api.preprocessor.Normalizer; @@ -80,7 +80,7 @@ public class ModelGuesser { //note here that we load json BEFORE YAML. YAML //turns out to load just fine *accidentally* try { - return MultiLayerConfiguration.fromJson(input); + return NeuralNetConfiguration.fromJson(input); } catch (Exception e) { log.warn("Tried multi layer config from json", e); try { @@ -96,7 +96,7 @@ public class ModelGuesser { } catch (Exception e3) { log.warn("Tried computation graph from json"); try { - return MultiLayerConfiguration.fromYaml(input); + return NeuralNetConfiguration.fromYaml(input); } catch (Exception e4) { log.warn("Tried multi layer configuration from yaml"); try { @@ -142,7 +142,7 @@ public class ModelGuesser { * @return the loaded model * @throws Exception */ - public static Model loadModelGuess(String path) throws Exception { + public static IModel loadModelGuess(String path) throws Exception { try { return ModelSerializer.restoreMultiLayerNetwork(new File(path), true); } catch (Exception e) { @@ -185,7 +185,7 @@ public class ModelGuesser { * @return the loaded model * @throws Exception */ - public static Model loadModelGuess(InputStream stream) throws Exception { + public static IModel loadModelGuess(InputStream stream) throws Exception { return loadModelGuess(stream, null); } @@ -194,7 +194,7 @@ public class ModelGuesser { * @param stream Stream of the model file * @param tempDirectory Temporary/working directory. May be null. 
*/ - public static Model loadModelGuess(InputStream stream, File tempDirectory) throws Exception { + public static IModel loadModelGuess(InputStream stream, File tempDirectory) throws Exception { //Currently (Nov 2017): KerasModelImport doesn't support loading from input streams //Simplest solution here: write to a temporary file File f; diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/LayerHelperValidationUtil.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/LayerHelperValidationUtil.java index cc1220762..308b7c7ad 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/LayerHelperValidationUtil.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/LayerHelperValidationUtil.java @@ -26,6 +26,7 @@ import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.common.config.DL4JClassLoading; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.listeners.CollectScoresListener; @@ -99,7 +100,7 @@ public class LayerHelperValidationUtil { //Don't allow fallback: for(Layer l : netOrig.getLayers()){ - org.deeplearning4j.nn.conf.layers.Layer lConf = l.conf().getLayer(); + LayerConfiguration lConf = l.getLayerConfiguration(); if(lConf instanceof ConvolutionLayer){ ((ConvolutionLayer) lConf).setCudnnAllowFallback(false); } else if(lConf instanceof SubsamplingLayer){ @@ -108,14 +109,14 @@ public class LayerHelperValidationUtil { } - MultiLayerNetwork net1NoHelper = new MultiLayerNetwork(netOrig.getLayerWiseConfigurations().clone()); + MultiLayerNetwork net1NoHelper = new MultiLayerNetwork(netOrig.getNetConfiguration().clone()); net1NoHelper.init(); log.info("Removing all layer helpers from network copy 1"); removeHelpers(net1NoHelper.getLayers(), null); - MultiLayerNetwork net2With = new MultiLayerNetwork(netOrig.getLayerWiseConfigurations().clone()); + MultiLayerNetwork net2With = new MultiLayerNetwork(netOrig.getNetConfiguration().clone()); net2With.init(); - net2With.params().assign(netOrig.params()); + net2With.getModelParams().assign(netOrig.getModelParams()); log.info("Removing all except for specified helpers from network copy 2: " + t.getAllowHelpersForClasses()); removeHelpers(net2With.getLayers(), t.getAllowHelpersForClasses()); @@ -123,7 +124,7 @@ public class LayerHelperValidationUtil { Preconditions.checkNotNull(t.getFeatures(), "Features are not set (null)"); for (boolean train : new boolean[]{false, true}) { - assertEquals(net1NoHelper.params(), net2With.params()); + assertEquals(net1NoHelper.getModelParams(), net2With.getModelParams()); String s = "Feed forward test - " + t.getTestName() + " - " + (train ? 
"Train: " : "Test: "); List ff1; try { @@ -133,7 +134,7 @@ public class LayerHelperValidationUtil { enableCppHelpers(); } List ff2 = net2With.feedForward(t.getFeatures(), train); - List paramKeys = new ArrayList<>(net1NoHelper.paramTable().keySet()); + List paramKeys = new ArrayList<>(net1NoHelper.getParamTable().keySet()); Collections.sort(paramKeys); for (String p : paramKeys) { INDArray p1 = net1NoHelper.getParam(p); @@ -179,7 +180,7 @@ public class LayerHelperValidationUtil { double maxRE = relError.maxNumber().doubleValue(); log.info(s + "Output, max relative error: " + maxRE); - assertEquals(net1NoHelper.params(), net2With.params()); //Check that forward pass does not modify params + assertEquals(net1NoHelper.getModelParams(), net2With.getModelParams()); //Check that forward pass does not modify params assertTrue(maxRE < t.getMaxRelError(), s + "Max RE: " + maxRE); } } @@ -224,7 +225,7 @@ public class LayerHelperValidationUtil { } net2With.computeGradientAndScore(); - List paramKeys = new ArrayList<>(net1NoHelper.paramTable().keySet()); + List paramKeys = new ArrayList<>(net1NoHelper.getParamTable().keySet()); Collections.sort(paramKeys); for(String p : paramKeys){ INDArray g1 = net1NoHelper.gradient().gradientForVariable().get(p); @@ -252,26 +253,26 @@ public class LayerHelperValidationUtil { Preconditions.checkNotNull(t.getData(), "DataSetIterator is not set (null)"); log.info("Testing run-to-run consistency of training with layer helper"); - net2With = new MultiLayerNetwork(netOrig.getLayerWiseConfigurations().clone()); + net2With = new MultiLayerNetwork(netOrig.getNetConfiguration().clone()); net2With.init(); - net2With.params().assign(netOrig.params()); + net2With.getModelParams().assign(netOrig.getModelParams()); log.info("Removing all except for specified layer helpers from network copy 2: " + t.getAllowHelpersForClasses()); removeHelpers(net2With.getLayers(), t.getAllowHelpersForClasses()); CollectScoresListener listener = new CollectScoresListener(1); - net2With.setListeners(listener); + net2With.addTrainingListeners(listener); net2With.fit(t.getData()); for( int i=0; i<2; i++ ) { - net2With = new MultiLayerNetwork(netOrig.getLayerWiseConfigurations().clone()); + net2With = new MultiLayerNetwork(netOrig.getNetConfiguration().clone()); net2With.init(); - net2With.params().assign(netOrig.params()); + net2With.getModelParams().assign(netOrig.getModelParams()); log.info("Removing all except for specified layer helpers from network copy 2: " + t.getAllowHelpersForClasses()); removeHelpers(net2With.getLayers(), t.getAllowHelpersForClasses()); CollectScoresListener listener2 = new CollectScoresListener(1); - net2With.setListeners(listener2); + net2With.addTrainingListeners(listener2); net2With.fit(t.getData()); DoubleArrayList listOrig = listener.getListScore(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/RandomTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/RandomTests.java index 63b13e660..d939dab81 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/RandomTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/RandomTests.java @@ -23,19 +23,15 @@ package org.deeplearning4j; import org.deeplearning4j.datasets.iterator.EarlyTerminationDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; 
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.junit.jupiter.api.Test; -import org.nd4j.common.resources.Resources; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; -import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.RmsProp; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.nio.file.Files; import java.util.concurrent.CountDownLatch; //@Ignore @@ -44,8 +40,8 @@ public class RandomTests extends BaseDL4JTest { @Test public void testReproduce() throws Exception { - final MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + final NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(10) .activation(Activation.TANH).build()) .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/TestUtils.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/TestUtils.java index f1e12d123..495b21e18 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/TestUtils.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/TestUtils.java @@ -23,9 +23,9 @@ package org.deeplearning4j; import org.apache.commons.compress.utils.IOUtils; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.samediff.AbstractSameDiffLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.convolution.ConvolutionLayer; @@ -66,15 +66,15 @@ public class TestUtils { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); restored = ModelSerializer.restoreMultiLayerNetwork(bais, true); - assertEquals(net.getLayerWiseConfigurations(), restored.getLayerWiseConfigurations()); - assertEquals(net.params(), restored.params()); + assertEquals(net.getNetConfiguration(), restored.getNetConfiguration()); + assertEquals(net.getModelParams(), restored.getModelParams()); } catch (IOException e){ //Should never happen throw new RuntimeException(e); } - //Also check the MultiLayerConfiguration is serializable (required by Spark etc) - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); + //Also check the NeuralNetConfiguration is serializable (required by Spark etc) + NeuralNetConfiguration conf = net.getNetConfiguration(); serializeDeserializeJava(conf); return restored; @@ -90,15 +90,15 @@ public class TestUtils { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); restored = ModelSerializer.restoreComputationGraph(bais, true); - assertEquals(net.getConfiguration(), restored.getConfiguration()); - assertEquals(net.params(), restored.params()); + assertEquals(net.getComputationGraphConfiguration(), restored.getComputationGraphConfiguration()); + assertEquals(net.getModelParams(), restored.getModelParams()); } catch (IOException e){ //Should never happen throw new 
RuntimeException(e); } //Also check the ComputationGraphConfiguration is serializable (required by Spark etc) - ComputationGraphConfiguration conf = net.getConfiguration(); + ComputationGraphConfiguration conf = net.getComputationGraphConfiguration(); serializeDeserializeJava(conf); return restored; @@ -205,8 +205,8 @@ public class TestUtils { return null; } - public static L2Regularization getL2Reg(BaseLayer baseLayer){ - return getL2Reg(baseLayer.getRegularization()); + public static L2Regularization getL2Reg(BaseLayerConfiguration baseLayerConfiguration){ + return getL2Reg(baseLayerConfiguration.getRegularization()); } public static L2Regularization getL2Reg(List l){ @@ -218,7 +218,7 @@ public class TestUtils { return null; } - public static WeightDecay getWeightDecayReg(BaseLayer bl){ + public static WeightDecay getWeightDecayReg(BaseLayerConfiguration bl){ return getWeightDecayReg(bl.getRegularization()); } @@ -231,7 +231,7 @@ public class TestUtils { return null; } - public static double getL1(BaseLayer layer) { + public static double getL1(BaseLayerConfiguration layer) { List l = layer.getRegularization(); return getL1(l); } @@ -246,7 +246,7 @@ public class TestUtils { return l1Reg.getL1().valueAt(0,0); } - public static double getL2(BaseLayer layer) { + public static double getL2(BaseLayerConfiguration layer) { List l = layer.getRegularization(); return getL2(l); } @@ -269,7 +269,7 @@ public class TestUtils { return getL2(layer.getRegularization()); } - public static double getWeightDecay(BaseLayer layer) { + public static double getWeightDecay(BaseLayerConfiguration layer) { return getWeightDecayReg(layer.getRegularization()).getCoeff().valueAt(0,0); } @@ -317,14 +317,14 @@ public class TestUtils { for(Layer l : layers){ //Don't use instanceof here - there are sub conv subclasses if(l.getClass() == ConvolutionLayer.class || l instanceof SubsamplingLayer || l instanceof BatchNormalization || l instanceof LSTM){ - Preconditions.checkNotNull(l.getHelper(), l.conf().getLayer().getLayerName()); + Preconditions.checkNotNull(l.getHelper(), l.getLayerConfiguration().getLayerName()); } } } public static void assertHelpersAbsent(Layer[] layers) throws Exception { for(Layer l : layers){ - Preconditions.checkState(l.getHelper() == null, l.conf().getLayer().getLayerName()); + Preconditions.checkState(l.getHelper() == null, l.getLayerConfiguration().getLayerName()); } } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/datasets/iterator/DataSetIteratorTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/datasets/iterator/DataSetIteratorTest.java index dc9b3ffcf..be740689b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/datasets/iterator/DataSetIteratorTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/datasets/iterator/DataSetIteratorTest.java @@ -31,7 +31,6 @@ import org.deeplearning4j.datasets.iterator.impl.*; import org.deeplearning4j.eval.Evaluation; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -168,9 +167,9 @@ public class DataSetIteratorTest extends BaseDL4JTest { LFWDataSetIterator lfw = new LFWDataSetIterator(batchSize, numSamples, new int[] {numRows, numColumns, numChannels}, outputNum, false, true, 1.0, 
new Random(seed)); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(numChannels).nOut(6) .weightInit(WeightInit.XAVIER).activation(Activation.RELU).build()) .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) @@ -178,13 +177,12 @@ public class DataSetIteratorTest extends BaseDL4JTest { .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(numRows, numColumns, numChannels)) - ; + .inputType(InputType.convolutionalFlat(numRows, numColumns, numChannels)); MultiLayerNetwork model = new MultiLayerNetwork(builder.build()); model.init(); - model.setListeners(new ScoreIterationListener(listenerFreq)); + model.addTrainingListeners(new ScoreIterationListener(listenerFreq)); model.fit(lfw.next()); @@ -229,9 +227,9 @@ public class DataSetIteratorTest extends BaseDL4JTest { Cifar10DataSetIterator cifar = new Cifar10DataSetIterator(batchSize); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new ConvolutionLayer.Builder(5, 5).nIn(channels).nOut(6).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) @@ -240,7 +238,7 @@ public class DataSetIteratorTest extends BaseDL4JTest { .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(height, width, channels)); + .inputType(InputType.convolutionalFlat(height, width, channels)); MultiLayerNetwork model = new MultiLayerNetwork(builder.build()); model.init(); @@ -248,7 +246,7 @@ public class DataSetIteratorTest extends BaseDL4JTest { //model.setListeners(Arrays.asList((TrainingListener) new ScoreIterationListener(listenerFreq))); CollectScoresIterationListener listener = new CollectScoresIterationListener(listenerFreq); - model.setListeners(listener); + model.addTrainingListeners(listener); model.fit(cifar); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStopping.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStopping.java index 13ae46efb..0923ba407 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStopping.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStopping.java @@ -38,10 +38,9 @@ import org.deeplearning4j.earlystopping.scorecalc.*; import org.deeplearning4j.earlystopping.termination.*; import org.deeplearning4j.earlystopping.trainer.EarlyStoppingTrainer; import org.deeplearning4j.earlystopping.trainer.IEarlyStoppingTrainer; 
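// Illustrative sketch (not a hunk of this patch): the listener pattern used throughout the tests
// below. Listeners are now attached with addTrainingListeners(...) rather than setListeners(...),
// and TrainingListener callbacks such as iterationDone(...) receive the new IModel interface.
// Only listener types that already appear in this patch are used.
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.listeners.CollectScoresListener;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;

public class ListenerSketch {
    public static void attach(MultiLayerNetwork net) {
        net.addTrainingListeners(new ScoreIterationListener(1)); // was: net.setListeners(new ScoreIterationListener(1))
        net.addTrainingListeners(new CollectScoresListener(1));  // assumed additive, as the new method name suggests
    }
}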
-import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.variational.BernoulliReconstructionDistribution; @@ -133,9 +132,9 @@ public class TestEarlyStopping extends BaseDL4JTest { String msg = i + " - " + sc.getClass().getSimpleName(); log.info("Starting test - {}", msg); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) - .updater(new Sgd(0.5)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(0.5)).weightInit(WeightInit.XAVIER) .layer(new DenseLayer.Builder().nIn(4).nOut(4).activation(Activation.TANH).build()) .layer(new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) @@ -219,15 +218,15 @@ public class TestEarlyStopping extends BaseDL4JTest { @Test public void testEarlyStoppingEveryNEpoch() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Sgd(0.01)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(0.01)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -248,15 +247,15 @@ public class TestEarlyStopping extends BaseDL4JTest { @Test public void testEarlyStoppingIrisMultiEpoch() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); MultipleEpochsIterator mIter = new MultipleEpochsIterator(10, irisIter); @@ -297,15 +296,15 @@ public class TestEarlyStopping extends BaseDL4JTest { //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(5.0)) //Intentionally huge LR - .weightInit(WeightInit.XAVIER).list() + .weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new 
MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -336,15 +335,15 @@ public class TestEarlyStopping extends BaseDL4JTest { //test termination after max time Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); @@ -379,15 +378,15 @@ public class TestEarlyStopping extends BaseDL4JTest { //Simulate this by setting LR = 0.0 Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); @@ -419,9 +418,9 @@ public class TestEarlyStopping extends BaseDL4JTest { //Simulate this by setting LR = 0.0 Random rng = new Random(123); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Nesterovs(0.0,0.9)).list() + .updater(new Nesterovs(0.0,0.9)) .layer(0, new DenseLayer.Builder().nIn(1).nOut(20) .weightInit(WeightInit.XAVIER).activation( Activation.TANH) @@ -431,7 +430,7 @@ public class TestEarlyStopping extends BaseDL4JTest { .build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); int nSamples = 100; //Generate the training data INDArray x = Nd4j.linspace(-10, 10, nSamples).reshape(nSamples, 1); @@ -466,15 +465,15 @@ public class TestEarlyStopping extends BaseDL4JTest { @Test public void testEarlyStoppingGetBestModel() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); 
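// Illustrative sketch (not a hunk of this patch): the ComputationGraph side of the same renaming,
// as used in the IntegrationTestRunner and TestUtils hunks -- getComputationGraphConfiguration()
// replaces getConfiguration() and getModelParams() replaces params(); ComputationGraph.load(...)
// and ModelSerializer.writeModel(...) are called exactly as in those hunks.
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.File;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.deeplearning4j.util.ModelSerializer;

public class GraphAccessorSketch {
    public static void checkRestore(ComputationGraph cg, File f) throws Exception {
        ModelSerializer.writeModel(cg, f, true);
        ComputationGraph restored = ComputationGraph.load(f, true);
        assertEquals(cg.getComputationGraphConfiguration(), restored.getComputationGraphConfiguration()); // was getConfiguration()
        assertEquals(cg.getModelParams(), restored.getModelParams());                                     // was params()
    }
}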
MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); MultipleEpochsIterator mIter = new MultipleEpochsIterator(10, irisIter); @@ -496,23 +495,23 @@ public class TestEarlyStopping extends BaseDL4JTest { MultiLayerNetwork mln = result.getBestModel(); assertEquals(net.getnLayers(), mln.getnLayers()); - assertEquals(net.conf().getOptimizationAlgo(), mln.conf().getOptimizationAlgo()); - BaseLayer bl = (BaseLayer) net.conf().getLayer(); - assertEquals(bl.getActivationFn().toString(), ((BaseLayer) mln.conf().getLayer()).getActivationFn().toString()); - assertEquals(bl.getIUpdater(), ((BaseLayer) mln.conf().getLayer()).getIUpdater()); + assertEquals(net.getNetConfiguration().getOptimizationAlgo(), mln.getNetConfiguration().getOptimizationAlgo()); + BaseLayerConfiguration bl = (BaseLayerConfiguration) net.getLayerConfiguration(); + assertEquals(bl.getActivationFn().toString(), ((BaseLayerConfiguration) mln.getLayerConfiguration()).getActivationFn().toString()); + assertEquals(bl.getIUpdater(), ((BaseLayerConfiguration) mln.getLayerConfiguration()).getIUpdater()); } @Test public void testListeners() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).list() + .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -569,8 +568,8 @@ public class TestEarlyStopping extends BaseDL4JTest { Metric.MAE}) { log.info("Metric: " + metric); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new DenseLayer.Builder().nIn(784).nOut(32).build()) .layer(new OutputLayer.Builder().nIn(32).nOut(784).activation(Activation.SIGMOID).lossFunction(LossFunctions.LossFunction.MSE).build()) .build(); @@ -612,8 +611,8 @@ public class TestEarlyStopping extends BaseDL4JTest { Metric.MAE}) { log.info("Metric: " + metric); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new AutoEncoder.Builder().nIn(784).nOut(32).build()) .build(); @@ -655,8 +654,8 @@ public class TestEarlyStopping extends BaseDL4JTest { Metric.MAE}) { log.info("Metric: " + metric); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new VariationalAutoencoder.Builder() .nIn(784).nOut(32) .encoderLayerSizes(64) @@ -700,8 +699,8 @@ public class TestEarlyStopping extends BaseDL4JTest { for(boolean logProb : new boolean[]{false, true}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new VariationalAutoencoder.Builder() .nIn(784).nOut(32) 
.encoderLayerSizes(64) @@ -747,8 +746,8 @@ public class TestEarlyStopping extends BaseDL4JTest { for(Evaluation.Metric metric : Evaluation.Metric.values()) { log.info("Metric: " + metric); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new DenseLayer.Builder().nIn(784).nOut(32).build()) .layer(new OutputLayer.Builder().nIn(32).nOut(10).activation(Activation.SOFTMAX).build()) .build(); @@ -784,8 +783,8 @@ public class TestEarlyStopping extends BaseDL4JTest { @Test public void testEarlyStoppingListeners() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER) .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) @@ -793,7 +792,7 @@ public class TestEarlyStopping extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); TestListener tl = new TestListener(); - net.setListeners(tl); + net.addTrainingListeners(tl); DataSetIterator irisIter = new IrisDataSetIterator(50, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -827,19 +826,19 @@ public class TestEarlyStopping extends BaseDL4JTest { private int maxEpochEnd = -1; @Override - public void onEpochStart(Model model){ + public void onEpochStart(IModel model){ countEpochStart++; maxEpochStart = Math.max(maxEpochStart, BaseOptimizer.getEpochCount(model)); } @Override - public void onEpochEnd(Model model){ + public void onEpochEnd(IModel model){ countEpochEnd++; maxEpochEnd = Math.max(maxEpochEnd, BaseOptimizer.getEpochCount(model)); } @Override - public void iterationDone(Model model, int iteration, int epoch){ + public void iterationDone(IModel model, int iteration, int epoch){ iterCount++; } @@ -859,7 +858,7 @@ public class TestEarlyStopping extends BaseDL4JTest { DataSetIterator test = new SingletonDataSetIterator(ds); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(123) .weightInit(WeightInit.XAVIER) .updater(new Adam(0.1)) @@ -868,7 +867,7 @@ public class TestEarlyStopping extends BaseDL4JTest { .gradientNormalization(GradientNormalization .ClipElementWiseAbsoluteValue) .gradientNormalizationThreshold(1.0) - .list() + .layer(0, new LSTM.Builder() .nIn(10) .nOut(10) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStoppingCompGraph.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStoppingCompGraph.java index 4209f8dd3..22b739f89 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStoppingCompGraph.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/earlystopping/TestEarlyStoppingCompGraph.java @@ -76,7 +76,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { @Test public void testEarlyStoppingIris() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3) @@ -84,7 +84,7 @@ 
public class TestEarlyStoppingCompGraph extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -120,7 +120,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(5.0)) //Intentionally huge LR .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") @@ -128,7 +128,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -156,7 +156,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { //test termination after max time Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder() .addInputs("in") @@ -165,7 +165,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); @@ -198,7 +198,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { //Simulate this by setting LR = 0.0 Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder() .addInputs("in") @@ -207,7 +207,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); @@ -233,7 +233,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { @Test public void testListeners() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new 
Sgd(0.001)).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3) @@ -241,7 +241,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(150, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -297,7 +297,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { Metric.MAE}) { log.info("Metric: " + metric); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new DenseLayer.Builder().nIn(784).nOut(32).build(), "in") @@ -343,7 +343,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { Metric.MAE}) { log.info("Metric: " + metric); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new AutoEncoder.Builder().nIn(784).nOut(32).build(), "in") @@ -388,7 +388,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { Metric.MAE}) { log.info("Metric: " + metric); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new VariationalAutoencoder.Builder() @@ -435,7 +435,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { for(boolean logProb : new boolean[]{false, true}) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(1e-5)) .graphBuilder() .addInputs("in") @@ -486,7 +486,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { for(Evaluation.Metric metric : Evaluation.Metric.values()) { log.info("Metric: " + metric); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new DenseLayer.Builder().nIn(784).nOut(32).build(), "in") @@ -526,7 +526,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { @Test public void testEarlyStoppingListenersCG() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.001)).weightInit(WeightInit.XAVIER) .graphBuilder() .addInputs("in") @@ -538,7 +538,7 @@ public class TestEarlyStoppingCompGraph extends BaseDL4JTest { ComputationGraph net = new ComputationGraph(conf); TestEarlyStopping.TestListener tl = new TestEarlyStopping.TestListener(); - net.setListeners(tl); + net.addTrainingListeners(tl); DataSetIterator irisIter = new IrisDataSetIterator(50, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvalTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvalTest.java index 30cb1e5ca..04d6f440f 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvalTest.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvalTest.java @@ -69,7 +69,7 @@ public class EvalTest extends BaseDL4JTest { public void testIris() { // Network config - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(42) .updater(new Sgd(1e-6)).list() @@ -84,7 +84,7 @@ public class EvalTest extends BaseDL4JTest { // Instantiate model MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); - model.addListeners(new ScoreIterationListener(1)); + model.addTrainingListeners(new ScoreIterationListener(1)); // Train-test split DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -177,7 +177,7 @@ public class EvalTest extends BaseDL4JTest { rrdsi.reset(); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1)) .list() .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) @@ -295,7 +295,7 @@ public class EvalTest extends BaseDL4JTest { int tbpttLength = 10; int tsLength = 5 * tbpttLength + tbpttLength / 2; - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .seed(12345) .trainingWorkspaceMode(ws) .inferenceWorkspaceMode(ws) @@ -306,7 +306,7 @@ public class EvalTest extends BaseDL4JTest { .build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .seed(12345) .trainingWorkspaceMode(ws) .inferenceWorkspaceMode(ws) @@ -314,7 +314,7 @@ public class EvalTest extends BaseDL4JTest { .layer(new LSTM.Builder().nIn(nIn).nOut(layerSize).build()) .layer(new RnnOutputLayer.Builder().nIn(layerSize).nOut(nOut) .activation(Activation.SOFTMAX).build()) - .tBPTTLength(10) + .tbpttFwdLength(10).tbpttBackLength(10) .backpropType(BackpropType.TruncatedBPTT) .build(); @@ -324,7 +324,7 @@ public class EvalTest extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - net2.setParams(net1.params()); + net2.setParams(net1.getModelParams()); for(boolean useMask : new boolean[]{false, true}) { @@ -371,7 +371,7 @@ public class EvalTest extends BaseDL4JTest { int tbpttLength = 10; int tsLength = 5 * tbpttLength + tbpttLength / 2; - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder() .seed(12345) .trainingWorkspaceMode(ws) .inferenceWorkspaceMode(ws) @@ -384,7 +384,7 @@ public class EvalTest extends BaseDL4JTest { .setOutputs("1") .build(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .seed(12345) .trainingWorkspaceMode(ws) .inferenceWorkspaceMode(ws) @@ -405,7 +405,7 @@ public class EvalTest extends BaseDL4JTest { ComputationGraph net2 = new ComputationGraph(conf2); net2.init(); - net2.setParams(net1.params()); + net2.setParams(net1.getModelParams()); for (boolean useMask : new boolean[]{false, true}) { @@ -455,12 +455,12 @@ public class EvalTest extends BaseDL4JTest { DataSetIterator testData = new SequenceRecordReaderDataSetIterator(fsr, lsr, 1, -1, true, 
SequenceRecordReaderDataSetIterator.AlignmentMode.ALIGN_END); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .list() .layer(0, new LSTM.Builder().activation(Activation.TANH).nIn(3).nOut(3).build()) .layer(1, new RnnOutputLayer.Builder().activation(Activation.SIGMOID).lossFunction(LossFunctions.LossFunction.XENT) .nIn(3).nOut(1).build()) - .backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(10).tBPTTBackwardLength(10) + .backpropType(BackpropType.TruncatedBPTT).tbpttFwdLength(10).tbpttBackLength(10) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -473,7 +473,7 @@ public class EvalTest extends BaseDL4JTest { //Sanity check: https://github.com/eclipse/deeplearning4j/issues/5351 // Network config - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(42) .updater(new Sgd(1e-6)).list() @@ -492,7 +492,7 @@ public class EvalTest extends BaseDL4JTest { DataSetIterator iter = new IrisDataSetIterator(30, 150); DataSetIterator iterTest = new IrisDataSetIterator(30, 150); - net.setListeners(new EvaluativeListener(iterTest, 3)); + net.addTrainingListeners(new EvaluativeListener(iterTest, 3)); for( int i=0; i<3; i++ ){ net.fit(iter); @@ -503,7 +503,7 @@ public class EvalTest extends BaseDL4JTest { public void testMultiOutputEvalSimple(){ Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .graphBuilder() .addInputs("in") @@ -538,7 +538,7 @@ public class EvalTest extends BaseDL4JTest { public void testMultiOutputEvalCG(){ //Simple sanity check on evaluation - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new EmbeddingSequenceLayer.Builder().nIn(10).nOut(10).build(), "in") @@ -566,7 +566,7 @@ public class EvalTest extends BaseDL4JTest { @Test public void testInvalidEvaluation(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new DenseLayer.Builder().nIn(4).nOut(10).build()) @@ -622,11 +622,11 @@ public class EvalTest extends BaseDL4JTest { //Disable validation, and check same thing: - net.getLayerWiseConfigurations().setValidateOutputLayerConfig(false); + net.getNetConfiguration().setValidateOutputLayerConfig(false); net.evaluate(iter); net.evaluateROCMultiClass(iter, 0); - cg.getConfiguration().setValidateOutputLayerConfig(false); + cg.getComputationGraphConfiguration().setValidateOutputLayerConfig(false); cg.evaluate(iter); cg.evaluateROCMultiClass(iter, 0); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvaluationToolsTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvaluationToolsTests.java index 70271cd95..7cf2431f9 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvaluationToolsTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/EvaluationToolsTests.java @@ -23,7 +23,6 @@ package org.deeplearning4j.eval; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; 
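// Minimal sketch of the truncated-BPTT and parameter-access renames applied in the EvalTest hunks above,
// assuming the builder API introduced by this patch; the value 10 mirrors the test code and the variable
// names (conf2, net1, net2, nIn, layerSize, nOut) stand in for the test's locals.
//
//   NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder()
//           .seed(12345)
//           .list()
//           .layer(new LSTM.Builder().nIn(nIn).nOut(layerSize).build())
//           .layer(new RnnOutputLayer.Builder().nIn(layerSize).nOut(nOut)
//                   .activation(Activation.SOFTMAX).build())
//           .backpropType(BackpropType.TruncatedBPTT)
//           .tbpttFwdLength(10).tbpttBackLength(10)   // was: .tBPTTLength(10)
//           .build();
//
//   MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
//   net2.init();
//   net2.setParams(net1.getModelParams());            // was: net1.params()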
import org.deeplearning4j.core.evaluation.EvaluationTools; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -48,7 +47,7 @@ public class EvaluationToolsTests extends BaseDL4JTest { DataSetIterator iter = new IrisDataSetIterator(150, 150); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(4).activation(Activation.TANH).build()).layer(1, new OutputLayer.Builder().nIn(4).nOut(2).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) @@ -82,7 +81,7 @@ public class EvaluationToolsTests extends BaseDL4JTest { String str = EvaluationTools.rocChartToHtml(roc); - // System.out.println(str); + System.out.println(str); } } @@ -90,7 +89,7 @@ public class EvaluationToolsTests extends BaseDL4JTest { public void testRocMultiToHtml() throws Exception { DataSetIterator iter = new IrisDataSetIterator(150, 150); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(4).activation(Activation.TANH).build()).layer(1, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/ROCTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/ROCTest.java index 5684a76d6..ca3ad1b54 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/ROCTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/ROCTest.java @@ -22,23 +22,19 @@ package org.deeplearning4j.eval; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; import org.junit.jupiter.api.Test; -import org.nd4j.evaluation.curves.PrecisionRecallCurve; import org.nd4j.evaluation.curves.RocCurve; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.linalg.api.ops.random.impl.BernoulliDistribution; import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.preprocessor.NormalizerStandardize; import org.nd4j.linalg.factory.Nd4j; -import org.nd4j.linalg.indexing.NDArrayIndex; import org.nd4j.linalg.lossfunctions.LossFunctions; import java.util.*; @@ -86,7 +82,7 @@ public class ROCTest extends BaseDL4JTest { DataSetIterator iter = new IrisDataSetIterator(150, 150); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).seed(12345) .list() .layer(0, new 
DenseLayer.Builder().nIn(4).nOut(4).activation(Activation.TANH).build()).layer(1, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/RegressionEvalTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/RegressionEvalTest.java index b5e2b994e..92991d1cc 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/RegressionEvalTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/eval/RegressionEvalTest.java @@ -23,7 +23,6 @@ package org.deeplearning4j.eval; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.ExistingDataSetIterator; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -49,7 +48,7 @@ public class RegressionEvalTest extends BaseDL4JTest { public void testRegressionEvalMethods() { //Basic sanity check - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.ZERO).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.ZERO).list() .layer(0, new OutputLayer.Builder().activation(Activation.TANH) .lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(5).build()) .build(); @@ -71,7 +70,7 @@ public class RegressionEvalTest extends BaseDL4JTest { ComputationGraphConfiguration graphConf = - new NeuralNetConfiguration.Builder().weightInit(WeightInit.ZERO).graphBuilder() + NeuralNetConfiguration.builder().weightInit(WeightInit.ZERO).graphBuilder() .addInputs("in").addLayer("0", new OutputLayer.Builder() .lossFunction(LossFunctions.LossFunction.MSE) .activation(Activation.TANH).nIn(10).nOut(5).build(), "in") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidConfigurations.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidConfigurations.java index 0a09599bb..be9568f89 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidConfigurations.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidConfigurations.java @@ -24,7 +24,6 @@ import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.exception.DL4JException; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -41,7 +40,7 @@ import static org.junit.jupiter.api.Assertions.fail; public class TestInvalidConfigurations extends BaseDL4JTest { public static MultiLayerNetwork getDensePlusOutput(int nIn, int nOut) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(nOut).build()).build(); @@ -52,7 +51,7 @@ public class TestInvalidConfigurations extends BaseDL4JTest { } public static MultiLayerNetwork getLSTMPlusRnnOutput(int nIn, int nOut) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration 
conf = NeuralNetConfiguration.builder().list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(10).build()) .layer(1, new RnnOutputLayer.Builder().nIn(10).nOut(nOut).build()).build(); @@ -63,10 +62,10 @@ public class TestInvalidConfigurations extends BaseDL4JTest { } public static MultiLayerNetwork getCnnPlusOutputLayer(int depthIn, int inH, int inW, int nOut) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(5).build()) .layer(1, new OutputLayer.Builder().nOut(nOut).build()) - .setInputType(InputType.convolutional(inH, inW, depthIn)).build(); + .inputType(InputType.convolutional(inH, inW, depthIn)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -90,7 +89,7 @@ public class TestInvalidConfigurations extends BaseDL4JTest { @Test public void testDenseNout0() { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(0).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10).build()).build(); @@ -147,7 +146,7 @@ public class TestInvalidConfigurations extends BaseDL4JTest { @Test public void testLSTMNOut0() { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new GravesLSTM.Builder().nIn(10).nOut(0).build()) .layer(1, new RnnOutputLayer.Builder().nIn(10).nOut(10).build()).build(); @@ -178,10 +177,10 @@ public class TestInvalidConfigurations extends BaseDL4JTest { @Test public void testConvolutionalNOut0() { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder().nIn(5).nOut(0).build()) .layer(1, new OutputLayer.Builder().nOut(10).build()) - .setInputType(InputType.convolutional(10, 10, 5)).build(); + .inputType(InputType.convolutional(10, 10, 5)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -207,12 +206,12 @@ public class TestInvalidConfigurations extends BaseDL4JTest { //(10-3+2*0)/2+1 = 7/2 + 1 try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().convolutionMode(ConvolutionMode.Strict) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().convolutionMode(ConvolutionMode.Strict) .list() .layer(0, new ConvolutionLayer.Builder().kernelSize(3, 2).stride(2, 2).padding(0, 0).nOut(5) .build()) .layer(1, new OutputLayer.Builder().nOut(10).build()) - .setInputType(InputType.convolutional(hIn, wIn, depthIn)).build(); + .inputType(InputType.convolutional(hIn, wIn, depthIn)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -234,11 +233,11 @@ public class TestInvalidConfigurations extends BaseDL4JTest { int hIn = 10; int wIn = 10; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder().kernelSize(7, 7).stride(1, 1).padding(0, 0).nOut(5) .build()) .layer(1, new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(hIn, wIn, depthIn)).build(); + .inputType(InputType.convolutional(hIn, wIn, depthIn)).build(); MultiLayerNetwork net = new 
MultiLayerNetwork(conf); net.init(); @@ -265,8 +264,8 @@ public class TestInvalidConfigurations extends BaseDL4JTest { //Invalid: (10-3+0)/2+1 = 4.5 - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().convolutionMode(ConvolutionMode.Strict).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().convolutionMode(ConvolutionMode.Strict).list() .layer(0, new ConvolutionLayer.Builder().kernelSize(3, 3).stride(2, 2) .padding(0, 0).nIn(depthIn).nOut(5).build()) .layer(1, new OutputLayer.Builder().nIn(5 * 4 * 4).nOut(10).activation(Activation.SOFTMAX).build()) @@ -299,22 +298,22 @@ public class TestInvalidConfigurations extends BaseDL4JTest { //(10-3+2*0)/2+1 = 7/2 + 1 try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 3).stride(2, 2).padding(0, 0).nOut(5) .build()) .layer(1, new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(hIn, wIn, depthIn)).build(); + .inputType(InputType.convolutional(hIn, wIn, depthIn)).build(); } catch (Exception e) { fail("Did not expect exception with default (truncate)"); } try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().convolutionMode(ConvolutionMode.Strict) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().convolutionMode(ConvolutionMode.Strict) .list() .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 3).stride(2, 2).padding(0, 0).nOut(5) .build()) .layer(1, new OutputLayer.Builder().nOut(10).build()) - .setInputType(InputType.convolutional(hIn, wIn, depthIn)).build(); + .inputType(InputType.convolutional(hIn, wIn, depthIn)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -338,12 +337,12 @@ public class TestInvalidConfigurations extends BaseDL4JTest { //(10-3+2*0)/2+1 = 7/2 + 1 try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().convolutionMode(ConvolutionMode.Strict) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().convolutionMode(ConvolutionMode.Strict) .list() .layer(0, new SubsamplingLayer.Builder().kernelSize(2, 3).stride(2, 2).padding(0, 0) .build()) .layer(1, new OutputLayer.Builder().nOut(10).build()) - .setInputType(InputType.convolutional(hIn, wIn, depthIn)).build(); + .inputType(InputType.convolutional(hIn, wIn, depthIn)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidInput.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidInput.java index 7d958355a..4e35f44eb 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidInput.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/exceptions/TestInvalidInput.java @@ -23,7 +23,6 @@ package org.deeplearning4j.exceptions; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.exception.DL4JException; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -43,7 +42,7 @@ public class TestInvalidInput extends BaseDL4JTest { @Test public void testInputNinMismatchDense() { - MultiLayerConfiguration conf = new 
NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()).build(); @@ -64,7 +63,7 @@ public class TestInvalidInput extends BaseDL4JTest { @Test public void testLabelsNOutMismatchOutputLayer() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()).build(); @@ -85,7 +84,7 @@ public class TestInvalidInput extends BaseDL4JTest { @Test public void testLabelsNOutMismatchRnnOutputLayer() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new LSTM.Builder().nIn(5).nOut(5).build()) .layer(1, new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).build()).build(); @@ -112,10 +111,10 @@ public class TestInvalidInput extends BaseDL4JTest { int w = 16; int d = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder().nIn(d).nOut(5).build()) .layer(1, new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(h, w, d)).build(); + .inputType(InputType.convolutional(h, w, d)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -139,10 +138,10 @@ public class TestInvalidInput extends BaseDL4JTest { int w = 16; int d = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder().nIn(d).nOut(5).build()) .layer(1, new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(h, w, d)).build(); + .inputType(InputType.convolutional(h, w, d)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -165,10 +164,10 @@ public class TestInvalidInput extends BaseDL4JTest { int w = 16; int d = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new SubsamplingLayer.Builder().kernelSize(2, 2).build()) .layer(1, new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(h, w, d)).build(); + .inputType(InputType.convolutional(h, w, d)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -188,7 +187,7 @@ public class TestInvalidInput extends BaseDL4JTest { @Test public void testInputNinMismatchLSTM() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new GravesLSTM.Builder().nIn(5).nOut(5).build()) .layer(1, new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).build()).build(); @@ -209,7 +208,7 @@ public class TestInvalidInput extends BaseDL4JTest { @Test public void testInputNinMismatchBidirectionalLSTM() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = 
NeuralNetConfiguration.builder().list() .layer(0, new GravesBidirectionalLSTM.Builder().nIn(5).nOut(5).build()) .layer(1, new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).build()).build(); @@ -231,7 +230,7 @@ public class TestInvalidInput extends BaseDL4JTest { @Test public void testInputNinMismatchEmbeddingLayer() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new EmbeddingLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()).build(); @@ -257,7 +256,7 @@ public class TestInvalidInput extends BaseDL4JTest { for(String layerType : new String[]{"simple", "lstm", "graves"}) { - Layer l; + LayerConfiguration l; switch (layerType){ case "simple": l = new SimpleRnn.Builder().nIn(5).nOut(5).build(); @@ -272,7 +271,7 @@ public class TestInvalidInput extends BaseDL4JTest { throw new RuntimeException(); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(l) .layer(new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.SOFTMAX).build()).build(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/AttentionLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/AttentionLayerTest.java index e375aa180..b83cc07c4 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/AttentionLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/AttentionLayerTest.java @@ -23,7 +23,6 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.AttentionVertex; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -84,7 +83,7 @@ public class AttentionLayerTest extends BaseDL4JTest { System.out.println("Starting test: " + name); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) @@ -98,7 +97,7 @@ public class AttentionLayerTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build()) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -145,7 +144,7 @@ public class AttentionLayerTest extends BaseDL4JTest { System.out.println("Starting test: " + name); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) @@ -159,7 +158,7 @@ public class AttentionLayerTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build()) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - 
.setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -185,7 +184,7 @@ public class AttentionLayerTest extends BaseDL4JTest { for (boolean inputMask : new boolean[]{false, true}) { for (boolean projectInput : new boolean[]{false, true}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) @@ -199,7 +198,7 @@ public class AttentionLayerTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build()) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -240,7 +239,7 @@ public class AttentionLayerTest extends BaseDL4JTest { int nOut = 5; int layerSize = 8; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.IDENTITY) .updater(new NoOp()) @@ -251,7 +250,7 @@ public class AttentionLayerTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build()) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -303,7 +302,7 @@ public class AttentionLayerTest extends BaseDL4JTest { System.out.println("Starting test: " + name); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.IDENTITY) .updater(new NoOp()) @@ -314,7 +313,7 @@ public class AttentionLayerTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build()) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -361,7 +360,7 @@ public class AttentionLayerTest extends BaseDL4JTest { System.out.println("Starting test: " + name); - ComputationGraphConfiguration graph = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration graph = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) @@ -425,7 +424,7 @@ public class AttentionLayerTest extends BaseDL4JTest { System.out.println("Starting test: " + name); - ComputationGraphConfiguration graph = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration graph = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/BNGradientCheckTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/BNGradientCheckTest.java index 65f8787d8..0380ed2a0 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/BNGradientCheckTest.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/BNGradientCheckTest.java @@ -25,7 +25,6 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; @@ -74,23 +73,23 @@ public class BNGradientCheckTest extends BaseDL4JTest { for (boolean useLogStd : new boolean[]{true, false}) { - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder().updater(new NoOp()) - .dataType(DataType.DOUBLE) - .seed(12345L) - .dist(new NormalDistribution(0, 1)).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3) - .activation(Activation.IDENTITY).build()) - .layer(1, new BatchNormalization.Builder().useLogStd(useLogStd).nOut(3).build()) - .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build()) - .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(3).nOut(3).build()); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder().updater(new NoOp()) + .dataType(DataType.DOUBLE) + .seed(12345L) + .dist(new NormalDistribution(0, 1)).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3) + .activation(Activation.IDENTITY).build()) + .layer(1, new BatchNormalization.Builder().useLogStd(useLogStd).nOut(3).build()) + .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build()) + .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(3).nOut(3).build()); MultiLayerNetwork mln = new MultiLayerNetwork(builder.build()); mln.init(); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean @@ -119,7 +118,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { } for (boolean useLogStd : new boolean[]{true, false}) { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()).seed(12345L) .dist(new NormalDistribution(0, 2)).list() @@ -129,13 +128,13 @@ public class BNGradientCheckTest extends BaseDL4JTest { .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build()) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(hw, hw, depth)); + .inputType(InputType.convolutional(hw, hw, depth)); MultiLayerNetwork mln = new MultiLayerNetwork(builder.build()); mln.init(); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); //Mean and variance vars are not gradient checkable; mean/variance 
"gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean @@ -188,7 +187,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { LossFunctions.LossFunction lf = lossFunctions[i]; Activation outputActivation = outputActivations[i]; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .l2(l2vals[j]) .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT) @@ -203,9 +202,9 @@ public class BNGradientCheckTest extends BaseDL4JTest { .layer(4, new ActivationLayer.Builder().activation(afn).build()) .layer(5, new OutputLayer.Builder(lf).activation(outputActivation).nOut(nOut) .build()) - .setInputType(InputType.convolutional(hw, hw, depth)); + .inputType(InputType.convolutional(hw, hw, depth)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); @@ -219,11 +218,11 @@ public class BNGradientCheckTest extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int k = 0; k < 20; k++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = name + " - score did not (sufficiently) decrease during learning - activationFn=" @@ -237,7 +236,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst + ", l1=" + l1vals[j] + ", l2=" + l2vals[j]); // for (int k = 0; k < mln.getnLayers(); k++) -// System.out.println("Layer " + k + " # params: " + mln.getLayer(k).numParams()); +// System.out.println("ILayer " + k + " # params: " + mln.getLayer(k).numParams()); //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean @@ -294,23 +293,23 @@ public class BNGradientCheckTest extends BaseDL4JTest { LossFunctions.LossFunction lf = lossFunctions[i]; Activation outputActivation = outputActivations[i]; - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder() - .dataType(DataType.DOUBLE) - .l2(l2vals[j]) - .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) - .updater(new NoOp()) - .dist(new UniformDistribution(-2, 2)).seed(12345L).list() - .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(4) - .activation(afn).build()) - .layer(1, new BatchNormalization.Builder().useLogStd(useLogStd).build()) - .layer(2, new DenseLayer.Builder().nIn(4).nOut(4).build()) - .layer(3, new BatchNormalization.Builder().useLogStd(useLogStd).build()) - .layer(4, new OutputLayer.Builder(lf) - .activation(outputActivation).nOut(nOut) - .build()); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder() + .dataType(DataType.DOUBLE) + .l2(l2vals[j]) + .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) + .updater(new NoOp()) + .dist(new UniformDistribution(-2, 2)).seed(12345L).list() + .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(4) + .activation(afn).build()) + .layer(1, new 
BatchNormalization.Builder().useLogStd(useLogStd).build()) + .layer(2, new DenseLayer.Builder().nIn(4).nOut(4).build()) + .layer(3, new BatchNormalization.Builder().useLogStd(useLogStd).build()) + .layer(4, new OutputLayer.Builder(lf) + .activation(outputActivation).nOut(nOut) + .build()); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); @@ -323,11 +322,11 @@ public class BNGradientCheckTest extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int k = 0; k < 10; k++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = name + " - score did not (sufficiently) decrease during learning - activationFn=" @@ -341,7 +340,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst + ", l1=" + l1vals[j] + ", l2=" + l2vals[j]); // for (int k = 0; k < mln.getnLayers(); k++) -// System.out.println("Layer " + k + " # params: " + mln.getLayer(k).numParams()); +// System.out.println("ILayer " + k + " # params: " + mln.getLayer(k).numParams()); //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean @@ -370,7 +369,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { INDArray labels = ds.getLabels(); for (boolean useLogStd : new boolean[]{true, false}) { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .seed(12345L) .dist(new NormalDistribution(0, 1)).list() @@ -385,7 +384,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { mln.init(); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean @@ -414,7 +413,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { } for (boolean useLogStd : new boolean[]{true, false}) { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .seed(12345L) .dist(new NormalDistribution(0, 2)).list() @@ -424,13 +423,13 @@ public class BNGradientCheckTest extends BaseDL4JTest { .layer(2, new ActivationLayer.Builder().activation(Activation.TANH).build()) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(hw, hw, depth)); + .inputType(InputType.convolutional(hw, hw, depth)); MultiLayerNetwork mln = new MultiLayerNetwork(builder.build()); mln.init(); // for (int j = 0; j < mln.getnLayers(); j++) -// 
System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean @@ -457,7 +456,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { for (boolean useLogStd : new boolean[]{true, false}) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed).updater(new NoOp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(seed).updater(new NoOp()) .dataType(DataType.DOUBLE) .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .setInputTypes(InputType.convolutional(height, width, channels)) @@ -526,7 +525,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { LossFunctions.LossFunction lf = lossFunctions[i]; Activation outputActivation = outputActivations[i]; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT) .updater(new NoOp()) @@ -554,11 +553,11 @@ public class BNGradientCheckTest extends BaseDL4JTest { net.setInput(0, ds.getFeatures()); net.setLabels(ds.getLabels()); net.computeGradientAndScore(); - double scoreBefore = net.score(); + double scoreBefore = net.getScore(); for (int k = 0; k < 20; k++) net.fit(ds); net.computeGradientAndScore(); - double scoreAfter = net.score(); + double scoreAfter = net.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = name + " - score did not (sufficiently) decrease during learning - activationFn=" @@ -572,7 +571,7 @@ public class BNGradientCheckTest extends BaseDL4JTest { + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst + ", l1=" + l1vals[j] + ", l2=" + l2vals[j]); // for (int k = 0; k < net.getNumLayers(); k++) -// System.out.println("Layer " + k + " # params: " + net.getLayer(k).numParams()); +// System.out.println("ILayer " + k + " # params: " + net.getLayer(k).numParams()); //Mean and variance vars are not gradient checkable; mean/variance "gradient" is used to implement running mean/variance calc //i.e., runningMean = decay * runningMean + (1-decay) * batchMean diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN1DGradientCheckTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN1DGradientCheckTest.java index b61c1fe24..0f474bb16 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN1DGradientCheckTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN1DGradientCheckTest.java @@ -24,17 +24,14 @@ import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.convolutional.Cropping1D; -import 
org.deeplearning4j.nn.modelimport.keras.KerasModelImport; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.util.Convolution1DUtils; -import org.deeplearning4j.util.ConvolutionUtils; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.api.buffer.DataType; @@ -45,8 +42,6 @@ import org.nd4j.linalg.indexing.NDArrayIndex; import org.nd4j.linalg.learning.config.NoOp; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.io.File; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -90,7 +85,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { } } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list() @@ -103,10 +98,10 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { .build()) .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.recurrent(convNIn, length)).build(); + .inputType(InputType.recurrent(convNIn, length)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -118,7 +113,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -170,7 +165,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { } } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list() @@ -183,10 +178,10 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { .build()) .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); + .inputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -198,7 +193,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -251,7 +246,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { } } - MultiLayerConfiguration conf = new 
NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list() @@ -267,10 +262,10 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { .stride(stride).padding(padding).pnorm(pnorm).build()) .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); + .inputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -282,7 +277,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -330,7 +325,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { } } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1)).convolutionMode(ConvolutionMode.Same).list() @@ -344,10 +339,10 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { .stride(stride).padding(padding).pnorm(pnorm).build()) .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); + .inputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -359,7 +354,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -393,7 +388,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { log.info("Starting test: " + s); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.TANH) @@ -413,7 +408,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer(PoolingType.AVG)) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.recurrent(convNIn, length)).build(); + .inputType(InputType.recurrent(convNIn, length)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); 
net.init(); @@ -481,7 +476,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { log.info("Starting test: " + s); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.TANH) @@ -501,7 +496,7 @@ public class CNN1DGradientCheckTest extends BaseDL4JTest { .build()) .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); + .inputType(InputType.recurrent(convNIn, length,RNNFormat.NCW)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN3DGradientCheckTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN3DGradientCheckTest.java index 4d3de0bfb..ba60ca557 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN3DGradientCheckTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNN3DGradientCheckTest.java @@ -24,7 +24,6 @@ import lombok.extern.java.Log; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -35,7 +34,6 @@ import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.api.buffer.DataBuffer; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; @@ -112,7 +110,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % finalNOut}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()).weightInit(WeightInit.LECUN_NORMAL) .dist(new NormalDistribution(0, 1)) @@ -131,10 +129,10 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { .inputPreProcessor(2, new Cnn3DToFeedForwardPreProcessor(outDepth, outHeight, outWidth, convNOut2, df == Convolution3D.DataFormat.NCDHW)) - .setInputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); + .inputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -149,7 +147,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { log.info(msg); // for (int j = 0; j < net.getnLayers(); j++) { -// log.info("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// log.info("ILayer " + j + " # params: " + net.getLayer(j).numParams()); // } } @@ -215,7 +213,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % finalNOut}, 1.0); } - 
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()).weightInit(WeightInit.LECUN_NORMAL) .dist(new NormalDistribution(0, 1)) @@ -235,10 +233,10 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { .inputPreProcessor(3, new Cnn3DToFeedForwardPreProcessor(outDepth, outHeight, outWidth, convNOut2, true)) - .setInputType(InputType.convolutional3D(depth, height, width, convNIn)).build(); + .inputType(InputType.convolutional3D(depth, height, width, convNIn)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -252,7 +250,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { log.info(msg); // for (int j = 0; j < net.getnLayers(); j++) { -// log.info("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// log.info("ILayer " + j + " # params: " + net.getLayer(j).numParams()); // } } @@ -310,7 +308,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % finalNOut}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .weightInit(WeightInit.XAVIER) @@ -327,10 +325,10 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { .activation(Activation.SOFTMAX).nOut(finalNOut).build()) .inputPreProcessor(2, new Cnn3DToFeedForwardPreProcessor(outDepth, outHeight, outWidth,convNOut, df)) - .setInputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); + .inputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -397,7 +395,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % finalNOut}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()).weightInit(WeightInit.LECUN_NORMAL) .dist(new NormalDistribution(0, 1)) @@ -414,10 +412,10 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { .inputPreProcessor(2, new Cnn3DToFeedForwardPreProcessor(outDepth, outHeight, outWidth, convNOut, true)) - .setInputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); + .inputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -431,7 +429,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { log.info(msg); // for (int j = 0; j < net.getnLayers(); j++) { -// log.info("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// log.info("ILayer " + j + " # params: " + net.getLayer(j).numParams()); // } } @@ -493,7 +491,7 @@ public class CNN3DGradientCheckTest extends 
BaseDL4JTest { labels.putScalar(new int[]{i, i % finalNOut}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()).weightInit(WeightInit.LECUN_NORMAL) .dist(new NormalDistribution(0, 1)) @@ -513,10 +511,10 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { .inputPreProcessor(3, new Cnn3DToFeedForwardPreProcessor(outDepth, outHeight, outWidth, convNOut2, true)) - .setInputType(InputType.convolutional3D(depth, height, width, convNIn)).build(); + .inputType(InputType.convolutional3D(depth, height, width, convNIn)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -530,7 +528,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { log.info(msg); // for (int j = 0; j < net.getnLayers(); j++) { -// log.info("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// log.info("ILayer " + j + " # params: " + net.getLayer(j).numParams()); // } } @@ -592,7 +590,7 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{j, j % finalNOut}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .weightInit(new NormalDistribution(0, 0.1)) @@ -607,10 +605,10 @@ public class CNN3DGradientCheckTest extends BaseDL4JTest { .build()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(finalNOut).build()) - .setInputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); + .inputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build(); String json = conf.toJson(); - MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration c2 = NeuralNetConfiguration.fromJson(json); assertEquals(conf, c2); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNNGradientCheckTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNNGradientCheckTest.java index b9536ee41..d11bd33c6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNNGradientCheckTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CNNGradientCheckTest.java @@ -26,7 +26,6 @@ import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -100,15 +99,15 @@ public class CNNGradientCheckTest extends BaseDL4JTest { LossFunctions.LossFunction lf = lossFunctions[i]; Activation outputActivation = outputActivations[i]; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) 
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp()) .weightInit(WeightInit.XAVIER).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder(1, 1).nOut(6).activation(afn).build()) .layer(1, new OutputLayer.Builder(lf).activation(outputActivation).nOut(3).build()) - .setInputType(InputType.convolutionalFlat(1, 4, 1)); + .inputType(InputType.convolutionalFlat(1, 4, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); @@ -120,11 +119,11 @@ public class CNNGradientCheckTest extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = name + " - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation @@ -137,7 +136,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { System.out.println(name + " - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -186,7 +185,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { double l2 = l2vals[i]; double l1 = l1vals[i]; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .l2(l2).l1(l1).l2Bias(biasL2[i]).l1Bias(biasL1[i]) .optimizationAlgo( @@ -198,9 +197,9 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(1, new OutputLayer.Builder(lf).activation(outputActivation).nOut(3) .weightInit(WeightInit.XAVIER).updater(new NoOp()).build()) - .setInputType(InputType.convolutionalFlat(1, 4, 1)); + .inputType(InputType.convolutionalFlat(1, 4, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); @@ -212,11 +211,11 @@ public class CNNGradientCheckTest extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = testName + "- score did not (sufficiently) decrease during learning - activationFn=" @@ -231,7 +230,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } 
boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -269,8 +268,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1)) @@ -281,7 +280,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(2 * 2 * 4) .nOut(nOut).build()) - .setInputType(InputType.convolutionalFlat(height, width, inputDepth)) + .inputType(InputType.convolutionalFlat(height, width, inputDepth)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -293,7 +292,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels); @@ -334,8 +333,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()).weightInit(new NormalDistribution(0, 1)) .list() @@ -349,7 +348,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) .nOut(nOut).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -361,7 +360,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels); @@ -403,8 +402,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { INDArray input = Nd4j.rand(DataType.DOUBLE, inShape); INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1)) @@ -416,7 +415,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(8 * 8 * 3) .nOut(4).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -427,7 +426,7 @@ public class CNNGradientCheckTest extends 
BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -472,8 +471,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)) .list().layer(0, @@ -488,7 +487,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(3 * 3 * 3) .nOut(4).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -500,7 +499,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -546,8 +545,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)) .list().layer(0, @@ -562,7 +561,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(2 * 2 * 2) .nOut(4).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -606,7 +605,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { INDArray input = Nd4j.rand(DataType.DOUBLE, inShape); INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new NoOp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new NoOp()) .dataType(DataType.DOUBLE) .activation(afn) .list() @@ -623,10 +622,9 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(2 * 2 * 2).nOut(nOut) .build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)).build(); + .inputType(InputType.convolutional(height, width, inputDepth, format)).build(); - assertEquals(ConvolutionMode.Truncate, - ((ConvolutionLayer) conf.getConf(0).getLayer()).getConvolutionMode()); + assertEquals(ConvolutionMode.Truncate, conf.getConvolutionMode()); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -673,7 +671,7 @@ public class CNNGradientCheckTest extends 
BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new NoOp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new NoOp()) .dataType(DataType.DOUBLE) .activation(afn) .list() @@ -689,10 +687,9 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(2 * 2 * 2).nOut(nOut) .build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)).build(); + .inputType(InputType.convolutional(height, width, inputDepth, format)).build(); - assertEquals(ConvolutionMode.Truncate, - ((ConvolutionLayer) conf.getConf(0).getLayer()).getConvolutionMode()); + assertEquals(ConvolutionMode.Truncate,conf.getConvolutionMode()); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -744,7 +741,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.SIGMOID).convolutionMode(Same).list() @@ -760,7 +757,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .stride(1, 1).padding(0, 0).build()) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)).build(); + .inputType(InputType.convolutional(height, width, inputDepth, format)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -810,14 +807,14 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - Layer convLayer = new ConvolutionLayer.Builder().name("layer 0").kernelSize(k, k).dataFormat(format) + LayerConfiguration convLayer = new ConvolutionLayer.Builder().name("layer 0").kernelSize(k, k).dataFormat(format) .stride(stride, stride).padding(0, 0).nIn(inputDepth).nOut(2).build(); - Layer poolLayer = new SubsamplingLayer.Builder() + LayerConfiguration poolLayer = new SubsamplingLayer.Builder() .poolingType(SubsamplingLayer.PoolingType.MAX).kernelSize(k, k).dataFormat(format) .stride(stride, stride).padding(0, 0).build(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.TANH).convolutionMode(Same).list() @@ -825,7 +822,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .layer(1, convFirst ? 
poolLayer : convLayer) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -883,8 +880,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { INDArray input = Nd4j.rand(DataType.DOUBLE, inShape); INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)).list() .layer(0, new ConvolutionLayer.Builder(kernel, stride, padding).dataFormat(format) @@ -894,7 +891,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { padding).nIn(3).nOut(3).dataFormat(format).build())//output: (6-2+0)/1+1 = 5 .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(4).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -920,7 +917,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -971,7 +968,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{j, j % nOut}, 1.0); } - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(act) @@ -981,11 +978,11 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .stride(s, s).dataFormat(format) .dilation(d, d) .convolutionMode(cm) - .nIn(inputDepth).nOut(nOut).build()); + .nIn(inputDepth).nOut(nOut).build()) - MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(h, w, inputDepth, format)).build(); + .inputType(InputType.convolutional(h, w, inputDepth, format)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -1043,7 +1040,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.TANH) @@ -1054,11 +1051,11 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .stride(s, s) .dilation(d, d) .depthMultiplier(3).dataFormat(format) - .nIn(inputDepth).nOut(2).build()); + .nIn(inputDepth).nOut(2).build()) - MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .layer(new 
OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(h, w, inputDepth, format)).build(); + .inputType(InputType.convolutional(h, w, inputDepth, format)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -1116,7 +1113,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.NeuralNetConfigurationBuilder b = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.TANH).convolutionMode(cm).list() @@ -1140,9 +1137,9 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .build()); } - MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + NeuralNetConfiguration conf = (NeuralNetConfiguration) b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(h, w, inputDepth, format)).build(); + .inputType(InputType.convolutional(h, w, inputDepth, format)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -1190,8 +1187,8 @@ public class CNNGradientCheckTest extends BaseDL4JTest { INDArray input = Nd4j.rand(DataType.DOUBLE, inShape); INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) @@ -1208,7 +1205,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .build()) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(height, width, inputDepth, format)) + .inputType(InputType.convolutional(height, width, inputDepth, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -1277,7 +1274,7 @@ public class CNNGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % nOut}, 1.0); } - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.NeuralNetConfigurationBuilder b = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(Activation.TANH) @@ -1293,9 +1290,9 @@ public class CNNGradientCheckTest extends BaseDL4JTest { .depthMultiplier(depthMultiplier) .nIn(nIn).build()); // nOut = nIn * depthMultiplier - MultiLayerConfiguration conf = b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + NeuralNetConfiguration conf = (NeuralNetConfiguration) b.layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(height, width, nIn, format)).build(); + .inputType(InputType.convolutional(height, width, nIn, format)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CapsnetGradientCheckTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CapsnetGradientCheckTest.java index c0a6cad8e..8d9caef52 100644 --- 
a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CapsnetGradientCheckTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/CapsnetGradientCheckTest.java @@ -24,7 +24,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -44,8 +43,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.NoOp; import org.nd4j.linalg.lossfunctions.impl.LossNegativeLogLikelihood; -import java.util.Random; - ////@Ignore public class CapsnetGradientCheckTest extends BaseDL4JTest { @@ -80,12 +77,11 @@ public class CapsnetGradientCheckTest extends BaseDL4JTest { labels.putScalar(new int[]{i, i % capsule}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(123) .updater(new NoOp()) - .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6))) - .list() + .dist(new UniformDistribution(-6, 6)) .layer(new PrimaryCapsules.Builder(primaryCapsDim, primarpCapsChannel) .kernelSize(3, 3) .stride(2, 2) @@ -94,7 +90,7 @@ public class CapsnetGradientCheckTest extends BaseDL4JTest { .layer(new CapsuleStrengthLayer.Builder().build()) .layer(new ActivationLayer.Builder(new ActivationSoftmax()).build()) .layer(new LossLayer.Builder(new LossNegativeLogLikelihood()).build()) - .setInputType(InputType.convolutional(height, width, inputDepth)) + .inputType(InputType.convolutional(height, width, inputDepth)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/DropoutGradientCheck.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/DropoutGradientCheck.java index 9aafd297c..5c124dfa0 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/DropoutGradientCheck.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/DropoutGradientCheck.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.dropout.*; @@ -92,7 +91,7 @@ public class DropoutGradientCheck extends BaseDL4JTest { continue; } - NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0,1)) .convolutionMode(ConvolutionMode.Same) @@ -104,18 +103,18 @@ public class DropoutGradientCheck extends BaseDL4JTest { if(cnn){ builder.layer(new ConvolutionLayer.Builder().kernelSize(3,3).stride(2,2).nOut(2).build()); builder.layer(new ConvolutionLayer.Builder().kernelSize(3,3).stride(2,2).nOut(2).build()); - builder.setInputType(InputType.convolutional(6,6,2)); + builder.inputType(InputType.convolutional(6,6,2)); } else { 
builder.layer(new DenseLayer.Builder().nOut(3).build()); builder.layer(new DenseLayer.Builder().nOut(3).build()); - builder.setInputType(InputType.feedForward(6)); + builder.inputType(InputType.feedForward(6)); } builder.layer(new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).lossFunction(LossFunction.MCXENT).build()); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); //Remove spatial dropout from output layer - can't be used for 2d input if(i == 4){ - conf.getConf(2).getLayer().setIDropout(null); + conf.getFlattenedLayerConfigurations().get(2).setIDropout(null); } MultiLayerNetwork mln = new MultiLayerNetwork(conf); @@ -149,7 +148,7 @@ public class DropoutGradientCheck extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int mb = 3; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0,1)) .convolutionMode(ConvolutionMode.Same) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GlobalPoolingGradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GlobalPoolingGradientCheckTests.java index 7cb10f83b..18d430044 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GlobalPoolingGradientCheckTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GlobalPoolingGradientCheckTests.java @@ -24,7 +24,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -72,7 +71,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { for (int miniBatchSize : minibatchSizes) { for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() @@ -95,7 +94,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { System.out.println("testLSTMGlobalPoolingBasicMultiLayer() - " + pt + ", minibatch = " + miniBatchSize); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -127,7 +126,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { for (int miniBatchSize : minibatchSizes) { for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() @@ -138,7 +137,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { .layer(1, new GlobalPoolingLayer.Builder().poolingType(pt).build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) 
.activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(inputH, inputW, inputDepth, nchw ? CNN2DFormat.NCHW : CNN2DFormat.NHWC)).build(); + .inputType(InputType.convolutional(inputH, inputW, inputDepth, nchw ? CNN2DFormat.NCHW : CNN2DFormat.NHWC)).build(); MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); @@ -156,7 +155,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testCnnGlobalPoolingBasicMultiLayer() - " + pt + ", minibatch = " + miniBatchSize + " - " + (nchw ? "NCHW" : "NHWC")); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -185,7 +184,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() @@ -216,7 +215,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testLSTMGlobalPoolingBasicMultiLayer() - " + pt + ", minibatch = " + miniBatchSize); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(mln).input(input) @@ -259,7 +258,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { stride = new int[] {inputH, 1}; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1.0)).convolutionMode(ConvolutionMode.Same) @@ -270,7 +269,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(inputH, inputW, inputDepth)).build(); + .inputType(InputType.convolutional(inputH, inputW, inputDepth)).build(); MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); @@ -299,7 +298,7 @@ public class GlobalPoolingGradientCheckTests extends BaseDL4JTest { System.out.println("testCnnGlobalPoolingBasicMultiLayer() - " + pt + ", minibatch = " + miniBatchSize); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(mln).input(input) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTests.java index cab80a69a..39dc54659 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTests.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTests.java @@ -26,7 +26,6 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; @@ -74,7 +73,7 @@ public class GradientCheckTests extends BaseDL4JTest { public void testMinibatchApplication() { IrisDataSetIterator iter = new IrisDataSetIterator(30, 150); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().miniBatch(false) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().miniBatch(false) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new NoOp()) .list() @@ -106,11 +105,11 @@ public class GradientCheckTests extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = "testMinibatchApplication() - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation @@ -123,7 +122,7 @@ public class GradientCheckTests extends BaseDL4JTest { + lf + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -164,7 +163,7 @@ public class GradientCheckTests extends BaseDL4JTest { LossFunction lf = lossFunctions[i]; Activation outputActivation = outputActivations[i]; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp()) .seed(12345L) @@ -185,11 +184,11 @@ public class GradientCheckTests extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = "testGradMLP2LayerIrisSimple() - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation @@ -203,7 +202,7 @@ public class GradientCheckTests extends BaseDL4JTest { + lf + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + 
" # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -253,8 +252,8 @@ public class GradientCheckTests extends BaseDL4JTest { double l2 = l2vals[k]; double l1 = l1vals[k]; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().l2(l2).l1(l1) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().l2(l2).l1(l1) .dataType(DataType.DOUBLE) .l2Bias(biasL2[k]).l1Bias(biasL1[k]) .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) @@ -279,11 +278,11 @@ public class GradientCheckTests extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = "testGradMLP2LayerIrisSimple() - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation @@ -297,7 +296,7 @@ public class GradientCheckTests extends BaseDL4JTest { + ", lossFn=" + lf + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst + ", l2=" + l2 + ", l1=" + l1); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -325,7 +324,7 @@ public class GradientCheckTests extends BaseDL4JTest { labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(0.2).l1(0.1) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L) .list().layer(new EmbeddingLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) @@ -342,7 +341,7 @@ public class GradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testEmbeddingLayerSimple"); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -363,7 +362,7 @@ public class GradientCheckTests extends BaseDL4JTest { labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(0.2).l1(0.1) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L) .list().layer(0, @@ -382,7 +381,7 @@ public class GradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testEmbeddingLayerSimple"); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -429,8 +428,8 @@ 
public class GradientCheckTests extends BaseDL4JTest { double l1 = l1vals[k]; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .l2(l2).l1(l1) @@ -453,11 +452,11 @@ public class GradientCheckTests extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning msg = "testGradMLP2LayerIrisSimple() - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", outputActivation=" + outputActivation @@ -472,7 +471,7 @@ public class GradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -491,7 +490,7 @@ public class GradientCheckTests extends BaseDL4JTest { for(Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH}) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp()) .seed(12345L) @@ -512,7 +511,7 @@ public class GradientCheckTests extends BaseDL4JTest { ComputationGraph netGraph = new ComputationGraph(conf); netGraph.init(); - log.info("params before learning: " + netGraph.getLayer(1).paramTable()); + log.info("params before learning: " + netGraph.getLayer(1).getParamTable()); //Run a number of iterations of learning manually make some pseudo data //the ides is simple: since we do a element wise multiplication layer (just a scaling), we want the cos sim @@ -524,13 +523,13 @@ public class GradientCheckTests extends BaseDL4JTest { netGraph.setInputs(features); netGraph.setLabels(labels); netGraph.computeGradientAndScore(); - double scoreBefore = netGraph.score(); + double scoreBefore = netGraph.getScore(); String msg; for (int epoch = 0; epoch < 5; epoch++) netGraph.fit(new INDArray[]{features}, new INDArray[]{labels}); netGraph.computeGradientAndScore(); - double scoreAfter = netGraph.score(); + double scoreAfter = netGraph.getScore(); //Can't test in 'characteristic mode of operation' if not learning msg = "elementWiseMultiplicationLayerTest() - score did not (sufficiently) decrease during learning - activationFn=" + "Id" + ", lossFn=" + "Cos-sim" + ", outputActivation=" + "Id" @@ -539,7 +538,7 @@ public class GradientCheckTests extends BaseDL4JTest { assertTrue( scoreAfter < 0.8 * scoreBefore, msg); // expectation in case linear regression(with only element wise multiplication layer): large weight for the fourth weight - log.info("params after learning: " + netGraph.getLayer(1).paramTable()); + log.info("params after learning: " + netGraph.getLayer(1).getParamTable()); boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(netGraph).inputs(new INDArray[]{features}) 
.labels(new INDArray[]{labels})); @@ -561,7 +560,7 @@ public class GradientCheckTests extends BaseDL4JTest { for (boolean maskArray : new boolean[]{false, true}) { for (int inputRank : new int[]{2, 3}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(12345) .updater(new NoOp()) @@ -672,8 +671,8 @@ public class GradientCheckTests extends BaseDL4JTest { double l2 = l2vals[k]; double l1 = l1vals[k]; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().l2(l2).l1(l1) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().l2(l2).l1(l1) .dataType(DataType.DOUBLE) .l2Bias(biasL2[k]).l1Bias(biasL1[k]) .weightDecay(wdVals[k]).weightDecayBias(wdBias[k]) @@ -714,7 +713,7 @@ public class GradientCheckTests extends BaseDL4JTest { // (a) activation function // (b) Whether to test at random initialization, or after some learning (i.e., 'characteristic mode of operation') // (c) Loss function (with specified output activations) - // (d) Layer Normalization enabled / disabled + // (d) ILayer Normalization enabled / disabled Activation[] activFns = {Activation.SIGMOID, Activation.TANH}; boolean[] characteristic = {true, false}; //If true: run some backprop steps first @@ -736,7 +735,7 @@ public class GradientCheckTests extends BaseDL4JTest { LossFunction lf = lossFunctions[i]; Activation outputActivation = outputActivations[i]; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).updater(new NoOp()) .seed(12345L) @@ -758,11 +757,11 @@ public class GradientCheckTests extends BaseDL4JTest { mln.setInput(ds.getFeatures()); mln.setLabels(ds.getLabels()); mln.computeGradientAndScore(); - double scoreBefore = mln.score(); + double scoreBefore = mln.getScore(); for (int j = 0; j < 10; j++) mln.fit(ds); mln.computeGradientAndScore(); - double scoreAfter = mln.score(); + double scoreAfter = mln.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = "testGradMLP2LayerIrisSimple() - score did not (sufficiently) decrease during learning - activationFn=" + afn + ", lossFn=" + lf + ", layerNorm=" + layerNorm + ", outputActivation=" + outputActivation @@ -776,7 +775,7 @@ public class GradientCheckTests extends BaseDL4JTest { + lf + ", outputActivation=" + outputActivation + ", doLearningFirst=" + doLearningFirst + ", layerNorm=" + layerNorm); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsComputationGraph.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsComputationGraph.java index ec99f3852..c121e8b14 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsComputationGraph.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsComputationGraph.java @@ -36,9 +36,6 @@ import org.deeplearning4j.nn.conf.graph.rnn.ReverseTimeSeriesVertex; import 
org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; -import org.deeplearning4j.nn.conf.preprocessor.CnnToFeedForwardPreProcessor; -import org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor; -import org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; @@ -52,7 +49,6 @@ import org.nd4j.linalg.indexing.NDArrayIndex; import org.nd4j.linalg.learning.config.NoOp; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.util.Arrays; import java.util.Map; import java.util.Random; @@ -74,7 +70,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { @Test public void testBasicIris() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)).updater(new NoOp()) @@ -106,7 +102,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testBasicIris()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -120,7 +116,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { @Test public void testBasicIrisWithMerging() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)).updater(new NoOp()) @@ -157,7 +153,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testBasicIrisWithMerging()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -177,7 +173,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { for (ElementWiseVertex.Op op : ops) { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -214,7 +210,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + 
graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -235,7 +231,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { for (ElementWiseVertex.Op op : ops) { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -274,7 +270,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testBasicIrisWithElementWiseVertex(op=" + op + ")"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -295,7 +291,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { for(boolean firstSmaller : new boolean[]{false, true}) { for (ElementWiseVertex.Op op : ops) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .dataType(DataType.DOUBLE) .activation(Activation.TANH) @@ -343,7 +339,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { String msg = "testCnnDepthMerge - " + format; Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 0.1)) @@ -376,7 +372,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -398,7 +394,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int outSize = 3; Nd4j.getRandom().setSeed(12345); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new UniformDistribution(0.2, 0.6)) @@ -439,7 +435,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -457,7 +453,7 @@ 
public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int batchSize = 2; int timeSeriesLength = 4; int inLength = 3; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(1234) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(1234) .dataType(DataType.DOUBLE) .weightInit(new NormalDistribution(0, 1)) .updater(new NoOp()).graphBuilder().addInputs("input").setOutputs("out") @@ -478,7 +474,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testLSTMWithSubset()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -493,7 +489,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { public void testLSTMWithLastTimeStepVertex() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -515,7 +511,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testLSTMWithLastTimeStepVertex()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } //First: test with no input mask array @@ -545,7 +541,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int timeSeriesLength = 4; Nd4j.getRandom().setSeed(12345); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -579,7 +575,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testLSTMWithDuplicateToTimeSeries()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input1, input2}) @@ -595,7 +591,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int timeSeriesLength = 4; Nd4j.getRandom().setSeed(12345); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -628,7 +624,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testLSTMWithReverseTimeSeriesVertex()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// 
System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -654,7 +650,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { public void testMultipleInputsLayer() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -683,7 +679,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(inputs) @@ -697,7 +693,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { @Test public void testMultipleOutputsLayer() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -723,7 +719,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -737,7 +733,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { @Test public void testMultipleOutputsMergeVertex() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -769,7 +765,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(input) @@ -786,7 +782,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int inW = 7; Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -820,7 +816,7 @@ public class 
GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{input}) @@ -836,7 +832,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { public void testBasicIrisTripletStackingL2Loss() { Nd4j.getRandom().setSeed(12345); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -888,7 +884,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testBasicIrisTripletStackingL2Loss()"); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{pos, anc, neg}) @@ -910,7 +906,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { for (boolean train : trainFirst) { for (double lambda : new double[] {0.0, 0.5, 2.0}) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new GaussianDistribution(0, 1)) @@ -949,7 +945,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{example}) @@ -975,7 +971,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { for (boolean train : trainFirst) { for (double lambda : new double[] {0.0, 0.5, 2.0}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() @@ -986,7 +982,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { .alpha(1.0).lambda(lambda).gradientCheck(true) .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(inputH, inputW, inputDepth)).build(); + .inputType(InputType.convolutional(inputH, inputW, inputDepth)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -1014,7 +1010,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < net.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + 
net.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -1029,7 +1025,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { @Test public void testBasicL2() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -1063,7 +1059,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{in1, in2}) @@ -1081,7 +1077,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int layerSizes = 2; Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -1121,7 +1117,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{in1, in2}) @@ -1136,7 +1132,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { public void testBasicStackUnstackDebug() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -1179,7 +1175,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{in1, in2}) @@ -1196,7 +1192,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int layerSizes = 2; Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -1242,7 +1238,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if 
(PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } graph.setLayerMaskArrays(new INDArray[] {inMask1, inMask2}, null); @@ -1259,7 +1255,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { public void testBasicTwoOutputs() { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -1301,7 +1297,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{in1, in2}) @@ -1320,7 +1316,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int[][] definitions = {null,new int[]{1}}; for(int[] definition : definitions) { log.info("Testing definition {}",definition); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .activation(Activation.TANH).updater(new NoOp()).graphBuilder() @@ -1347,7 +1343,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{in1}) @@ -1368,7 +1364,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { int w = 4; int dIn = 2; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)) @@ -1398,7 +1394,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < graph.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + graph.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + graph.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(graph).inputs(new INDArray[]{in1}) @@ -1420,7 +1416,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { labels.putScalar(new int[] {i, r.nextInt(3)}, 1.0); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.1) + ComputationGraphConfiguration conf = 
NeuralNetConfiguration.builder().l2(0.2).l1(0.1) .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345L) .updater(new NoOp()).graphBuilder().addInputs("in") @@ -1436,7 +1432,7 @@ public class GradientCheckTestsComputationGraph extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println("testGraphEmbeddingLayerSimple"); // for (int j = 0; j < cg.getNumLayers(); j++) -// System.out.println("Layer " + j + " # params: " + cg.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + cg.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.GraphConfig().net(cg).inputs(new INDArray[]{input}) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsMasking.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsMasking.java index 4efd20ee7..689720529 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsMasking.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/GradientCheckTestsMasking.java @@ -24,7 +24,6 @@ import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; @@ -117,7 +116,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { maskArr.putScalar(new int[] {0, j}, mask[i][j] ? 1.0 : 0.0); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .updater(new NoOp()) .list() @@ -158,7 +157,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { int testNum = 0; for (INDArray mask : masks) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() @@ -238,7 +237,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { Activation a = act[i]; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)).seed(12345) .list() @@ -332,7 +331,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { Activation a = act[i]; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)).seed(12345) .list() @@ -341,7 +340,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { .layer(1, new RnnOutputLayer.Builder().nIn(layerSize).nOut(nOut).lossFunction(lf) .activation(a).build()) .validateOutputLayerConfig(false) - .setInputType(InputType.recurrent(nIn,tsLength, RNNFormat.NCW)) + .inputType(InputType.recurrent(nIn,tsLength, RNNFormat.NCW)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -365,7 +364,7 @@ public class 
GradientCheckTestsMasking extends BaseDL4JTest { //Check the equivalent compgraph: Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration cg = new NeuralNetConfiguration.Builder().updater(new NoOp()) + ComputationGraphConfiguration cg = NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 2)).seed(12345) .graphBuilder().addInputs("in") @@ -397,7 +396,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { int mb = 4; int tsLength = 5; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .weightInit(new NormalDistribution(0,2)) .updater(new NoOp()) @@ -405,7 +404,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { .layer(new LSTM.Builder().nIn(3).nOut(3).build()) .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.AVG).build()) .layer(new OutputLayer.Builder().nIn(3).nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.recurrent(3)) + .inputType(InputType.recurrent(3)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -452,7 +451,7 @@ public class GradientCheckTestsMasking extends BaseDL4JTest { int mb = 10; int tsLength = 5; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .weightInit(new NormalDistribution(0,2)) .updater(new NoOp()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LRNGradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LRNGradientCheckTests.java index 9d982818a..18769905c 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LRNGradientCheckTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LRNGradientCheckTests.java @@ -22,8 +22,8 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -68,7 +68,7 @@ public class LRNGradientCheckTests extends BaseDL4JTest { labels.putScalar(i, r.nextInt(nOut), 1.0); } - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .seed(12345L) .dist(new NormalDistribution(0, 2)).list() @@ -77,14 +77,14 @@ public class LRNGradientCheckTests extends BaseDL4JTest { .layer(1, new LocalResponseNormalization.Builder().build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(hw, hw, depth)); + .inputType(InputType.convolutional(hw, hw, depth)); MultiLayerNetwork mln = new MultiLayerNetwork(builder.build()); mln.init(); // if (PRINT_RESULTS) { // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// 
System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); // } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LSTMGradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LSTMGradientCheckTests.java index c1e20d858..421b6a63d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LSTMGradientCheckTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LSTMGradientCheckTests.java @@ -22,8 +22,8 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; @@ -70,8 +70,8 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { for (boolean graves : gravesLSTM) { - Layer l0; - Layer l1; + LayerConfiguration l0; + LayerConfiguration l1; if (graves) { l0 = new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.SIGMOID) .dist(new NormalDistribution(0, 1.0)) @@ -88,8 +88,8 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { .updater(new NoOp()).build(); } - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .list() .layer(0, l0).layer(1, @@ -126,7 +126,7 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -179,11 +179,11 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { double l1 = l1vals[i]; Activation afn = activFns[i]; - NeuralNetConfiguration.Builder conf = - new NeuralNetConfiguration.Builder() - .dataType(DataType.DOUBLE) - .seed(12345L) - .dist(new NormalDistribution(0, 1)).updater(new NoOp()); + NeuralNetConfiguration.NeuralNetConfigurationBuilder conf = + NeuralNetConfiguration.builder() + .dataType(DataType.DOUBLE) + .seed(12345L) + .dist(new NormalDistribution(0, 1)).updater(new NoOp()); if (l1 > 0.0) conf.l1(l1); @@ -194,17 +194,17 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { if (biasL1[i] > 0) conf.l1Bias(biasL1[i]); - Layer layer; + LayerConfiguration layer; if (graves) { layer = new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(afn).build(); } else { layer = new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(afn).build(); } - NeuralNetConfiguration.ListBuilder conf2 = conf.list().layer(0, layer) + NeuralNetConfiguration.NeuralNetConfigurationBuilder conf2 = (NeuralNetConfigurationBuilder) conf + .layer(0, layer) .layer(1, new RnnOutputLayer.Builder(lf).activation(outputActivation) - .nIn(layerSize).nOut(nOut).build()) - ; + .nIn(layerSize).nOut(nOut).build()); MultiLayerNetwork mln = new 
MultiLayerNetwork(conf2.build()); mln.init(); @@ -215,7 +215,7 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(mln).input(input) @@ -249,14 +249,14 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { INDArray labels = TestUtils.randomOneHotTimeSeries(miniBatchSize[i], nOut, timeSeriesLength[i]); - Layer layer; + LayerConfiguration layer; if (graves) { layer = new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH).build(); } else { layer = new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH).build(); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)) .updater(new NoOp()).list().layer(0, layer) @@ -309,8 +309,8 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { double l2 = l2vals[k]; double l1 = l1vals[k]; - NeuralNetConfiguration.Builder conf = - new NeuralNetConfiguration.Builder(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder conf = + NeuralNetConfiguration.builder(); if (l1 > 0.0) conf.l1(l1); if (l2 > 0.0) @@ -320,10 +320,10 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { if (biasL1[k] > 0) conf.l1Bias(biasL1[k]); - MultiLayerConfiguration mlc = conf.seed(12345L) + NeuralNetConfiguration mlc = (NeuralNetConfiguration) conf.seed(12345L) .dataType(DataType.DOUBLE) .updater(new NoOp()) - .list().layer(0, + .layer(0, new GravesBidirectionalLSTM.Builder().nIn(nIn).nOut(layerSize) .weightInit(new NormalDistribution(0, 1)) .activation(afn) @@ -343,7 +343,7 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { + ", lossFn=" + lf + ", outputActivation=" + outputActivation + ", l2=" + l2 + ", l1=" + l1); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -380,7 +380,7 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { } } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .list() .layer(0, new GravesBidirectionalLSTM.Builder().nIn(nIn).nOut(layerSize) @@ -429,7 +429,7 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp()).seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new NoOp()).seed(12345) .dataType(DataType.DOUBLE) .dist(new UniformDistribution(-2, 2)).list() .layer(0, new ConvolutionLayer.Builder(3, 3).nIn(2).nOut(3).stride(1, 1) @@ -440,7 +440,7 @@ public class LSTMGradientCheckTests extends BaseDL4JTest { .layer(3, new GravesLSTM.Builder().nIn(4).nOut(3).activation(Activation.TANH).build()) .layer(4, new RnnOutputLayer.Builder().lossFunction(LossFunction.MCXENT).nIn(3).nOut(nClasses) .activation(Activation.SOFTMAX).build()) - 
.setInputType(InputType.convolutional(6, 6, 2)).build(); + .inputType(InputType.convolutional(6, 6, 2)).build(); //Here: ConvolutionLayerSetup in config builder doesn't know that we are expecting time series input, not standard FF input -> override it here conf.getInputPreProcessors().put(0, new RnnToCnnPreProcessor(6, 6, 2)); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LossFunctionGradientCheck.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LossFunctionGradientCheck.java index 74b142845..6197f73d3 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LossFunctionGradientCheck.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/LossFunctionGradientCheck.java @@ -26,7 +26,6 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.gradientcheck.sdlosscustom.SDLossMAE; import org.deeplearning4j.gradientcheck.sdlosscustom.SDLossMSE; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; @@ -183,7 +182,7 @@ public class LossFunctionGradientCheck extends BaseDL4JTest { + minibatchSizes[j]; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345) .updater(new NoOp()) @@ -347,7 +346,7 @@ public class LossFunctionGradientCheck extends BaseDL4JTest { } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345) .updater(new NoOp()) @@ -362,7 +361,7 @@ public class LossFunctionGradientCheck extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertSame(((LossLayer) net.getLayer(1).conf().getLayer()).getLossFn().getClass(), lossFunctions[i] + assertSame(((LossLayer) net.getLayer(1).getLayerConfiguration()).getLossFn().getClass(), lossFunctions[i] .getClass()); INDArray[] inOut = getFeaturesAndLabels(lossFunctions[i], minibatchSizes[j], 4, nOut[i], 12345); @@ -649,7 +648,7 @@ public class LossFunctionGradientCheck extends BaseDL4JTest { + minibatchSizes[j] + "; weights = " + w; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(12345) .updater(new NoOp()) @@ -667,7 +666,7 @@ public class LossFunctionGradientCheck extends BaseDL4JTest { net.init(); //Check params to avoid test flakiness on small or large params - INDArray params = net.params(); + INDArray params = net.getModelParams(); for( int x=0; x 1.5){ double d = Nd4j.getRandom().nextDouble(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/NoBiasGradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/NoBiasGradientCheckTests.java index 5cfec0631..f47a4ee0e 100644 --- 
a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/NoBiasGradientCheckTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/NoBiasGradientCheckTests.java @@ -22,7 +22,6 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -69,7 +68,7 @@ public class NoBiasGradientCheckTests extends BaseDL4JTest { for (boolean denseHasBias : new boolean[]{true, false}) { for (boolean outHasBias : new boolean[]{true, false}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .seed(12345L) @@ -78,7 +77,7 @@ public class NoBiasGradientCheckTests extends BaseDL4JTest { .dist(new NormalDistribution(0, 1)) .activation(Activation.TANH) - .hasBias(true) //Layer 0: Always have a bias + .hasBias(true) //ILayer 0: Always have a bias .build()) .layer(1, new DenseLayer.Builder().nIn(layerSize).nOut(layerSize) @@ -140,7 +139,7 @@ public class NoBiasGradientCheckTests extends BaseDL4JTest { for (boolean rnnOutHasBias : new boolean[]{true, false}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .seed(12345L) @@ -201,7 +200,7 @@ public class NoBiasGradientCheckTests extends BaseDL4JTest { for (boolean embeddingHasBias : new boolean[]{true, false}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .seed(12345L) @@ -267,8 +266,8 @@ public class NoBiasGradientCheckTests extends BaseDL4JTest { for(boolean cnnHasBias : new boolean[]{true, false}) { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new NoOp()) .dataType(DataType.DOUBLE) .dist(new NormalDistribution(0, 1)) .list() @@ -285,7 +284,7 @@ public class NoBiasGradientCheckTests extends BaseDL4JTest { .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) .nOut(4).build()) - .setInputType(InputType.convolutionalFlat(height, width, inputDepth)) + .inputType(InputType.convolutionalFlat(height, width, inputDepth)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/OutputLayerGradientChecks.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/OutputLayerGradientChecks.java index 1c1da4cee..7556178b9 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/OutputLayerGradientChecks.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/OutputLayerGradientChecks.java @@ -23,7 +23,6 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.layers.*; @@ -117,8 +116,8 @@ public class OutputLayerGradientChecks extends BaseDL4JTest { Activation oa = maskType == 2 ? Activation.SIGMOID : Activation.SOFTMAX; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .updater(new NoOp()) .list() @@ -137,7 +136,7 @@ public class OutputLayerGradientChecks extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } System.out.println("Starting test: " + testName); @@ -223,8 +222,8 @@ public class OutputLayerGradientChecks extends BaseDL4JTest { Activation oa = maskType == 3 ? Activation.SIGMOID : Activation.SOFTMAX; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) @@ -244,7 +243,7 @@ public class OutputLayerGradientChecks extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } System.out.println("Starting test: " + testName); @@ -370,8 +369,8 @@ public class OutputLayerGradientChecks extends BaseDL4JTest { Activation oa = maskType == 1 ? 
Activation.SOFTMAX : Activation.SIGMOID; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L) .dataType(DataType.DOUBLE) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) @@ -393,7 +392,7 @@ public class OutputLayerGradientChecks extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(testName); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } System.out.println("Starting test: " + testName); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/RnnGradientChecks.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/RnnGradientChecks.java index 87a42e4e0..44e904d7e 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/RnnGradientChecks.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/RnnGradientChecks.java @@ -22,7 +22,6 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -108,7 +107,7 @@ public class RnnGradientChecks extends BaseDL4JTest { System.out.println("Starting test: " + name); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .weightInit(WeightInit.XAVIER) @@ -187,7 +186,7 @@ public class RnnGradientChecks extends BaseDL4JTest { System.out.println("Starting test: " + name); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .updater(new NoOp()) .weightInit(WeightInit.XAVIER) @@ -263,7 +262,7 @@ public class RnnGradientChecks extends BaseDL4JTest { System.out.println("Starting test: " + name); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) @@ -275,7 +274,7 @@ public class RnnGradientChecks extends BaseDL4JTest { new LSTM.Builder().nOut(layerSize).build())) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -329,7 +328,7 @@ public class RnnGradientChecks extends BaseDL4JTest { System.out.println("Starting test: " + name); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new NoOp()) @@ -339,7 +338,7 @@ public class RnnGradientChecks extends BaseDL4JTest { .layer(new TimeDistributed(new DenseLayer.Builder().nOut(layerSize).activation(Activation.SOFTMAX).build())) .layer(new RnnOutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - 
.setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/UtilLayerGradientChecks.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/UtilLayerGradientChecks.java index 670987c78..212bd29da 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/UtilLayerGradientChecks.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/UtilLayerGradientChecks.java @@ -23,7 +23,6 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; @@ -127,9 +126,9 @@ public class UtilLayerGradientChecks extends BaseDL4JTest { String name = "mb=" + minibatch + ", maskType=" + maskType + ", inputRank=" + inputRank; System.out.println("*** Starting test: " + name); - Layer l1; - Layer l2; - Layer l3; + LayerConfiguration l1; + LayerConfiguration l2; + LayerConfiguration l3; InputType it; switch (inputRank){ case 2: @@ -163,7 +162,7 @@ public class UtilLayerGradientChecks extends BaseDL4JTest { } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .activation(Activation.TANH) .dataType(DataType.DOUBLE) @@ -173,7 +172,7 @@ public class UtilLayerGradientChecks extends BaseDL4JTest { .layer(new MaskLayer()) .layer(l2) .layer(l3) - .setInputType(it) + .inputType(it) .build(); @@ -197,10 +196,10 @@ public class UtilLayerGradientChecks extends BaseDL4JTest { for( int minibatch : new int[]{1,5}) { - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(12345) - .updater(Updater.NONE) + .updater(Updater.NONE.getIUpdaterWithDefaultConfig()) .list() .layer(new DenseLayer.Builder().nIn(10).nOut(10) .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/VaeGradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/VaeGradientCheckTests.java index 92ddf8622..233836066 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/VaeGradientCheckTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/VaeGradientCheckTests.java @@ -22,7 +22,6 @@ package org.deeplearning4j.gradientcheck; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -94,8 +93,8 @@ public class VaeGradientCheckTests extends BaseDL4JTest { } Activation afn = activFns[i]; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().l2(l2).l1(l1) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().l2(l2).l1(l1) .dataType(DataType.DOUBLE) .updater(new NoOp()) 
.l2Bias(biasL2[i]).l1Bias(biasL1[i]) @@ -124,7 +123,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -170,7 +169,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { Activation pzxAfn = pzxAfns[i]; Activation pxzAfn = pxzAfns[i]; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(l2) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(l2) .dataType(DataType.DOUBLE) .l1(l1).l2Bias(biasL2[i]).l1Bias(biasL1[i]).updater(new NoOp()) .seed(12345L).weightInit(WeightInit.XAVIER).list() @@ -195,7 +194,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int l = 0; l < mln.getnLayers(); l++) -// System.out.println("Layer " + l + " # params: " + mln.getLayer(l).numParams()); +// System.out.println("ILayer " + l + " # params: " + mln.getLayer(l).numParams()); } boolean gradOK = GradientCheckUtil.checkGradientsPretrainLayer(layer, DEFAULT_EPS, @@ -259,7 +258,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { throw new RuntimeException(); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.3) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(0.2).l1(0.3) .dataType(DataType.DOUBLE) .updater(new NoOp()) .seed(12345L).dist(new NormalDistribution(0, 1)) @@ -283,7 +282,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradientsPretrainLayer(layer, DEFAULT_EPS, @@ -303,7 +302,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { for (int numSamples : new int[]{1, 2}) { INDArray features = Nd4j.rand(DataType.DOUBLE, minibatch, 4); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.3) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(0.2).l1(0.3) .dataType(DataType.DOUBLE) .updater(new NoOp()) .seed(12345L).weightInit(WeightInit.XAVIER).list() @@ -325,7 +324,7 @@ public class VaeGradientCheckTests extends BaseDL4JTest { if (PRINT_RESULTS) { System.out.println(msg); // for (int j = 0; j < mln.getnLayers(); j++) -// System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams()); +// System.out.println("ILayer " + j + " # params: " + mln.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradientsPretrainLayer(layer, DEFAULT_EPS, diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/YoloGradientCheckTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/YoloGradientCheckTests.java index 9ae3e598a..1eb72b1bd 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/YoloGradientCheckTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/gradientcheck/YoloGradientCheckTests.java @@ -109,7 +109,7 @@ public class YoloGradientCheckTests extends BaseDL4JTest { labels = 
yoloLabels(mb, c, h, w).permute(0,2,3,1); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .dataType(DataType.DOUBLE) .updater(new NoOp()) .activation(a) @@ -122,7 +122,7 @@ public class YoloGradientCheckTests extends BaseDL4JTest { .layer(new Yolo2OutputLayer.Builder() .boundingBoxPriors(bbPrior) .build()) - .setInputType(InputType.convolutional(h, w, depthIn, format)) + .inputType(InputType.convolutional(h, w, depthIn, format)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -227,7 +227,7 @@ public class YoloGradientCheckTests extends BaseDL4JTest { DataSetIterator iter = new RecordReaderDataSetIterator(rr,2,1,1,true); iter.setPreProcessor(new ImagePreProcessingScaler()); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .convolutionMode(ConvolutionMode.Same) .updater(new NoOp()) @@ -240,7 +240,7 @@ public class YoloGradientCheckTests extends BaseDL4JTest { .layer(new Yolo2OutputLayer.Builder() .boundingBoxPriors(bbPriors) .build()) - .setInputType(InputType.convolutional(h,w,c)) + .inputType(InputType.convolutional(h,w,c)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/ComputationGraphConfigurationTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/ComputationGraphConfigurationTest.java index 7862cb95f..6e0cbd770 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/ComputationGraphConfigurationTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/ComputationGraphConfigurationTest.java @@ -57,7 +57,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { @Test public void testJSONBasic() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)).updater(new NoOp()) .graphBuilder().addInputs("input") @@ -79,7 +79,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { @Test public void testJSONBasic2() { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("input") .addLayer("cnn1", @@ -115,7 +115,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { public void testJSONWithGraphNodes() { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("input1", "input2") .addLayer("cnn1", @@ -149,7 +149,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { //Test no inputs for a layer: try { - new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input1") + NeuralNetConfiguration.builder().graphBuilder().addInputs("input1") .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1") .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build()).setOutputs("out") .build(); @@ -161,7 +161,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { // 
Use appendLayer on first layer try { - new NeuralNetConfiguration.Builder().graphBuilder() + NeuralNetConfiguration.builder().graphBuilder() .appendLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build()) .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build()).setOutputs("out") .build(); @@ -173,7 +173,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { //Test no network inputs try { - new NeuralNetConfiguration.Builder().graphBuilder() + NeuralNetConfiguration.builder().graphBuilder() .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1") .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "dense1") .setOutputs("out").build(); @@ -185,7 +185,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { //Test no network outputs try { - new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input1") + NeuralNetConfiguration.builder().graphBuilder().addInputs("input1") .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1") .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "dense1").build(); fail("No exception thrown for invalid configuration"); @@ -196,7 +196,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { //Test: invalid input try { - new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input1") + NeuralNetConfiguration.builder().graphBuilder().addInputs("input1") .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1") .addLayer("out", new OutputLayer.Builder().nIn(2).nOut(2).build(), "thisDoesntExist") .setOutputs("out").build(); @@ -208,7 +208,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { //Test: graph with cycles try { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input1") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("input1") .addLayer("dense1", new DenseLayer.Builder().nIn(2).nOut(2).build(), "input1", "dense3") .addLayer("dense2", new DenseLayer.Builder().nIn(2).nOut(2).build(), "dense1") .addLayer("dense3", new DenseLayer.Builder().nIn(2).nOut(2).build(), "dense2") @@ -226,7 +226,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { //Test: input != inputType count mismatch try { - new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input1", "input2") + NeuralNetConfiguration.builder().graphBuilder().addInputs("input1", "input2") .setInputTypes(new InputType.InputTypeRecurrent(10, 12)) .addLayer("cnn1", new ConvolutionLayer.Builder(2, 2).stride(2, 2).nIn(1).nOut(5) @@ -259,7 +259,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { // using runtime/reflection subtype mechanism in ComputationGraphConfiguration.fromJson() //Check a standard GraphVertex implementation, plus a static inner graph vertex - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addVertex("test", new TestGraphVertex(3, 7), "in") .addVertex("test2", new StaticInnerGraphVertex(4, 5), "in").setOutputs("test", "test2").build(); @@ -282,7 +282,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { @Test public void testOutputOrderDoesntChangeWhenCloning() { - ComputationGraphConfiguration conf = new 
NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("out1", new OutputLayer.Builder().nIn(1).nOut(1).build(), "in") .addLayer("out2", new OutputLayer.Builder().nIn(1).nOut(1).build(), "in") .addLayer("out3", new OutputLayer.Builder().nIn(1).nOut(1).build(), "in") @@ -299,7 +299,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { @Test public void testAllowDisconnectedLayers() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("bidirectional", new Bidirectional(new LSTM.Builder().activation(Activation.TANH).nOut(10).build()), "in") @@ -321,7 +321,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { @Test public void testBidirectionalGraphSummary() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("bidirectional", new Bidirectional(new LSTM.Builder().activation(Activation.TANH).nOut(10).build()), "in") @@ -408,7 +408,7 @@ public class ComputationGraphConfigurationTest extends BaseDL4JTest { if(nOut[i] == 1 && lossLayer) continue; //nOuts are not availabel in loss layer, can't expect it to detect this case try { - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/JsonTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/JsonTest.java index 190a89746..3e43bfdbe 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/JsonTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/JsonTest.java @@ -98,7 +98,7 @@ public class JsonTest extends BaseDL4JTest { for (int i = 0; i < lossFunctions.length; i++) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(Updater.ADAM).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(Updater.ADAM.getIUpdaterWithDefaultConfig()) .layer(0, new DenseLayer.Builder().nIn(4).nOut(nOut[i]).activation(Activation.TANH).build()) .layer(1, new LossLayer.Builder().lossFunction(lossFunctions[i]) .activation(outputActivationFn[i]).build()) @@ -107,8 +107,8 @@ public class JsonTest extends BaseDL4JTest { String json = conf.toJson(); String yaml = conf.toYaml(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json); - MultiLayerConfiguration fromYaml = MultiLayerConfiguration.fromYaml(yaml); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(json); + NeuralNetConfiguration fromYaml = NeuralNetConfiguration.fromYaml(yaml); assertEquals(conf, fromJson); assertEquals(conf, fromYaml); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/MultiLayerNeuralNetConfigurationTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/MultiLayerNeuralNetConfigurationTest.java index a10a9a3c7..eead1511f 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/MultiLayerNeuralNetConfigurationTest.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/MultiLayerNeuralNetConfigurationTest.java @@ -20,6 +20,18 @@ package org.deeplearning4j.nn.conf; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.util.Arrays; +import java.util.Properties; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.exception.DL4JInvalidConfigException; @@ -27,7 +39,15 @@ import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.*; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer; +import org.deeplearning4j.nn.conf.layers.LossLayer; +import org.deeplearning4j.nn.conf.layers.OutputLayer; +import org.deeplearning4j.nn.conf.layers.PoolingType; +import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; +import org.deeplearning4j.nn.conf.layers.Upsampling2D; import org.deeplearning4j.nn.conf.preprocessor.CnnToFeedForwardPreProcessor; import org.deeplearning4j.nn.conf.weightnoise.DropConnect; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -41,349 +61,349 @@ import org.nd4j.linalg.learning.config.Adam; import org.nd4j.linalg.learning.config.NoOp; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.io.*; -import java.util.Arrays; -import java.util.Properties; - -import static org.junit.jupiter.api.Assertions.*; - @Slf4j public class MultiLayerNeuralNetConfigurationTest extends BaseDL4JTest { - @TempDir - public File testDir; + @TempDir + public File testDir; - @Test - public void testJson() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() - .layer(0, new DenseLayer.Builder().dist(new NormalDistribution(1, 1e-1)).build()) - .inputPreProcessor(0, new CnnToFeedForwardPreProcessor()).build(); + private static NeuralNetConfiguration getConf() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345L) + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2) + .dist(new NormalDistribution(0, 1)).build()) + .layer(1, new OutputLayer.Builder().nIn(2).nOut(1) + .activation(Activation.TANH) + .dist(new NormalDistribution(0, 1)).lossFunction(LossFunctions.LossFunction.MSE) + .build()) + .build(); + return conf; + } - String json = conf.toJson(); - MultiLayerConfiguration from = MultiLayerConfiguration.fromJson(json); - assertEquals(conf.getConf(0), from.getConf(0)); + @Test + public void testJson() throws Exception { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(0, new DenseLayer.Builder().dist(new NormalDistribution(1, 1e-1)).build()) + .inputPreProcessor(0, new CnnToFeedForwardPreProcessor()).build(); - Properties props = new Properties(); - props.put("json", json); - String key = props.getProperty("json"); - assertEquals(json, key); - File f = new File(testDir, "props"); - f.deleteOnExit(); - 
BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(f)); - props.store(bos, ""); - bos.flush(); - bos.close(); - BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f)); - Properties props2 = new Properties(); - props2.load(bis); - bis.close(); - assertEquals(props2.getProperty("json"), props.getProperty("json")); - String json2 = props2.getProperty("json"); - MultiLayerConfiguration conf3 = MultiLayerConfiguration.fromJson(json2); - assertEquals(conf.getConf(0), conf3.getConf(0)); + String json = conf.toJson(); + NeuralNetConfiguration from = NeuralNetConfiguration.fromJson(json); + assertEquals(conf.getConf(0), from.getConf(0)); + Properties props = new Properties(); + props.put("json", json); + String key = props.getProperty("json"); + assertEquals(json, key); + File f = new File(testDir, "props"); + f.deleteOnExit(); + BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(f)); + props.store(bos, ""); + bos.flush(); + bos.close(); + BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f)); + Properties props2 = new Properties(); + props2.load(bis); + bis.close(); + assertEquals(props2.getProperty("json"), props.getProperty("json")); + String json2 = props2.getProperty("json"); + NeuralNetConfiguration conf3 = NeuralNetConfiguration.fromJson(json2); + assertEquals(conf.getConf(0), conf3.getConf(0)); + + } + + @Test + public void testConvnetJson() { + final int numRows = 76; + final int numColumns = 76; + int nChannels = 3; + int outputNum = 6; + int seed = 123; + + //setup the network + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) + .l1(1e-1).l2(2e-4).weightNoise(new DropConnect(0.5)).miniBatch(true) + .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) + .layer(0, + new ConvolutionLayer.Builder(5, 5).nOut(5).dropOut(0.5).weightInit(WeightInit.XAVIER) + .activation(Activation.RELU).build()) + .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[]{2, 2}) + .build()) + .layer(2, + new ConvolutionLayer.Builder(3, 3).nOut(10).dropOut(0.5).weightInit(WeightInit.XAVIER) + .activation(Activation.RELU).build()) + .layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[]{2, 2}) + .build()) + .layer(4, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) + .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) + .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) + .build()) + + .inputType(InputType.convolutional(numRows, numColumns, nChannels)); + + NeuralNetConfiguration conf = builder.build(); + String json = conf.toJson(); + NeuralNetConfiguration conf2 = NeuralNetConfiguration.fromJson(json); + assertEquals(conf, conf2); + } + + @Test + public void testUpsamplingConvnetJson() { + final int numRows = 76; + final int numColumns = 76; + int nChannels = 3; + int outputNum = 6; + int seed = 123; + + //setup the network + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) + .l1(1e-1).l2(2e-4).dropOut(0.5).miniBatch(true) + .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) + .layer(new ConvolutionLayer.Builder(5, 5).nOut(5).dropOut(0.5).weightInit(WeightInit.XAVIER) + .activation(Activation.RELU).build()) + .layer(new Upsampling2D.Builder().size(2).build()) + .layer(2, + new ConvolutionLayer.Builder(3, 3).nOut(10).dropOut(0.5).weightInit(WeightInit.XAVIER) + 
.activation(Activation.RELU).build()) + .layer(new Upsampling2D.Builder().size(2).build()) + .layer(4, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) + .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) + .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) + .build()) + + .inputType(InputType.convolutional(numRows, numColumns, nChannels)); + + NeuralNetConfiguration conf = builder.build(); + String json = conf.toJson(); + NeuralNetConfiguration conf2 = NeuralNetConfiguration.fromJson(json); + assertEquals(conf, conf2); + } + + @Test + public void testGlobalPoolingJson() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new NoOp()) + .dist(new NormalDistribution(0, 1.0)).seed(12345L) + .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nOut(5).build()) + .layer(1, new GlobalPoolingLayer.Builder().poolingType(PoolingType.PNORM).pnorm(3).build()) + .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nOut(3).build()) + .inputType(InputType.convolutional(32, 32, 1)).build(); + + String str = conf.toJson(); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(str); + + assertEquals(conf, fromJson); + } + + @Test + public void testYaml() throws Exception { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(0, new DenseLayer.Builder().dist(new NormalDistribution(1, 1e-1)).build()) + .inputPreProcessor(0, new CnnToFeedForwardPreProcessor()).build(); + String json = conf.toYaml(); + NeuralNetConfiguration from = NeuralNetConfiguration.fromYaml(json); + assertEquals(conf.getConf(0), from.getConf(0)); + + Properties props = new Properties(); + props.put("json", json); + String key = props.getProperty("json"); + assertEquals(json, key); + File f = new File(testDir, "props"); + f.deleteOnExit(); + BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(f)); + props.store(bos, ""); + bos.flush(); + bos.close(); + BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f)); + Properties props2 = new Properties(); + props2.load(bis); + bis.close(); + assertEquals(props2.getProperty("json"), props.getProperty("json")); + String yaml = props2.getProperty("json"); + NeuralNetConfiguration conf3 = NeuralNetConfiguration.fromYaml(yaml); + assertEquals(conf.getConf(0), conf3.getConf(0)); + + } + + @Test + public void testClone() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(0, new DenseLayer.Builder().build()) + .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).build()) + .inputPreProcessor(1, new CnnToFeedForwardPreProcessor()).build(); + + NeuralNetConfiguration conf2 = conf.clone(); + + assertEquals(conf, conf2); + assertNotSame(conf, conf2); + assertNotSame(conf.getNetConfigurations(), conf2.getNetConfigurations()); + for (int i = 0; i < conf.getNetConfigurations().size(); i++) { + assertNotSame(conf.getConf(i), conf2.getConf(i)); + } + assertNotSame(conf.getInputPreProcessors(), conf2.getInputPreProcessors()); + for (Integer layer : conf.getInputPreProcessors().keySet()) { + assertNotSame(conf.getInputPreProcess(layer), conf2.getInputPreProcess(layer)); + } + } + + @Test + public void testRandomWeightInit() { + MultiLayerNetwork model1 = new MultiLayerNetwork(getConf()); + model1.init(); + + Nd4j.getRandom().setSeed(12345L); + MultiLayerNetwork model2 = new MultiLayerNetwork(getConf()); + model2.init(); 
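// A minimal before/after sketch of the builder migration applied throughout these test
// hunks, assuming only calls that already appear in the added lines (builder(), layer(...),
// inputType(...), updater(...)); the concrete layers, sizes and the helper name
// buildAndInitSketch are illustrative, not taken from any one test:
//   before: new NeuralNetConfiguration.Builder()...list()...setInputType(...) -> MultiLayerConfiguration
//   after:  NeuralNetConfiguration.builder()...layer(...)...inputType(...)    -> NeuralNetConfiguration
static MultiLayerNetwork buildAndInitSketch() {
    NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
            .seed(12345)
            .updater(new Adam(1e-2))                        // IUpdater instance, as in the testBiasLr hunk
            .layer(0, new ConvolutionLayer.Builder(5, 5).nOut(5)
                    .activation(Activation.RELU).build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(10).activation(Activation.SOFTMAX).build())
            .inputType(InputType.convolutional(28, 28, 1))  // replaces setInputType(...)
            .build();                                       // no intermediate .list() step any more
    MultiLayerNetwork net = new MultiLayerNetwork(conf);    // MultiLayerNetwork now consumes NeuralNetConfiguration
    net.init();
    return net;
}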
+ + float[] p1 = model1.getModelParams().data().asFloat(); + float[] p2 = model2.getModelParams().data().asFloat(); + System.out.println(Arrays.toString(p1)); + System.out.println(Arrays.toString(p2)); + + org.junit.jupiter.api.Assertions.assertArrayEquals(p1, p2, 0.0f); + } + + @Test + public void testTrainingListener() { + MultiLayerNetwork model1 = new MultiLayerNetwork(getConf()); + model1.init(); + model1.addTrainingListeners(new ScoreIterationListener(1)); + + MultiLayerNetwork model2 = new MultiLayerNetwork(getConf()); + model2.addTrainingListeners(new ScoreIterationListener(1)); + model2.init(); + + Layer[] l1 = model1.getLayers(); + for (int i = 0; i < l1.length; i++) { + assertTrue(l1[i].getTrainingListeners() != null && l1[i].getTrainingListeners().size() == 1); + } + + Layer[] l2 = model2.getLayers(); + for (int i = 0; i < l2.length; i++) { + assertTrue(l2[i].getTrainingListeners() != null && l2[i].getTrainingListeners().size() == 1); + } + } + + @Test + public void testInvalidConfig() { + + try { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + fail("No exception thrown for invalid configuration"); + } catch (IllegalStateException e) { + //OK + log.error("", e); + } catch (Throwable e) { + log.error("", e); + fail("Unexpected exception thrown for invalid config"); } - @Test - public void testConvnetJson() { - final int numRows = 76; - final int numColumns = 76; - int nChannels = 3; - int outputNum = 6; - int seed = 123; - - //setup the network - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) - .l1(1e-1).l2(2e-4).weightNoise(new DropConnect(0.5)).miniBatch(true) - .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() - .layer(0, new ConvolutionLayer.Builder(5, 5).nOut(5).dropOut(0.5).weightInit(WeightInit.XAVIER) - .activation(Activation.RELU).build()) - .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) - .build()) - .layer(2, new ConvolutionLayer.Builder(3, 3).nOut(10).dropOut(0.5).weightInit(WeightInit.XAVIER) - .activation(Activation.RELU).build()) - .layer(3, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) - .build()) - .layer(4, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) - .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) - .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) - .build()) - - .setInputType(InputType.convolutional(numRows, numColumns, nChannels)); - - MultiLayerConfiguration conf = builder.build(); - String json = conf.toJson(); - MultiLayerConfiguration conf2 = MultiLayerConfiguration.fromJson(json); - assertEquals(conf, conf2); + try { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(4).build()) + .layer(2, new OutputLayer.Builder().nIn(4).nOut(5).build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + fail("No exception thrown for invalid configuration"); + } catch (IllegalStateException e) { + //OK + log.info(e.toString()); + } catch (Throwable e) { + log.error("", e); + fail("Unexpected exception thrown for invalid config"); } - @Test - public void testUpsamplingConvnetJson() { - final int numRows = 76; - final int numColumns = 76; - int nChannels = 3; - int outputNum = 6; - int seed = 123; - - //setup the network - 
MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) - .l1(1e-1).l2(2e-4).dropOut(0.5).miniBatch(true) - .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() - .layer(new ConvolutionLayer.Builder(5, 5).nOut(5).dropOut(0.5).weightInit(WeightInit.XAVIER) - .activation(Activation.RELU).build()) - .layer(new Upsampling2D.Builder().size(2).build()) - .layer(2, new ConvolutionLayer.Builder(3, 3).nOut(10).dropOut(0.5).weightInit(WeightInit.XAVIER) - .activation(Activation.RELU).build()) - .layer(new Upsampling2D.Builder().size(2).build()) - .layer(4, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) - .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) - .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) - .build()) - - .setInputType(InputType.convolutional(numRows, numColumns, nChannels)); - - MultiLayerConfiguration conf = builder.build(); - String json = conf.toJson(); - MultiLayerConfiguration conf2 = MultiLayerConfiguration.fromJson(json); - assertEquals(conf, conf2); + try { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) + .layer(0, new DenseLayer.Builder().nIn(3).nOut(4).build()) + .layer(2, new OutputLayer.Builder().nIn(4).nOut(5).build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + fail("No exception thrown for invalid configuration"); + } catch (IllegalStateException e) { + //OK + log.info(e.toString()); + } catch (Throwable e) { + log.error("", e); + fail("Unexpected exception thrown for invalid config"); } + } - @Test - public void testGlobalPoolingJson() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp()) - .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() - .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nOut(5).build()) - .layer(1, new GlobalPoolingLayer.Builder().poolingType(PoolingType.PNORM).pnorm(3).build()) - .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nOut(3).build()) - .setInputType(InputType.convolutional(32, 32, 1)).build(); + @Test + public void testListOverloads() { - String str = conf.toJson(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(str); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) + .layer(0, new DenseLayer.Builder().nIn(3).nOut(4).build()) + .layer(1, new OutputLayer.Builder().nIn(4).nOut(5).activation(Activation.SOFTMAX).build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); - assertEquals(conf, fromJson); - } + DenseLayer dl = (DenseLayer) conf.getConf(0).getLayer(); + assertEquals(3, dl.getNIn()); + assertEquals(4, dl.getNOut()); + OutputLayer ol = (OutputLayer) conf.getConf(1).getLayer(); + assertEquals(4, ol.getNIn()); + assertEquals(5, ol.getNOut()); + + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345) + .layer(0, new DenseLayer.Builder().nIn(3).nOut(4).build()) + .layer(1, new OutputLayer.Builder().nIn(4).nOut(5).activation(Activation.SOFTMAX).build()) + .build(); + MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); + net2.init(); + + NeuralNetConfiguration conf3 = NeuralNetConfiguration.builder().seed(12345) + .layer(new DenseLayer.Builder().nIn(3).nOut(4).build()) + .layer( + new OutputLayer.Builder().nIn(4).nOut(5).activation(Activation.SOFTMAX).build()) + .build(); + MultiLayerNetwork net3 = new 
MultiLayerNetwork(conf3); + net3.init(); + + assertEquals(conf, conf2); + assertEquals(conf, conf3); + } - @Test - public void testYaml() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() - .layer(0, new DenseLayer.Builder().dist(new NormalDistribution(1, 1e-1)).build()) - .inputPreProcessor(0, new CnnToFeedForwardPreProcessor()).build(); - String json = conf.toYaml(); - MultiLayerConfiguration from = MultiLayerConfiguration.fromYaml(json); - assertEquals(conf.getConf(0), from.getConf(0)); + @Test + public void testBiasLr() { + //setup the network + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) + .updater(new Adam(1e-2)) + .biasUpdater(new Adam(0.5)) + .layer(0, new ConvolutionLayer.Builder(5, 5).nOut(5).weightInit(WeightInit.XAVIER) + .activation(Activation.RELU).build()) + .layer(1, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) + .layer(2, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) + .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(10) + .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build()) + .inputType(InputType.convolutional(28, 28, 1)).build(); - Properties props = new Properties(); - props.put("json", json); - String key = props.getProperty("json"); - assertEquals(json, key); - File f = new File(testDir, "props"); - f.deleteOnExit(); - BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(f)); - props.store(bos, ""); - bos.flush(); - bos.close(); - BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f)); - Properties props2 = new Properties(); - props2.load(bis); - bis.close(); - assertEquals(props2.getProperty("json"), props.getProperty("json")); - String yaml = props2.getProperty("json"); - MultiLayerConfiguration conf3 = MultiLayerConfiguration.fromYaml(yaml); - assertEquals(conf.getConf(0), conf3.getConf(0)); + BaseLayerConfiguration l0 = (BaseLayerConfiguration) conf.getConf(0).getLayer(); + BaseLayerConfiguration l1 = (BaseLayerConfiguration) conf.getConf(1).getLayer(); + BaseLayerConfiguration l2 = (BaseLayerConfiguration) conf.getConf(2).getLayer(); + BaseLayerConfiguration l3 = (BaseLayerConfiguration) conf.getConf(3).getLayer(); - } + assertEquals(0.5, ((Adam) l0.getUpdaterByParam("b")).getLearningRate(), 1e-6); + assertEquals(1e-2, ((Adam) l0.getUpdaterByParam("W")).getLearningRate(), 1e-6); - @Test - public void testClone() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list().layer(0, new DenseLayer.Builder().build()) - .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).build()) - .inputPreProcessor(1, new CnnToFeedForwardPreProcessor()).build(); + assertEquals(0.5, ((Adam) l1.getUpdaterByParam("b")).getLearningRate(), 1e-6); + assertEquals(1e-2, ((Adam) l1.getUpdaterByParam("W")).getLearningRate(), 1e-6); - MultiLayerConfiguration conf2 = conf.clone(); + assertEquals(0.5, ((Adam) l2.getUpdaterByParam("b")).getLearningRate(), 1e-6); + assertEquals(1e-2, ((Adam) l2.getUpdaterByParam("W")).getLearningRate(), 1e-6); - assertEquals(conf, conf2); - assertNotSame(conf, conf2); - assertNotSame(conf.getConfs(), conf2.getConfs()); - for (int i = 0; i < conf.getConfs().size(); i++) { - assertNotSame(conf.getConf(i), conf2.getConf(i)); - } - assertNotSame(conf.getInputPreProcessors(), conf2.getInputPreProcessors()); - for (Integer layer : conf.getInputPreProcessors().keySet()) { - 
assertNotSame(conf.getInputPreProcess(layer), conf2.getInputPreProcess(layer)); - } - } - - @Test - public void testRandomWeightInit() { - MultiLayerNetwork model1 = new MultiLayerNetwork(getConf()); - model1.init(); - - Nd4j.getRandom().setSeed(12345L); - MultiLayerNetwork model2 = new MultiLayerNetwork(getConf()); - model2.init(); - - float[] p1 = model1.params().data().asFloat(); - float[] p2 = model2.params().data().asFloat(); - System.out.println(Arrays.toString(p1)); - System.out.println(Arrays.toString(p2)); - - org.junit.jupiter.api.Assertions.assertArrayEquals(p1, p2, 0.0f); - } - - @Test - public void testTrainingListener() { - MultiLayerNetwork model1 = new MultiLayerNetwork(getConf()); - model1.init(); - model1.addListeners( new ScoreIterationListener(1)); - - MultiLayerNetwork model2 = new MultiLayerNetwork(getConf()); - model2.addListeners( new ScoreIterationListener(1)); - model2.init(); - - Layer[] l1 = model1.getLayers(); - for (int i = 0; i < l1.length; i++) - assertTrue(l1[i].getListeners() != null && l1[i].getListeners().size() == 1); - - Layer[] l2 = model2.getLayers(); - for (int i = 0; i < l2.length; i++) - assertTrue(l2[i].getListeners() != null && l2[i].getListeners().size() == 1); - } + assertEquals(0.5, ((Adam) l3.getUpdaterByParam("b")).getLearningRate(), 1e-6); + assertEquals(1e-2, ((Adam) l3.getUpdaterByParam("W")).getLearningRate(), 1e-6); + } - private static MultiLayerConfiguration getConf() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L).list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(2) - .dist(new NormalDistribution(0, 1)).build()) - .layer(1, new OutputLayer.Builder().nIn(2).nOut(1) - .activation(Activation.TANH) - .dist(new NormalDistribution(0, 1)).lossFunction(LossFunctions.LossFunction.MSE).build()) - .build(); - return conf; - } - - @Test - public void testInvalidConfig() { - - try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - fail("No exception thrown for invalid configuration"); - } catch (IllegalStateException e) { - //OK - log.error("",e); - } catch (Throwable e) { - log.error("",e); - fail("Unexpected exception thrown for invalid config"); - } - - try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() - .layer(1, new DenseLayer.Builder().nIn(3).nOut(4).build()) - .layer(2, new OutputLayer.Builder().nIn(4).nOut(5).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - fail("No exception thrown for invalid configuration"); - } catch (IllegalStateException e) { - //OK - log.info(e.toString()); - } catch (Throwable e) { - log.error("",e); - fail("Unexpected exception thrown for invalid config"); - } - - try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() - .layer(0, new DenseLayer.Builder().nIn(3).nOut(4).build()) - .layer(2, new OutputLayer.Builder().nIn(4).nOut(5).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - fail("No exception thrown for invalid configuration"); - } catch (IllegalStateException e) { - //OK - log.info(e.toString()); - } catch (Throwable e) { - log.error("",e); - fail("Unexpected exception thrown for invalid config"); - } - } - - @Test - public void testListOverloads() { - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() - .layer(0, new 
DenseLayer.Builder().nIn(3).nOut(4).build()) - .layer(1, new OutputLayer.Builder().nIn(4).nOut(5).activation(Activation.SOFTMAX).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - DenseLayer dl = (DenseLayer) conf.getConf(0).getLayer(); - assertEquals(3, dl.getNIn()); - assertEquals(4, dl.getNOut()); - OutputLayer ol = (OutputLayer) conf.getConf(1).getLayer(); - assertEquals(4, ol.getNIn()); - assertEquals(5, ol.getNOut()); - - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).list() - .layer(0, new DenseLayer.Builder().nIn(3).nOut(4).build()) - .layer(1, new OutputLayer.Builder().nIn(4).nOut(5).activation(Activation.SOFTMAX).build()) - .build(); - MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); - net2.init(); - - MultiLayerConfiguration conf3 = new NeuralNetConfiguration.Builder().seed(12345) - .list(new DenseLayer.Builder().nIn(3).nOut(4).build(), - new OutputLayer.Builder().nIn(4).nOut(5).activation(Activation.SOFTMAX).build()) - .build(); - MultiLayerNetwork net3 = new MultiLayerNetwork(conf3); - net3.init(); - - - assertEquals(conf, conf2); - assertEquals(conf, conf3); - } - - - @Test - public void testBiasLr() { - //setup the network - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new Adam(1e-2)) - .biasUpdater(new Adam(0.5)).list() - .layer(0, new ConvolutionLayer.Builder(5, 5).nOut(5).weightInit(WeightInit.XAVIER) - .activation(Activation.RELU).build()) - .layer(1, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) - .layer(2, new DenseLayer.Builder().nOut(100).activation(Activation.RELU).build()) - .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(10) - .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)).build(); - - org.deeplearning4j.nn.conf.layers.BaseLayer l0 = (BaseLayer) conf.getConf(0).getLayer(); - org.deeplearning4j.nn.conf.layers.BaseLayer l1 = (BaseLayer) conf.getConf(1).getLayer(); - org.deeplearning4j.nn.conf.layers.BaseLayer l2 = (BaseLayer) conf.getConf(2).getLayer(); - org.deeplearning4j.nn.conf.layers.BaseLayer l3 = (BaseLayer) conf.getConf(3).getLayer(); - - assertEquals(0.5, ((Adam)l0.getUpdaterByParam("b")).getLearningRate(), 1e-6); - assertEquals(1e-2, ((Adam)l0.getUpdaterByParam("W")).getLearningRate(), 1e-6); - - assertEquals(0.5, ((Adam)l1.getUpdaterByParam("b")).getLearningRate(), 1e-6); - assertEquals(1e-2, ((Adam)l1.getUpdaterByParam("W")).getLearningRate(), 1e-6); - - assertEquals(0.5, ((Adam)l2.getUpdaterByParam("b")).getLearningRate(), 1e-6); - assertEquals(1e-2, ((Adam)l2.getUpdaterByParam("W")).getLearningRate(), 1e-6); - - assertEquals(0.5, ((Adam)l3.getUpdaterByParam("b")).getLearningRate(), 1e-6); - assertEquals(1e-2, ((Adam)l3.getUpdaterByParam("W")).getLearningRate(), 1e-6); - } - - - @Test - public void testInvalidOutputLayer(){ + @Test + public void testInvalidOutputLayer() { /* Test case (invalid configs) 1. nOut=1 + softmax @@ -393,37 +413,44 @@ public class MultiLayerNeuralNetConfigurationTest extends BaseDL4JTest { 5. 
mcxent + sigmoid */ - LossFunctions.LossFunction[] lf = new LossFunctions.LossFunction[]{ - LossFunctions.LossFunction.MCXENT, LossFunctions.LossFunction.MCXENT, LossFunctions.LossFunction.XENT, - LossFunctions.LossFunction.XENT, LossFunctions.LossFunction.MCXENT}; - int[] nOut = new int[]{1, 3, 3, 3, 3}; - Activation[] activations = new Activation[]{Activation.SOFTMAX, Activation.TANH, Activation.SOFTMAX, Activation.RELU, Activation.SIGMOID}; - for( int i=0; i r = net.getLayer(0).conf().getLayer().getRegularizationByParam("b"); + assertEquals(l1, TestUtils.getL1(net.getLayer(0).getLayerConfiguration().getRegularizationByParam("W")), 1e-4); + List r = net.getLayer(0).getLayerConfiguration().getRegularizationByParam("b"); assertEquals(0, r.size()); - r = net.getLayer(1).conf().getLayer().getRegularizationByParam("beta"); + r = net.getLayer(1).getLayerConfiguration().getRegularizationByParam("beta"); assertTrue(r == null || r.isEmpty()); - r = net.getLayer(1).conf().getLayer().getRegularizationByParam("gamma"); + r = net.getLayer(1).getLayerConfiguration().getRegularizationByParam("gamma"); assertTrue(r == null || r.isEmpty()); - r = net.getLayer(1).conf().getLayer().getRegularizationByParam("mean"); + r = net.getLayer(1).getLayerConfiguration().getRegularizationByParam("mean"); assertTrue(r == null || r.isEmpty()); - r = net.getLayer(1).conf().getLayer().getRegularizationByParam("var"); + r = net.getLayer(1).getLayerConfiguration().getRegularizationByParam("var"); assertTrue(r == null || r.isEmpty()); - assertEquals(l2, TestUtils.getL2(net.getLayer(2).conf().getLayer().getRegularizationByParam("W")), 1e-4); - r = net.getLayer(2).conf().getLayer().getRegularizationByParam("b"); + assertEquals(l2, TestUtils.getL2(net.getLayer(2).getLayerConfiguration().getRegularizationByParam("W")), 1e-4); + r = net.getLayer(2).getLayerConfiguration().getRegularizationByParam("b"); assertTrue(r == null || r.isEmpty()); } @@ -322,7 +322,7 @@ public class NeuralNetConfigurationTest extends BaseDL4JTest { .nIn(10).nOut(5).updater(new Sgd(1e-1)) .lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build(); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().seed(42).layer(layer).build(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(42).layer(layer).build(); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/constraints/TestConstraints.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/constraints/TestConstraints.java index 37260087d..d1aae72e9 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/constraints/TestConstraints.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/constraints/TestConstraints.java @@ -26,7 +26,6 @@ import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.api.layers.LayerConstraint; import org.deeplearning4j.nn.conf.BackpropType; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.constraint.MaxNormConstraint; import org.deeplearning4j.nn.conf.constraint.MinMaxNormConstraint; @@ -68,10 +67,10 @@ public class TestConstraints extends BaseDL4JTest { for (LayerConstraint lc : constraints) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.0)) .dist(new 
NormalDistribution(0, 5)) - .list() + .layer(new LSTM.Builder().nIn(12).nOut(10) .constrainRecurrent(lc).build()) .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build()) @@ -81,7 +80,7 @@ public class TestConstraints extends BaseDL4JTest { net.init(); LayerConstraint exp = lc.clone(); - assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(0).getLayerConfiguration().getConstraints().get(0).toString()); INDArray input = Nd4j.rand(3, 12); INDArray labels = Nd4j.rand(3, 8); @@ -120,11 +119,11 @@ public class TestConstraints extends BaseDL4JTest { for (LayerConstraint lc : constraints) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.0)) .dist(new NormalDistribution(0, 5)) .biasInit(10.0) - .list() + .layer(new DenseLayer.Builder().nIn(12).nOut(10) .constrainBias(lc).build()) .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build()) @@ -134,7 +133,7 @@ public class TestConstraints extends BaseDL4JTest { net.init(); LayerConstraint exp = lc.clone(); - assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(0).getLayerConfiguration().getConstraints().get(0).toString()); INDArray input = Nd4j.rand(3, 12); INDArray labels = Nd4j.rand(3, 8); @@ -173,10 +172,10 @@ public class TestConstraints extends BaseDL4JTest { for (LayerConstraint lc : constraints) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.0)) .dist(new NormalDistribution(0, 5)) - .list() + .layer(new DenseLayer.Builder().nIn(12).nOut(10) .constrainWeights(lc).build()) .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build()) @@ -186,7 +185,7 @@ public class TestConstraints extends BaseDL4JTest { net.init(); LayerConstraint exp = lc.clone(); - assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(0).getLayerConfiguration().getConstraints().get(0).toString()); INDArray input = Nd4j.rand(3, 12); INDArray labels = Nd4j.rand(3, 8); @@ -225,11 +224,11 @@ public class TestConstraints extends BaseDL4JTest { for (LayerConstraint lc : constraints) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.0)) .dist(new NormalDistribution(0, 5)) .biasInit(0.2) - .list() + .layer(new DenseLayer.Builder().nIn(12).nOut(10) .constrainAllParameters(lc).build()) .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build()) @@ -239,7 +238,7 @@ public class TestConstraints extends BaseDL4JTest { net.init(); LayerConstraint exp = lc.clone(); - assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(0).getLayerConfiguration().getConstraints().get(0).toString()); INDArray input = Nd4j.rand(3, 12); INDArray labels = Nd4j.rand(3, 8); @@ -286,11 +285,11 @@ public class TestConstraints extends BaseDL4JTest { for (LayerConstraint lc : constraints) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + 
NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd(0.0)) .dist(new NormalDistribution(0, 5)) .biasInit(0.2) - .list() + .layer(new DenseLayer.Builder().nIn(12).nOut(10) .constrainWeights(lc).constrainBias(lc).build()) .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build()) @@ -300,7 +299,7 @@ public class TestConstraints extends BaseDL4JTest { net.init(); LayerConstraint exp = lc.clone(); - assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(0).getLayerConfiguration().getConstraints().get(0).toString()); INDArray input = Nd4j.rand(3, 12); INDArray labels = Nd4j.rand(3, 8); @@ -346,12 +345,12 @@ public class TestConstraints extends BaseDL4JTest { for(LayerConstraint lc : constraints){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .constrainWeights(lc) .updater(new Sgd(0.0)) .dist(new NormalDistribution(0,5)) .biasInit(1) - .list() + .layer(new DenseLayer.Builder().nIn(12).nOut(10).build()) .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(8).build()) .build(); @@ -360,8 +359,8 @@ public class TestConstraints extends BaseDL4JTest { net.init(); LayerConstraint exp = lc.clone(); - assertEquals(exp.toString(), net.getLayer(0).conf().getLayer().getConstraints().get(0).toString()); - assertEquals(exp.toString(), net.getLayer(1).conf().getLayer().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(0).getLayerConfiguration().getConstraints().get(0).toString()); + assertEquals(exp.toString(), net.getLayer(1).getLayerConfiguration().getConstraints().get(0).toString()); INDArray input = Nd4j.rand(3, 12); INDArray labels = Nd4j.rand(3, 8); @@ -400,7 +399,7 @@ public class TestConstraints extends BaseDL4JTest { int nIn = 10; int lstmLayerSize = 32; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.RELU_UNIFORM) .updater(new RmsProp(learningRate)) @@ -457,7 +456,7 @@ public class TestConstraints extends BaseDL4JTest { INDArray label = Nd4j.rand(1, 1); g.fit(new INDArray[]{in1, in2}, new INDArray[]{label}); - for(Map.Entry e : g.paramTable().entrySet()){ + for(Map.Entry e : g.getParamTable().entrySet()){ if(!e.getKey().contains("W")){ continue; } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/dropout/TestDropout.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/dropout/TestDropout.java index 5c06f2adc..f574ae089 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/dropout/TestDropout.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/dropout/TestDropout.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.ExistingDataSetIterator; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.LayerVertex; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -60,21 +59,21 @@ public class TestDropout extends BaseDL4JTest { @Test public void 
testBasicConfig(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dropOut(0.6) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).dropOut(0.7).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).dropOut(new AlphaDropout(0.5)).build()) .build(); - assertEquals(new Dropout(0.6), conf.getConf(0).getLayer().getIDropout()); - assertEquals(new Dropout(0.7), conf.getConf(1).getLayer().getIDropout()); - assertEquals(new AlphaDropout(0.5), conf.getConf(2).getLayer().getIDropout()); + assertEquals(new Dropout(0.6), conf.getFlattenedLayerConfigurations().get(0).getIDropout()); + assertEquals(new Dropout(0.7), conf.getFlattenedLayerConfigurations().get(1).getIDropout()); + assertEquals(new AlphaDropout(0.5), conf.getFlattenedLayerConfigurations().get(2).getIDropout()); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() - .dropOut(0.6) + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() + .dropOut( new Dropout(0.6)) .graphBuilder() .addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in") @@ -83,9 +82,9 @@ public class TestDropout extends BaseDL4JTest { .setOutputs("2") .build(); - assertEquals(new Dropout(0.6), ((LayerVertex)conf2.getVertices().get("0")).getLayerConf().getLayer().getIDropout()); - assertEquals(new Dropout(0.7), ((LayerVertex)conf2.getVertices().get("1")).getLayerConf().getLayer().getIDropout()); - assertEquals(new AlphaDropout(0.5), ((LayerVertex)conf2.getVertices().get("2")).getLayerConf().getLayer().getIDropout()); + assertEquals(new Dropout(0.6), ((LayerVertex)conf2.getVertices().get("0")).getLayerConfiguration().getIDropout()); + assertEquals(new Dropout(0.7), ((LayerVertex)conf2.getVertices().get("1")).getLayerConfiguration().getIDropout()); + assertEquals(new AlphaDropout(0.5), ((LayerVertex)conf2.getVertices().get("2")).getLayerConfiguration().getIDropout()); } @Test @@ -94,8 +93,8 @@ public class TestDropout extends BaseDL4JTest { CustomDropout d1 = new CustomDropout(); CustomDropout d2 = new CustomDropout(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new DenseLayer.Builder().nIn(4).nOut(3).dropOut(d1).build()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE).dropOut(d2).nIn(3).nOut(3).build()) .build(); @@ -129,7 +128,7 @@ public class TestDropout extends BaseDL4JTest { d1 = new CustomDropout(); d2 = new CustomDropout(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(4).nOut(3).dropOut(d1).build(), "in") @@ -186,9 +185,9 @@ public class TestDropout extends BaseDL4JTest { for(IDropout id : dropouts) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dropOut(id) - .list() + .layer(new DenseLayer.Builder().nIn(4).nOut(3).build()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(3).nOut(3).build()) .build(); @@ -197,7 +196,7 @@ public class TestDropout extends BaseDL4JTest { TestUtils.testModelSerialization(net); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = 
NeuralNetConfiguration.builder() .dropOut(id) .graphBuilder() .addInputs("in") @@ -601,13 +600,13 @@ public class TestDropout extends BaseDL4JTest { @Test public void testSpatialDropoutJSON(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new DropoutLayer.Builder(new SpatialDropout(0.5)).build()) .build(); String asJson = conf.toJson(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(asJson); assertEquals(conf, fromJson); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ElementWiseVertexTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ElementWiseVertexTest.java index 046cf0f63..c3ec4a87c 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ElementWiseVertexTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ElementWiseVertexTest.java @@ -70,7 +70,7 @@ public class ElementWiseVertexTest extends BaseDL4JTest { public void testElementWiseVertexForwardAdd() { int batchsz = 24; int featuresz = 17; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().graphBuilder() .addInputs("input1", "input2", "input3") .addLayer("denselayer", new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY) @@ -111,7 +111,7 @@ public class ElementWiseVertexTest extends BaseDL4JTest { public void testElementWiseVertexForwardProduct() { int batchsz = 24; int featuresz = 17; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().graphBuilder() .addInputs("input1", "input2", "input3") .addLayer("denselayer", new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY) @@ -152,7 +152,7 @@ public class ElementWiseVertexTest extends BaseDL4JTest { public void testElementWiseVertexForwardSubtract() { int batchsz = 24; int featuresz = 17; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().graphBuilder() .addInputs("input1", "input2") .addLayer("denselayer", new DenseLayer.Builder().nIn(featuresz).nOut(1).activation(Activation.IDENTITY) @@ -194,7 +194,7 @@ public class ElementWiseVertexTest extends BaseDL4JTest { int featuresz = 17; int midsz = 13; int outputsz = 11; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .dataType(DataType.DOUBLE) .biasInit(0.0).updater(new Sgd()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() @@ -232,7 +232,7 @@ public class ElementWiseVertexTest extends BaseDL4JTest { cg.computeGradientAndScore(); // Let's figure out what our params are now. 
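// The same accessor renames recur across the graph tests below; a compact sketch of the
// pattern, assuming an initialised ComputationGraph cg and MultiLayerNetwork net (the
// helper name accessorRenameSketch is illustrative only):
static void accessorRenameSketch(ComputationGraph cg, MultiLayerNetwork net) {
    Map<String, INDArray> params = cg.getParamTable();                // was cg.paramTable()
    double score = cg.getScore();                                     // was cg.score()
    LayerConfiguration lc = net.getLayer(0).getLayerConfiguration();  // was net.getLayer(0).conf().getLayer()
}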
- Map<String, INDArray> params = cg.paramTable(); + Map<String, INDArray> params = cg.getParamTable(); INDArray dense1_W = nullsafe(params.get("dense1_W")); INDArray dense1_b = nullsafe(params.get("dense1_b")); INDArray dense2_W = nullsafe(params.get("dense2_W")); @@ -370,7 +370,7 @@ ElementWiseVertexTest extends BaseDL4JTest { int featuresz = 17; int midsz = 13; int outputsz = 11; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .dataType(DataType.DOUBLE) .biasInit(0.0).updater(new Sgd()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() @@ -408,7 +408,7 @@ ElementWiseVertexTest extends BaseDL4JTest { cg.computeGradientAndScore(); // Let's figure out what our params are now. - Map<String, INDArray> params = cg.paramTable(); + Map<String, INDArray> params = cg.getParamTable(); INDArray dense1_W = nullsafe(params.get("dense1_W")); INDArray dense1_b = nullsafe(params.get("dense1_b")); INDArray dense2_W = nullsafe(params.get("dense2_W")); @@ -545,7 +545,7 @@ ElementWiseVertexTest extends BaseDL4JTest { int featuresz = 17; int midsz = 13; int outputsz = 11; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .dataType(DataType.DOUBLE) .biasInit(0.0).updater(new Sgd()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() @@ -578,7 +578,7 @@ ElementWiseVertexTest extends BaseDL4JTest { cg.computeGradientAndScore(); // Let's figure out what our params are now. - Map<String, INDArray> params = cg.paramTable(); + Map<String, INDArray> params = cg.getParamTable(); INDArray dense1_W = nullsafe(params.get("dense1_W")); INDArray dense1_b = nullsafe(params.get("dense1_b")); INDArray dense2_W = nullsafe(params.get("dense2_W")); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ShiftVertexTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ShiftVertexTest.java index acab33814..be78b1ecf 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ShiftVertexTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/graph/ShiftVertexTest.java @@ -84,7 +84,7 @@ public class ShiftVertexTest extends BaseDL4JTest { INDArray input = Nd4j .create(new double[][] {{0.2, 0.3, 0.5}, {0.7, 1.1, 1.3}, {1.7, 1.9, 2.3}, {2.9, 3.1, 3.7}}); double sf = 4.1; - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("input") + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().graphBuilder().addInputs("input") .addLayer("denselayer", new DenseLayer.Builder().nIn(input.columns()).nOut(1) .activation(Activation.IDENTITY).build(), @@ -138,7 +138,7 @@ public class ShiftVertexTest extends BaseDL4JTest { INDArray target = Nd4j.create(new double[][] {{0.05, 0.10, 0.15, 0.20, 0.25}, {0.30, 0.35, 0.40, 0.45, 0.50}, {0.55, 0.60, 0.65, 0.70, 0.75}, {0.80, 0.85, 0.90, 0.95, 0.99}}); - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration cgc = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .dataType(DataType.DOUBLE) .updater(new Sgd(0.01)) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() @@ -158,8 +158,8 @@ public class
ShiftVertexTest extends BaseDL4JTest { cg.setInput(0, input); cg.setLabel(0, target); cg.computeGradientAndScore(); - double score_dl4j = cg.score(); - Map weights = cg.paramTable(); + double score_dl4j = cg.getScore(); + Map weights = cg.getParamTable(); Gradient g = cg.gradient(); Map gradients = g.gradientForVariable(); Map manual_gradients = new TreeMap(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerBuilderTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerBuilderTest.java index 484da1ff9..680681920 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerBuilderTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerBuilderTest.java @@ -74,7 +74,7 @@ public class LayerBuilderTest extends BaseDL4JTest { checkSerialization(layer); assertEquals(act, layer.getActivationFn()); - assertEquals(weight.getWeightInitFunction(), layer.getWeightInitFn()); + assertEquals(weight.getWeightInitFunction(), layer.getWeightInit()); assertEquals(new Dropout(dropOut), layer.getIDropout()); assertEquals(updater, layer.getIUpdater()); assertEquals(gradNorm, layer.getGradientNormalization()); @@ -199,8 +199,8 @@ public class LayerBuilderTest extends BaseDL4JTest { assertEquals(act, activationLayer.activationFn); } - private void checkSerialization(Layer layer) throws Exception { - NeuralNetConfiguration confExpected = new NeuralNetConfiguration.Builder().layer(layer).build(); + private void checkSerialization(LayerConfiguration layer) throws Exception { + NeuralNetConfiguration confExpected = NeuralNetConfiguration.builder().layer(layer).build(); NeuralNetConfiguration confActual; // check Java serialization @@ -212,21 +212,21 @@ public class LayerBuilderTest extends BaseDL4JTest { try (ByteArrayInputStream bis = new ByteArrayInputStream(data); ObjectInput in = new ObjectInputStream(bis)) { confActual = (NeuralNetConfiguration) in.readObject(); } - assertEquals(confExpected.getLayer(), confActual.getLayer(), "unequal Java serialization"); + assertEquals(confExpected.getFlattenedLayerConfigurations().get(0), confActual.getFlattenedLayerConfigurations().get(0), "unequal Java serialization"); // check JSON String json = confExpected.toJson(); confActual = NeuralNetConfiguration.fromJson(json); - assertEquals(confExpected.getLayer(), confActual.getLayer(), "unequal JSON serialization"); + assertEquals(confExpected.getFlattenedLayerConfigurations().get(0), confActual.getFlattenedLayerConfigurations().get(0), "unequal JSON serialization"); // check YAML String yaml = confExpected.toYaml(); confActual = NeuralNetConfiguration.fromYaml(yaml); - assertEquals(confExpected.getLayer(), confActual.getLayer(), "unequal YAML serialization"); + assertEquals(confExpected.getFlattenedLayerConfigurations().get(0), confActual.getFlattenedLayerConfigurations().get(0), "unequal YAML serialization"); // check the layer's use of callSuper on equals method - confActual.getLayer().setIDropout(new Dropout(new java.util.Random().nextDouble())); - assertNotEquals( confExpected.getLayer(), confActual.getLayer(), "broken equals method (missing callSuper?)"); + confActual.getFlattenedLayerConfigurations().get(0).setIDropout(new Dropout(new java.util.Random().nextDouble())); + assertNotEquals( confExpected, confActual, "broken equals method (missing callSuper?)"); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigTest.java 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigTest.java index 60b549714..7777475e6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigTest.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.conf.layers; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.Distribution; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; @@ -53,7 +52,7 @@ public class LayerConfigTest extends BaseDL4JTest { String name1 = "genisys"; String name2 = "bill"; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).name(name1).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).name(name2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -67,25 +66,25 @@ public class LayerConfigTest extends BaseDL4JTest { @Test public void testActivationLayerwiseOverride() { //Without layerwise override: - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.RELU).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.RELU) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals("relu", ((BaseLayer) conf.getConf(0).getLayer()).getActivationFn().toString()); - assertEquals("relu", ((BaseLayer) conf.getConf(1).getLayer()).getActivationFn().toString()); + assertEquals("relu", ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getActivationFn().toString()); + assertEquals("relu", ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getActivationFn().toString()); //With - conf = new NeuralNetConfiguration.Builder().activation(Activation.RELU).list() + conf = NeuralNetConfiguration.builder().activation(Activation.RELU) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).activation(Activation.TANH).build()).build(); net = new MultiLayerNetwork(conf); net.init(); - assertEquals("relu", ((BaseLayer) conf.getConf(0).getLayer()).getActivationFn().toString()); - assertEquals("tanh", ((BaseLayer) conf.getConf(1).getLayer()).getActivationFn().toString()); + assertEquals("relu", ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getActivationFn().toString()); + assertEquals("tanh", ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getActivationFn().toString()); } @@ -93,23 +92,23 @@ public class LayerConfigTest extends BaseDL4JTest { public void testWeightBiasInitLayerwiseOverride() { //Without layerwise override: final Distribution defaultDistribution = new NormalDistribution(0, 1.0); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .dist(defaultDistribution).biasInit(1).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .dist(defaultDistribution).biasInit(1) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); 
net.init(); - assertEquals(new WeightInitDistribution(defaultDistribution), ((BaseLayer) conf.getConf(0).getLayer()).getWeightInitFn()); - assertEquals(new WeightInitDistribution(defaultDistribution), ((BaseLayer) conf.getConf(1).getLayer()).getWeightInitFn()); + assertEquals(new WeightInitDistribution(defaultDistribution), ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getWeightInit()); + assertEquals(new WeightInitDistribution(defaultDistribution), ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getWeightInit()); - assertEquals(1, ((BaseLayer) conf.getConf(0).getLayer()).getBiasInit(), 0.0); - assertEquals(1, ((BaseLayer) conf.getConf(1).getLayer()).getBiasInit(), 0.0); + assertEquals(1, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getBiasInit(), 0.0); + assertEquals(1, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getBiasInit(), 0.0); //With: final Distribution overriddenDistribution = new UniformDistribution(0, 1); - conf = new NeuralNetConfiguration.Builder() - .dist(defaultDistribution).biasInit(1).list() + conf = NeuralNetConfiguration.builder() + .dist(defaultDistribution).biasInit(1) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()).layer(1, new DenseLayer.Builder().nIn(2).nOut(2) .dist(overriddenDistribution).biasInit(0).build()) @@ -118,11 +117,11 @@ public class LayerConfigTest extends BaseDL4JTest { net = new MultiLayerNetwork(conf); net.init(); - assertEquals(new WeightInitDistribution(defaultDistribution), ((BaseLayer) conf.getConf(0).getLayer()).getWeightInitFn()); - assertEquals(new WeightInitDistribution(overriddenDistribution), ((BaseLayer) conf.getConf(1).getLayer()).getWeightInitFn()); + assertEquals(new WeightInitDistribution(defaultDistribution), ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getWeightInit()); + assertEquals(new WeightInitDistribution(overriddenDistribution), ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getWeightInit()); - assertEquals(1, ((BaseLayer) conf.getConf(0).getLayer()).getBiasInit(), 0.0); - assertEquals(0, ((BaseLayer) conf.getConf(1).getLayer()).getBiasInit(), 0.0); + assertEquals(1, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getBiasInit(), 0.0); + assertEquals(0, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getBiasInit(), 0.0); } /* @@ -132,56 +131,56 @@ public class LayerConfigTest extends BaseDL4JTest { // the global config, and check they actually work. 
//Learning rate without layerwise override: - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().learningRate(0.3).list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().learningRate(0.3) + .layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.3, ((BaseLayer) conf.getConf(0).getLayer()).getLearningRate(), 0.0); - assertEquals(0.3, ((BaseLayer) conf.getConf(1).getLayer()).getLearningRate(), 0.0); + assertEquals(0.3, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getLearningRate(), 0.0); + assertEquals(0.3, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getLearningRate(), 0.0); //With: - conf = new NeuralNetConfiguration.Builder().learningRate(0.3).list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).learningRate(0.2).build()).build(); + conf = NeuralNetConfiguration.builder().learningRate(0.3) + .layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).learningRate(0.2).build()).build(); net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.3, ((BaseLayer) conf.getConf(0).getLayer()).getLearningRate(), 0.0); - assertEquals(0.2, ((BaseLayer) conf.getConf(1).getLayer()).getLearningRate(), 0.0); + assertEquals(0.3, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getLearningRate(), 0.0); + assertEquals(0.2, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getLearningRate(), 0.0); //L1 and L2 without layerwise override: - conf = new NeuralNetConfiguration.Builder().l1(0.1).l2(0.2).list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + conf = NeuralNetConfiguration.builder().l1(0.1).l2(0.2) + .layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.1, ((BaseLayer) conf.getConf(0).getLayer()).getL1(), 0.0); - assertEquals(0.1, ((BaseLayer) conf.getConf(1).getLayer()).getL1(), 0.0); - assertEquals(0.2, ((BaseLayer) conf.getConf(0).getLayer()).getL2(), 0.0); - assertEquals(0.2, ((BaseLayer) conf.getConf(1).getLayer()).getL2(), 0.0); + assertEquals(0.1, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getL1(), 0.0); + assertEquals(0.1, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getL1(), 0.0); + assertEquals(0.2, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getL2(), 0.0); + assertEquals(0.2, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getL2(), 0.0); //L1 and L2 with layerwise override: - conf = new NeuralNetConfiguration.Builder().l1(0.1).l2(0.2).list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l1(0.9).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.8).build()).build(); + conf = NeuralNetConfiguration.builder().l1(0.1).l2(0.2) + .layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).l1(0.9).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).l2(0.8).build()).build(); net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.9, ((BaseLayer) 
conf.getConf(0).getLayer()).getL1(), 0.0); - assertEquals(0.1, ((BaseLayer) conf.getConf(1).getLayer()).getL1(), 0.0); - assertEquals(0.2, ((BaseLayer) conf.getConf(0).getLayer()).getL2(), 0.0); - assertEquals(0.8, ((BaseLayer) conf.getConf(1).getLayer()).getL2(), 0.0); + assertEquals(0.9, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getL1(), 0.0); + assertEquals(0.1, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getL1(), 0.0); + assertEquals(0.2, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getL2(), 0.0); + assertEquals(0.8, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getL2(), 0.0); }*/ @Test public void testDropoutLayerwiseOverride() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().dropOut(1.0).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().dropOut(1.0) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -190,7 +189,7 @@ public class LayerConfigTest extends BaseDL4JTest { assertEquals(new Dropout(1.0), conf.getConf(0).getLayer().getIDropout()); assertEquals(new Dropout(1.0), conf.getConf(1).getLayer().getIDropout()); - conf = new NeuralNetConfiguration.Builder().dropOut(1.0).list() + conf = NeuralNetConfiguration.builder().dropOut(1.0) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).dropOut(2.0).build()).build(); @@ -206,46 +205,46 @@ public class LayerConfigTest extends BaseDL4JTest { Map testMomentumAfter = new HashMap<>(); testMomentumAfter.put(0, 0.1); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Nesterovs(1.0, new MapSchedule(ScheduleType.ITERATION, testMomentumAfter))) - .list() + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.1, ((Nesterovs)((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); - assertEquals(0.1, ((Nesterovs)((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); + assertEquals(0.1, ((Nesterovs)((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); + assertEquals(0.1, ((Nesterovs)((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); Map testMomentumAfter2 = new HashMap<>(); testMomentumAfter2.put(0, 0.2); - conf = new NeuralNetConfiguration.Builder().updater(new Nesterovs(1.0, new MapSchedule(ScheduleType.ITERATION, testMomentumAfter) )) - .list() + conf = NeuralNetConfiguration.builder().updater(new Nesterovs(1.0, new MapSchedule(ScheduleType.ITERATION, testMomentumAfter) )) + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()).layer(1, new DenseLayer.Builder() .nIn(2).nOut(2).updater(new Nesterovs(1.0, new MapSchedule(ScheduleType.ITERATION, testMomentumAfter2))).build()) .build(); net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.1, ((Nesterovs)((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); - assertEquals(0.2, ((Nesterovs)((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); + assertEquals(0.1, 
((Nesterovs)((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); + assertEquals(0.2, ((Nesterovs)((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater()).getMomentumISchedule().valueAt(0,0), 0.0); } @Test public void testUpdaterRhoRmsDecayLayerwiseOverride() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new AdaDelta(0.5, 0.9)).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new AdaDelta(0.5, 0.9)) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(new AdaDelta(0.01,0.9)).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertTrue(((BaseLayer) conf.getConf(0).getLayer()).getIUpdater() instanceof AdaDelta); - assertTrue(((BaseLayer) conf.getConf(1).getLayer()).getIUpdater() instanceof AdaDelta); - assertEquals(0.5, ((AdaDelta)((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getRho(), 0.0); - assertEquals(0.01, ((AdaDelta)((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getRho(), 0.0); + assertTrue(((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater() instanceof AdaDelta); + assertTrue(((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater() instanceof AdaDelta); + assertEquals(0.5, ((AdaDelta)((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater()).getRho(), 0.0); + assertEquals(0.01, ((AdaDelta)((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater()).getRho(), 0.0); - conf = new NeuralNetConfiguration.Builder().updater(new RmsProp(1.0, 2.0, RmsProp.DEFAULT_RMSPROP_EPSILON)).list() + conf = NeuralNetConfiguration.builder().updater(new RmsProp(1.0, 2.0, RmsProp.DEFAULT_RMSPROP_EPSILON)) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).updater(new RmsProp(1.0, 1.0, RmsProp.DEFAULT_RMSPROP_EPSILON)).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(new AdaDelta(0.5,AdaDelta.DEFAULT_ADADELTA_EPSILON)).build()) .build(); @@ -253,53 +252,51 @@ public class LayerConfigTest extends BaseDL4JTest { net = new MultiLayerNetwork(conf); net.init(); - assertTrue(((BaseLayer) conf.getConf(0).getLayer()).getIUpdater() instanceof RmsProp); - assertTrue(((BaseLayer) conf.getConf(1).getLayer()).getIUpdater() instanceof AdaDelta); - assertEquals(1.0, ((RmsProp) ((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getRmsDecay(), 0.0); - assertEquals(0.5, ((AdaDelta) ((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getRho(), 0.0); + assertTrue(((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater() instanceof RmsProp); + assertTrue(((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater() instanceof AdaDelta); + assertEquals(1.0, ((RmsProp) ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater()).getRmsDecay(), 0.0); + assertEquals(0.5, ((AdaDelta) ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater()).getRho(), 0.0); } @Test public void testUpdaterAdamParamsLayerwiseOverride() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(1.0, 0.5, 0.5, 1e-8)) - .list() + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(new Adam(1.0, 0.6, 0.7, 1e-8)).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals(0.5, ((Adam) ((BaseLayer) 
conf.getConf(0).getLayer()).getIUpdater()).getBeta1(), 0.0); - assertEquals(0.6, ((Adam) ((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getBeta1(), 0.0); - assertEquals(0.5, ((Adam) ((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getBeta2(), 0.0); - assertEquals(0.7, ((Adam) ((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getBeta2(), 0.0); + assertEquals(0.5, ((Adam) ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater()).getBeta1(), 0.0); + assertEquals(0.6, ((Adam) ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater()).getBeta1(), 0.0); + assertEquals(0.5, ((Adam) ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getIUpdater()).getBeta2(), 0.0); + assertEquals(0.7, ((Adam) ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getIUpdater()).getBeta2(), 0.0); } @Test public void testGradientNormalizationLayerwiseOverride() { //Learning rate without layerwise override: - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue) - .gradientNormalizationThreshold(10).list() + .gradientNormalizationThreshold(10) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - - assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, - conf.getConf(0).getLayer().getGradientNormalization()); - assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, - conf.getConf(1).getLayer().getGradientNormalization()); - assertEquals(10, conf.getConf(0).getLayer().getGradientNormalizationThreshold(), 0.0); - assertEquals(10, conf.getConf(1).getLayer().getGradientNormalizationThreshold(), 0.0); + BaseLayerConfiguration bconf = (BaseLayerConfiguration) conf.getConf(0).getLayer(); + assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, bconf.getGradientNormalization()); + assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getGradientNormalization()); + assertEquals(10, bconf.getGradientNormalizationThreshold(), 0.0); + assertEquals(10, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getGradientNormalizationThreshold(), 0.0); //With: - conf = new NeuralNetConfiguration.Builder() + conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue) - .gradientNormalizationThreshold(10).list() + .gradientNormalizationThreshold(10) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2) .gradientNormalization(GradientNormalization.None) @@ -309,11 +306,10 @@ public class LayerConfigTest extends BaseDL4JTest { net = new MultiLayerNetwork(conf); net.init(); - assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, - conf.getConf(0).getLayer().getGradientNormalization()); - assertEquals(GradientNormalization.None, conf.getConf(1).getLayer().getGradientNormalization()); - assertEquals(10, conf.getConf(0).getLayer().getGradientNormalizationThreshold(), 0.0); - assertEquals(2.5, conf.getConf(1).getLayer().getGradientNormalizationThreshold(), 0.0); + assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getGradientNormalization()); + assertEquals(GradientNormalization.None, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getGradientNormalization()); + assertEquals(10, ((BaseLayerConfiguration) conf.getConf(0).getLayer()).getGradientNormalizationThreshold(), 0.0); + assertEquals(2.5, ((BaseLayerConfiguration) conf.getConf(1).getLayer()).getGradientNormalizationThreshold(), 0.0); } @@ -323,11 +319,11
@@ public class LayerConfigTest extends BaseDL4JTest { double lr = 2; double lrDecayRate = 5; int iterations = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().learningRate(lr) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().learningRate(lr) .updater(Updater.SGD) .learningRateDecayPolicy(LearningRatePolicy.Exponential).lrPolicyDecayRate(lrDecayRate).list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + .layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -343,10 +339,10 @@ public class LayerConfigTest extends BaseDL4JTest { double lrDecayRate = 5; double power = 3; int iterations = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().iterations(iterations).learningRate(lr) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().iterations(iterations).learningRate(lr) .learningRateDecayPolicy(LearningRatePolicy.Inverse).lrPolicyDecayRate(lrDecayRate) - .lrPolicyPower(power).list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + .lrPolicyPower(power).list().layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -365,10 +361,10 @@ public class LayerConfigTest extends BaseDL4JTest { double lrDecayRate = 5; double steps = 4; int iterations = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().iterations(iterations).learningRate(lr) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().iterations(iterations).learningRate(lr) .learningRateDecayPolicy(LearningRatePolicy.Step).lrPolicyDecayRate(lrDecayRate) - .lrPolicySteps(steps).list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + .lrPolicySteps(steps).list().layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -386,10 +382,10 @@ public class LayerConfigTest extends BaseDL4JTest { double lrDecayRate = 5; double power = 3; int iterations = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().iterations(iterations).learningRate(lr) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().iterations(iterations).learningRate(lr) .learningRateDecayPolicy(LearningRatePolicy.Poly).lrPolicyDecayRate(lrDecayRate) - .lrPolicyPower(power).list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + .lrPolicyPower(power).list().layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -407,10 +403,10 @@ public class LayerConfigTest extends BaseDL4JTest { double lrDecayRate = 5; double steps = 4; int iterations = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().iterations(iterations).learningRate(lr) + NeuralNetConfiguration conf 
= NeuralNetConfiguration.builder().iterations(iterations).learningRate(lr) .learningRateDecayPolicy(LearningRatePolicy.Sigmoid).lrPolicyDecayRate(lrDecayRate) - .lrPolicySteps(steps).list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) - .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); + .lrPolicySteps(steps).list().layer(0, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()) + .layer(1, new DenseLayerConfiguration.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigValidationTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigValidationTest.java index 4b60f98c4..b813b2b5f 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigValidationTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/layers/LayerConfigValidationTest.java @@ -24,7 +24,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.distribution.Distribution; @@ -56,8 +55,8 @@ public class LayerConfigValidationTest extends BaseDL4JTest { @Test public void testDropConnect() { // Warning thrown only since some layers may not have l1 or l2 - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).weightNoise(new DropConnect(0.5)) - .list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)).weightNoise(new DropConnect(0.5)) + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -67,8 +66,8 @@ public class LayerConfigValidationTest extends BaseDL4JTest { @Test public void testL1L2NotSet() { // Warning thrown only since some layers may not have l1 or l2 - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.3)) - .list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(0.3)) + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -78,7 +77,7 @@ public class LayerConfigValidationTest extends BaseDL4JTest { //@Ignore //Old assumption: throw exception on l1 but no regularization. 
Current design: warn, not exception public void testRegNotSetL1Global() { assertThrows(IllegalStateException.class, () -> { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.3)).l1(0.5).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(0.3)).l1(0.5) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -90,7 +89,7 @@ public class LayerConfigValidationTest extends BaseDL4JTest { //@Ignore //Old assumption: throw exception on l1 but no regularization. Current design: warn, not exception public void testRegNotSetL2Local() { assertThrows(IllegalStateException.class, () -> { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.3)).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(0.3)) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -101,9 +100,9 @@ public class LayerConfigValidationTest extends BaseDL4JTest { @Test public void testWeightInitDistNotSet() { // Warning thrown only since global dist can be set with a different weight init locally - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Sgd(0.3)).dist(new GaussianDistribution(1e-3, 2)) - .list().layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new Sgd(0.3)).dist(new GaussianDistribution(1e-3, 2)) + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -115,8 +114,8 @@ public class LayerConfigValidationTest extends BaseDL4JTest { Map testMomentumAfter = new HashMap<>(); testMomentumAfter.put(0, 0.1); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Nesterovs(1.0, new MapSchedule(ScheduleType.ITERATION, testMomentumAfter))).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new Nesterovs(1.0, new MapSchedule(ScheduleType.ITERATION, testMomentumAfter))) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -125,11 +124,11 @@ public class LayerConfigValidationTest extends BaseDL4JTest { @Test public void testCompGraphNullLayer() { - ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder gb = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.01)) .seed(42).miniBatch(false).l1(0.2).l2(0.2) /* Graph Builder */ - .updater(Updater.RMSPROP).graphBuilder().addInputs("in") + .updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()).graphBuilder().addInputs("in") .addLayer("L" + 1, new GravesLSTM.Builder().nIn(20).updater(Updater.RMSPROP).nOut(10) .weightInit(WeightInit.XAVIER) @@ -157,52 +156,52 @@ public class LayerConfigValidationTest extends BaseDL4JTest { double expectedL2 = 0.0; // Nesterovs Updater - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Nesterovs(0.9)) - .list().layer(0, new 
DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).build()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Nesterovs(0.9)) + .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(new Nesterovs(0.3, 0.4)).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - BaseLayer layerConf = (BaseLayer) net.getLayer(0).conf().getLayer(); + BaseLayerConfiguration layerConf = (BaseLayerConfiguration) net.getLayer(0).getLayerConfiguration(); assertEquals(expectedMomentum, ((Nesterovs) layerConf.getIUpdater()).getMomentum(), 1e-3); assertNull(TestUtils.getL1Reg(layerConf.getRegularization())); assertEquals(0.5, TestUtils.getL2(layerConf), 1e-3); - BaseLayer layerConf1 = (BaseLayer) net.getLayer(1).conf().getLayer(); + BaseLayerConfiguration layerConf1 = (BaseLayerConfiguration) net.getLayer(1).getLayerConfiguration(); assertEquals(0.4, ((Nesterovs) layerConf1.getIUpdater()).getMomentum(), 1e-3); // Adam Updater - conf = new NeuralNetConfiguration.Builder().updater(new Adam(0.3)) - .weightInit(new WeightInitDistribution(expectedDist)).list() + conf = NeuralNetConfiguration.builder().updater(new Adam(0.3)) + .weightInit(expectedDist) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).l2(0.5).l1(0.3).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build(); net = new MultiLayerNetwork(conf); net.init(); - layerConf = (BaseLayer) net.getLayer(0).conf().getLayer(); + layerConf = (BaseLayerConfiguration) net.getLayer(0).getLayerConfiguration(); assertEquals(0.3, TestUtils.getL1(layerConf), 1e-3); assertEquals(0.5, TestUtils.getL2(layerConf), 1e-3); - layerConf1 = (BaseLayer) net.getLayer(1).conf().getLayer(); + layerConf1 = (BaseLayerConfiguration) net.getLayer(1).getLayerConfiguration(); assertEquals(expectedAdamMeanDecay, ((Adam) layerConf1.getIUpdater()).getBeta1(), 1e-3); assertEquals(expectedAdamVarDecay, ((Adam) layerConf1.getIUpdater()).getBeta2(), 1e-3); - assertEquals(new WeightInitDistribution(expectedDist), layerConf1.getWeightInitFn()); + assertEquals(new WeightInitDistribution(expectedDist), layerConf1.getWeightInit()); assertNull(TestUtils.getL1Reg(layerConf1.getRegularization())); assertNull(TestUtils.getL2Reg(layerConf1.getRegularization())); //RMSProp Updater - conf = new NeuralNetConfiguration.Builder().updater(new RmsProp(0.3)).list() + conf = NeuralNetConfiguration.builder().updater(new RmsProp(0.3)) .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build()) .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(new RmsProp(0.3, 0.4, RmsProp.DEFAULT_RMSPROP_EPSILON)).build()).build(); net = new MultiLayerNetwork(conf); net.init(); - layerConf = (BaseLayer) net.getLayer(0).conf().getLayer(); + layerConf = (BaseLayerConfiguration) net.getLayer(0).getLayerConfiguration(); assertEquals(expectedRmsDecay, ((RmsProp) layerConf.getIUpdater()).getRmsDecay(), 1e-3); assertNull(TestUtils.getL1Reg(layerConf.getRegularization())); assertNull(TestUtils.getL2Reg(layerConf.getRegularization())); - layerConf1 = (BaseLayer) net.getLayer(1).conf().getLayer(); + layerConf1 = (BaseLayerConfiguration) net.getLayer(1).getLayerConfiguration(); assertEquals(0.4, ((RmsProp) layerConf1.getIUpdater()).getRmsDecay(), 1e-3); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CNNProcessorTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CNNProcessorTest.java index 48112c682..d530e416d 100644 --- 
a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CNNProcessorTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CNNProcessorTest.java @@ -23,6 +23,7 @@ package org.deeplearning4j.nn.conf.preprocessor; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -233,64 +234,60 @@ public class CNNProcessorTest extends BaseDL4JTest { @Test public void testInvalidInputShape(){ - NeuralNetConfiguration.Builder builder = new NeuralNetConfiguration.Builder() - .seed(123) - .miniBatch(true) - .cacheMode(CacheMode.DEVICE) - .updater(new Nesterovs(0.9)) - .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT); int[] kernelArray = new int[]{3,3}; int[] strideArray = new int[]{1,1}; int[] zeroPaddingArray = new int[]{0,0}; int processWidth = 4; - NeuralNetConfiguration.ListBuilder listBuilder = builder.list(); // Building the DL4J network - listBuilder = listBuilder.layer(0, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) + NeuralNetConfiguration conf =NeuralNetConfiguration.builder() + .seed(123) + .miniBatch(true) + .cacheMode(CacheMode.DEVICE) + .updater(new Nesterovs(0.9)) + .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) + // Building the DL4J network + .layer(0, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) .name("cnn1") .convolutionMode(ConvolutionMode.Strict) .nIn(2) // 2 input channels .nOut(processWidth) .weightInit(WeightInit.XAVIER_UNIFORM) .activation(Activation.RELU) - .biasInit(1e-2).build()); + .biasInit(1e-2).build()) - listBuilder = listBuilder.layer(1, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) + .layer(1, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) .name("cnn2") .convolutionMode(ConvolutionMode.Strict) .nOut(processWidth) .weightInit(WeightInit.XAVIER_UNIFORM) .activation(Activation.RELU) .biasInit(1e-2) - .build()); + .build()) - listBuilder = listBuilder.layer(2, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) + .layer(2, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) .name("cnn3") .convolutionMode(ConvolutionMode.Strict) .nOut(processWidth) .weightInit(WeightInit.XAVIER_UNIFORM) - .activation(Activation.RELU).build()); + .activation(Activation.RELU).build()) - listBuilder = listBuilder.layer(3, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) + .layer(3, new ConvolutionLayer.Builder(kernelArray, strideArray, zeroPaddingArray) .name("cnn4") .convolutionMode(ConvolutionMode.Strict) .nOut(processWidth) .weightInit(WeightInit.XAVIER_UNIFORM) - .activation(Activation.RELU).build()); + .activation(Activation.RELU).build()) - listBuilder = listBuilder - .layer(4, new OutputLayer.Builder(LossFunctions.LossFunction.MSE) + .layer(4, new OutputLayer.Builder(LossFunctions.LossFunction.MSE) .name("output") .nOut(1) .activation(Activation.TANH) - .build()); + .build()) - MultiLayerConfiguration conf = listBuilder - - - 
.setInputType(InputType.convolutional(20, 10, 2)) + .inputType(InputType.convolutional(20, 10, 2)) .build(); // For some reason, this model works diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CustomPreprocessorTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CustomPreprocessorTest.java index 36bfbc95f..c5755753a 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CustomPreprocessorTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/CustomPreprocessorTest.java @@ -21,8 +21,6 @@ package org.deeplearning4j.nn.conf.preprocessor; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -30,11 +28,6 @@ import org.deeplearning4j.nn.conf.preprocessor.custom.MyCustomPreprocessor; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.lossfunctions.LossFunctions; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.introspect.AnnotatedClass; -import com.fasterxml.jackson.databind.jsontype.NamedType; - -import java.util.Collection; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -44,8 +37,8 @@ public class CustomPreprocessorTest extends BaseDL4JTest { @Test public void testCustomPreprocessor() { //Second: let's create a MultiLayerCofiguration with one, and check JSON and YAML config actually works... 
- MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10) .activation(Activation.SOFTMAX).nOut(10).build()) @@ -57,10 +50,10 @@ public class CustomPreprocessorTest extends BaseDL4JTest { // System.out.println(json); - MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration confFromJson = NeuralNetConfiguration.fromJson(json); assertEquals(conf, confFromJson); - MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml); + NeuralNetConfiguration confFromYaml = NeuralNetConfiguration.fromYaml(yaml); assertEquals(conf, confFromYaml); assertTrue(confFromJson.getInputPreProcess(0) instanceof MyCustomPreprocessor); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/TestPreProcessors.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/TestPreProcessors.java index 56c6cfb1d..798762556 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/TestPreProcessors.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/preprocessor/TestPreProcessors.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.conf.preprocessor; import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; @@ -58,14 +57,14 @@ public class TestPreProcessors extends BaseDL4JTest { int timeSeriesLength = timeSeriesLengths[x]; RnnToFeedForwardPreProcessor proc = new RnnToFeedForwardPreProcessor(); - NeuralNetConfiguration nnc = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration nnc = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(layerSize) .nOut(layerSize).build()) .build(); - long numParams = nnc.getLayer().initializer().numParams(nnc); + long numParams = nnc.getFlattenedLayerConfigurations().get(0).initializer().numParams(nnc); INDArray params = Nd4j.create(1, numParams); - DenseLayer layer = (DenseLayer) nnc.getLayer().instantiate(nnc, null, 0, params, true, params.dataType()); + DenseLayer layer = (DenseLayer) nnc.getFlattenedLayerConfigurations().get(0).instantiate(nnc, null, 0, params, true, params.dataType()); layer.setInputMiniBatchSize(miniBatchSize); INDArray activations3dc = Nd4j.create(new int[] {miniBatchSize, layerSize, timeSeriesLength}, 'c'); @@ -143,14 +142,14 @@ public class TestPreProcessors extends BaseDL4JTest { FeedForwardToRnnPreProcessor proc = new FeedForwardToRnnPreProcessor(); - NeuralNetConfiguration nnc = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration nnc = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(layerSize) .nOut(layerSize).build()) .build(); - val numParams = nnc.getLayer().initializer().numParams(nnc); + val numParams = nnc.getFlattenedLayerConfigurations().get(0).initializer().numParams(nnc); INDArray params = Nd4j.create(1, numParams); - DenseLayer layer = (DenseLayer) nnc.getLayer().instantiate(nnc, null, 0, params, true, params.dataType()); + DenseLayer layer = 
(DenseLayer) nnc.getFlattenedLayerConfigurations().get(0).instantiate(nnc, null, 0, params, true, params.dataType()); layer.setInputMiniBatchSize(miniBatchSize); INDArray rand = Nd4j.rand(miniBatchSize * timeSeriesLength, layerSize); @@ -227,16 +226,16 @@ public class TestPreProcessors extends BaseDL4JTest { InputPreProcessor proc = new CnnToRnnPreProcessor(inputHeight, inputWidth, nChannels); NeuralNetConfiguration nnc = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( inputWidth, inputHeight).nIn(cnnNChannelsIn) .nOut(nChannels).build()) .build(); - val numParams = nnc.getLayer().initializer().numParams(nnc); + val numParams = nnc.getFlattenedLayerConfigurations().get(0).initializer().numParams(nnc); INDArray params = Nd4j.create(1, numParams); ConvolutionLayer layer = - (ConvolutionLayer) nnc.getLayer().instantiate(nnc, null, 0, params, true, params.dataType()); + (ConvolutionLayer) nnc.getFlattenedLayerConfigurations().get(0).instantiate(nnc, null, 0, params, true, params.dataType()); layer.setInputMiniBatchSize(miniBatchSize); INDArray activationsCnn = Nd4j.rand(miniBatchSize * timeSeriesLength, nChannels, @@ -309,16 +308,16 @@ public class TestPreProcessors extends BaseDL4JTest { InputPreProcessor proc = new RnnToCnnPreProcessor(inputHeight, inputWidth, nChannels); NeuralNetConfiguration nnc = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( inputWidth, inputHeight).nIn(cnnNChannelsIn) .nOut(nChannels).build()) .build(); - val numParams = nnc.getLayer().initializer().numParams(nnc); + val numParams = nnc.getFlattenedLayerConfigurations().get(0).initializer().numParams(nnc); INDArray params = Nd4j.create(1, numParams); ConvolutionLayer layer = - (ConvolutionLayer) nnc.getLayer().instantiate(nnc, null, 0, params, true, params.dataType()); + (ConvolutionLayer) nnc.getFlattenedLayerConfigurations().get(0).instantiate(nnc, null, 0, params, true, params.dataType()); layer.setInputMiniBatchSize(miniBatchSize); val shape_rnn = new long[] {miniBatchSize, nChannels * inputHeight * inputWidth, @@ -396,8 +395,8 @@ public class TestPreProcessors extends BaseDL4JTest { @Test public void testAutoAdditionOfPreprocessors() { //FF->RNN and RNN->FF - MultiLayerConfiguration conf1 = - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf1 = + NeuralNetConfiguration.builder() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(5) .nOut(6).build()) .layer(1, new GravesLSTM.Builder().nIn(6).nOut(7).build()) @@ -412,12 +411,12 @@ public class TestPreProcessors extends BaseDL4JTest { //FF-> CNN, CNN-> FF, FF->RNN - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder().nOut(10) .kernelSize(5, 5).stride(1, 1).build()) .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(6).build()) .layer(2, new RnnOutputLayer.Builder().nIn(6).nOut(5).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); //Expect preprocessors: 0: FF->CNN; 1: CNN->FF; 2: FF->RNN assertEquals(3, conf2.getInputPreProcessors().size()); assertTrue(conf2.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor); @@ -425,12 
+424,12 @@ public class TestPreProcessors extends BaseDL4JTest { assertTrue(conf2.getInputPreProcess(2) instanceof FeedForwardToRnnPreProcessor); //CNN-> FF, FF->RNN - InputType.convolutional instead of convolutionalFlat - MultiLayerConfiguration conf2a = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf2a = NeuralNetConfiguration.builder() .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder().nOut(10) .kernelSize(5, 5).stride(1, 1).build()) .layer(1, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(6).build()) .layer(2, new RnnOutputLayer.Builder().nIn(6).nOut(5).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)).build(); + .inputType(InputType.convolutional(28, 28, 1)).build(); //Expect preprocessors: 1: CNN->FF; 2: FF->RNN assertEquals(2, conf2a.getInputPreProcessors().size()); assertTrue(conf2a.getInputPreProcess(1) instanceof CnnToFeedForwardPreProcessor); @@ -438,12 +437,12 @@ public class TestPreProcessors extends BaseDL4JTest { //FF->CNN and CNN->RNN: - MultiLayerConfiguration conf3 = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf3 = NeuralNetConfiguration.builder().list() .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder().nOut(10) .kernelSize(5, 5).stride(1, 1).build()) .layer(1, new GravesLSTM.Builder().nOut(6).build()) .layer(2, new RnnOutputLayer.Builder().nIn(6).nOut(5).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); //Expect preprocessors: 0: FF->CNN, 1: CNN->RNN; assertEquals(2, conf3.getInputPreProcessors().size()); assertTrue(conf3.getInputPreProcess(0) instanceof FeedForwardToCnnPreProcessor); @@ -452,8 +451,8 @@ public class TestPreProcessors extends BaseDL4JTest { @Test public void testCnnToDense() { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .list().layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( 4, 4) // 28*28*1 => 15*15*10 @@ -467,7 +466,7 @@ public class TestPreProcessors extends BaseDL4JTest { .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(200) .nOut(5).weightInit(WeightInit.RELU) .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) + .inputType(InputType.convolutionalFlat(28, 28, 1)) .build(); assertNotNull(conf.getInputPreProcess(0)); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/weightnoise/TestWeightNoise.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/weightnoise/TestWeightNoise.java index d4bae91a6..8977d1b3f 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/weightnoise/TestWeightNoise.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/conf/weightnoise/TestWeightNoise.java @@ -27,10 +27,9 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.ExistingDataSetIterator; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; 
import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -65,9 +64,9 @@ public class TestWeightNoise extends BaseDL4JTest { }; for (IWeightNoise wn : weightNoises) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .weightNoise(wn) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).weightNoise(new DropConnect(0.25)).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()) @@ -76,14 +75,14 @@ public class TestWeightNoise extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals(wn, ((BaseLayer) net.getLayer(0).conf().getLayer()).getWeightNoise()); - assertEquals(new DropConnect(0.25), ((BaseLayer) net.getLayer(1).conf().getLayer()).getWeightNoise()); - assertEquals(wn, ((BaseLayer) net.getLayer(2).conf().getLayer()).getWeightNoise()); + assertEquals(wn, ((BaseLayerConfiguration) net.getLayer(0).getLayerConfiguration()).getWeightNoise()); + assertEquals(new DropConnect(0.25), ((BaseLayerConfiguration) net.getLayer(1).getLayerConfiguration()).getWeightNoise()); + assertEquals(wn, ((BaseLayerConfiguration) net.getLayer(2).getLayerConfiguration()).getWeightNoise()); TestUtils.testModelSerialization(net); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .weightNoise(wn) .graphBuilder() .addInputs("in") @@ -96,9 +95,9 @@ public class TestWeightNoise extends BaseDL4JTest { ComputationGraph graph = new ComputationGraph(conf2); graph.init(); - assertEquals(wn, ((BaseLayer) graph.getLayer(0).conf().getLayer()).getWeightNoise()); - assertEquals(new DropConnect(0.25), ((BaseLayer) graph.getLayer(1).conf().getLayer()).getWeightNoise()); - assertEquals(wn, ((BaseLayer) graph.getLayer(2).conf().getLayer()).getWeightNoise()); + assertEquals(wn, ((BaseLayerConfiguration) graph.getLayer(0).getLayerConfiguration()).getWeightNoise()); + assertEquals(new DropConnect(0.25), ((BaseLayerConfiguration) graph.getLayer(1).getLayerConfiguration()).getWeightNoise()); + assertEquals(wn, ((BaseLayerConfiguration) graph.getLayer(2).getLayerConfiguration()).getWeightNoise()); TestUtils.testModelSerialization(graph); @@ -144,8 +143,8 @@ public class TestWeightNoise extends BaseDL4JTest { List list = Arrays.asList(wn1, wn2, wn3); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).weightNoise(wn1).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).weightNoise(wn2).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).weightNoise(wn3).build()) @@ -168,7 +167,7 @@ public class TestWeightNoise extends BaseDL4JTest { wn3 = new CustomWeightNoise(); list = Arrays.asList(wn1, wn2, wn3); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new DenseLayer.Builder().nIn(10).nOut(10).weightNoise(wn1).build(), "in") @@ -247,9 +246,9 @@ public class TestWeightNoise extends BaseDL4JTest { public void testDropConnectValues() { Nd4j.getRandom().setSeed(12345); - 
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .weightInit(WeightInit.ONES) - .list() + .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/dtypes/DTypeTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/dtypes/DTypeTests.java index b3e625849..2f2a316dd 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/dtypes/DTypeTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/dtypes/DTypeTests.java @@ -30,7 +30,6 @@ import org.deeplearning4j.common.config.DL4JClassLoading; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.WorkspaceMode; @@ -82,7 +81,7 @@ import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer; import org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM; import org.deeplearning4j.nn.conf.layers.GravesLSTM; import org.deeplearning4j.nn.conf.layers.LSTM; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.LearnedSelfAttentionLayer; import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization; import org.deeplearning4j.nn.conf.layers.LocallyConnected1D; @@ -125,7 +124,7 @@ import org.deeplearning4j.nn.conf.layers.recurrent.TimeDistributed; import org.deeplearning4j.nn.conf.layers.util.MaskLayer; import org.deeplearning4j.nn.conf.layers.util.MaskZeroLayer; import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer; import org.deeplearning4j.nn.conf.preprocessor.CnnToFeedForwardPreProcessor; import org.deeplearning4j.nn.conf.preprocessor.CnnToRnnPreProcessor; @@ -141,7 +140,6 @@ import org.deeplearning4j.nn.modelimport.keras.preprocessors.ReshapePreprocessor import org.deeplearning4j.nn.modelimport.keras.preprocessors.TensorFlowCnnToFeedForwardPreProcessor; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; -import org.deeplearning4j.nn.weights.WeightInitDistribution; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Disabled; @@ -216,7 +214,7 @@ public class DTypeTests extends BaseDL4JTest { continue; } - if (Layer.class.isAssignableFrom(clazz)) { + if (LayerConfiguration.class.isAssignableFrom(clazz)) { layerClasses.add(clazz); } else if (InputPreProcessor.class.isAssignableFrom(clazz)) { preprocClasses.add(clazz); @@ -229,7 +227,7 @@ public class DTypeTests extends BaseDL4JTest { if (seenLayers.size() < layerClasses.size()) { for (Class c : layerClasses) { if (!seenLayers.contains(c) && !ignoreClasses.contains(c)) { - log.warn("Layer class not tested for global vs. network datatypes: {}", c); + log.warn("ILayer class not tested for global vs. 
network datatypes: {}", c); fail = true; } } @@ -258,12 +256,12 @@ public class DTypeTests extends BaseDL4JTest { } public static void logUsedClasses(MultiLayerNetwork net) { - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - for (NeuralNetConfiguration nnc : conf.getConfs()) { - Layer l = nnc.getLayer(); + NeuralNetConfiguration conf = net.getNetConfiguration(); + for (NeuralNetConfiguration nnc : conf.getNetConfigurations()) { + LayerConfiguration l = nnc.getFlattenedLayerConfigurations().get(0); seenLayers.add(l.getClass()); - if (l instanceof BaseWrapperLayer) { - BaseWrapperLayer bwl = (BaseWrapperLayer) l; + if (l instanceof BaseWrapperLayerConfiguration) { + BaseWrapperLayerConfiguration bwl = (BaseWrapperLayerConfiguration) l; seenLayers.add(bwl.getUnderlying().getClass()); } else if (l instanceof Bidirectional) { seenLayers.add(((Bidirectional) l).getFwd().getClass()); @@ -279,11 +277,11 @@ public class DTypeTests extends BaseDL4JTest { } public static void logUsedClasses(ComputationGraph net) { - ComputationGraphConfiguration conf = net.getConfiguration(); + ComputationGraphConfiguration conf = net.getComputationGraphConfiguration(); for (GraphVertex gv : conf.getVertices().values()) { seenVertices.add(gv.getClass()); if (gv instanceof LayerVertex) { - seenLayers.add(((LayerVertex) gv).getLayerConf().getLayer().getClass()); + seenLayers.add(((LayerVertex) gv).getLayerConfiguration().getClass()); InputPreProcessor ipp = ((LayerVertex) gv).getPreProcessor(); if (ipp != null) { seenPreprocs.add(ipp.getClass()); @@ -301,7 +299,7 @@ public class DTypeTests extends BaseDL4JTest { for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) { Nd4j.setDefaultDataTypes(dt, dt); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Adam(0.01)) @@ -323,17 +321,17 @@ public class DTypeTests extends BaseDL4JTest { net.setInput(inD); net.setLabels(lD); net.computeGradientAndScore(); - double scoreDouble = net.score(); + double scoreDouble = net.getScore(); INDArray grads = net.getFlattenedGradients(); INDArray u = net.getUpdater().getStateViewArray(); - assertEquals(DataType.DOUBLE, net.params().dataType()); + assertEquals(DataType.DOUBLE, net.getModelParams().dataType()); assertEquals(DataType.DOUBLE, grads.dataType()); assertEquals(DataType.DOUBLE, u.dataType()); MultiLayerNetwork netFloat = net.convertDataType(DataType.FLOAT); netFloat.initGradientsView(); - assertEquals(DataType.FLOAT, netFloat.params().dataType()); + assertEquals(DataType.FLOAT, netFloat.getModelParams().dataType()); assertEquals(DataType.FLOAT, netFloat.getFlattenedGradients().dataType()); assertEquals(DataType.FLOAT, netFloat.getUpdater(true).getStateViewArray().dataType()); INDArray inF = inD.castTo(DataType.FLOAT); @@ -342,7 +340,7 @@ public class DTypeTests extends BaseDL4JTest { netFloat.setInput(inF); netFloat.setLabels(lF); netFloat.computeGradientAndScore(); - double scoreFloat = netFloat.score(); + double scoreFloat = netFloat.getScore(); INDArray gradsFloat = netFloat.getFlattenedGradients(); INDArray uFloat = netFloat.getUpdater().getStateViewArray(); @@ -354,7 +352,7 @@ public class DTypeTests extends BaseDL4JTest { MultiLayerNetwork netFP16 = net.convertDataType(DataType.HALF); netFP16.initGradientsView(); - assertEquals(DataType.HALF, netFP16.params().dataType()); + assertEquals(DataType.HALF, netFP16.getModelParams().dataType()); 
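The dtype assertions in this test exercise the renamed Model accessors; a minimal sketch, with method names taken from this diff only and `net` standing for any initialized MultiLayerNetwork or ComputationGraph:

    net.computeGradientAndScore();
    double score = net.getScore();                 // previously net.score()
    INDArray params = net.getModelParams();        // previously net.params()
    INDArray grads = net.getFlattenedGradients();  // unchanged by this patch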
assertEquals(DataType.HALF, netFP16.getFlattenedGradients().dataType()); assertEquals(DataType.HALF, netFP16.getUpdater(true).getStateViewArray().dataType()); @@ -364,7 +362,7 @@ public class DTypeTests extends BaseDL4JTest { netFP16.setInput(inH); netFP16.setLabels(lH); netFP16.computeGradientAndScore(); - double scoreHalf = netFP16.score(); + double scoreHalf = netFP16.getScore(); INDArray gradsHalf = netFP16.getFlattenedGradients(); INDArray uHalf = netFP16.getUpdater().getStateViewArray(); @@ -384,7 +382,7 @@ public class DTypeTests extends BaseDL4JTest { for (DataType dt : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) { Nd4j.setDefaultDataTypes(dt, dt); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Adam(0.01)) @@ -408,17 +406,17 @@ public class DTypeTests extends BaseDL4JTest { net.setInput(0, inD); net.setLabels(lD); net.computeGradientAndScore(); - double scoreDouble = net.score(); + double scoreDouble = net.getScore(); INDArray grads = net.getFlattenedGradients(); INDArray u = net.getUpdater().getStateViewArray(); - assertEquals(DataType.DOUBLE, net.params().dataType()); + assertEquals(DataType.DOUBLE, net.getModelParams().dataType()); assertEquals(DataType.DOUBLE, grads.dataType()); assertEquals(DataType.DOUBLE, u.dataType()); ComputationGraph netFloat = net.convertDataType(DataType.FLOAT); netFloat.initGradientsView(); - assertEquals(DataType.FLOAT, netFloat.params().dataType()); + assertEquals(DataType.FLOAT, netFloat.getModelParams().dataType()); assertEquals(DataType.FLOAT, netFloat.getFlattenedGradients().dataType()); assertEquals(DataType.FLOAT, netFloat.getUpdater(true).getStateViewArray().dataType()); INDArray inF = inD.castTo(DataType.FLOAT); @@ -427,7 +425,7 @@ public class DTypeTests extends BaseDL4JTest { netFloat.setInput(0, inF); netFloat.setLabels(lF); netFloat.computeGradientAndScore(); - double scoreFloat = netFloat.score(); + double scoreFloat = netFloat.getScore(); INDArray gradsFloat = netFloat.getFlattenedGradients(); INDArray uFloat = netFloat.getUpdater().getStateViewArray(); @@ -439,7 +437,7 @@ public class DTypeTests extends BaseDL4JTest { ComputationGraph netFP16 = net.convertDataType(DataType.HALF); netFP16.initGradientsView(); - assertEquals(DataType.HALF, netFP16.params().dataType()); + assertEquals(DataType.HALF, netFP16.getModelParams().dataType()); assertEquals(DataType.HALF, netFP16.getFlattenedGradients().dataType()); assertEquals(DataType.HALF, netFP16.getUpdater(true).getStateViewArray().dataType()); @@ -449,7 +447,7 @@ public class DTypeTests extends BaseDL4JTest { netFP16.setInput(0, inH); netFP16.setLabels(lH); netFP16.computeGradientAndScore(); - double scoreHalf = netFP16.score(); + double scoreHalf = netFP16.getScore(); INDArray gradsHalf = netFP16.getFlattenedGradients(); INDArray uHalf = netFP16.getUpdater().getStateViewArray(); @@ -475,8 +473,8 @@ public class DTypeTests extends BaseDL4JTest { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", outputLayer=" + outputLayer; - Layer ol; - Layer secondLast; + LayerConfiguration ol; + LayerConfiguration secondLast; switch (outputLayer) { case 0: ol = new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build(); @@ -503,7 +501,7 @@ public class DTypeTests extends BaseDL4JTest { } - MultiLayerConfiguration conf = new 
NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .convolutionMode(ConvolutionMode.Same) .updater(new Adam(1e-2)) @@ -531,14 +529,14 @@ public class DTypeTests extends BaseDL4JTest { .layer(new ActivationLayer(Activation.LEAKYRELU)) .layer(secondLast) .layer(ol) - .setInputType(InputType.convolutionalFlat(8, 8, 1)) + .inputType(InputType.convolutionalFlat(8, 8, 1)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); net.initGradientsView(); - assertEquals(networkDtype, net.params().dataType(), msg); + assertEquals(networkDtype, net.getModelParams().dataType(), msg); assertEquals(networkDtype, net.getFlattenedGradients().dataType(), msg); assertEquals(networkDtype, net.getUpdater(true).getStateViewArray().dataType(), msg); @@ -560,7 +558,7 @@ public class DTypeTests extends BaseDL4JTest { assertEquals(networkDtype, out.dataType(), msg); List ff = net.feedForward(in); for (int i = 0; i < ff.size(); i++) { - String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName()); + String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).getLayerConfiguration().getClass().getSimpleName()); assertEquals(networkDtype, ff.get(i).dataType(), msg); } @@ -601,8 +599,8 @@ public class DTypeTests extends BaseDL4JTest { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", outputLayer=" + outputLayer; log.info(msg); - Layer ol; - Layer secondLast; + LayerConfiguration ol; + LayerConfiguration secondLast; switch (outputLayer) { case 0: ol = new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build(); @@ -621,7 +619,7 @@ public class DTypeTests extends BaseDL4JTest { } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .convolutionMode(ConvolutionMode.Same) .updater(new Nesterovs(1e-2, 0.9)) @@ -636,14 +634,14 @@ public class DTypeTests extends BaseDL4JTest { .layer(new Upsampling3D.Builder().size(2).build()) .layer(secondLast) .layer(ol) - .setInputType(InputType.convolutional3D(Convolution3D.DataFormat.NCDHW, 8, 8, 8, 1)) + .inputType(InputType.convolutional3D(Convolution3D.DataFormat.NCDHW, 8, 8, 8, 1)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); net.initGradientsView(); - assertEquals(networkDtype, net.params().dataType(), msg); + assertEquals(networkDtype, net.getModelParams().dataType(), msg); assertEquals(networkDtype, net.getFlattenedGradients().dataType(), msg); assertEquals(networkDtype, net.getUpdater(true).getStateViewArray().dataType(), msg); @@ -664,7 +662,7 @@ public class DTypeTests extends BaseDL4JTest { assertEquals(networkDtype, out.dataType(), msg); List ff = net.feedForward(in); for (int i = 0; i < ff.size(); i++) { - String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName()); + String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? 
"input" : net.getLayer(i - 1).getLayerConfiguration().getClass().getSimpleName()); assertEquals(networkDtype, ff.get(i).dataType(), s); } @@ -712,8 +710,8 @@ public class DTypeTests extends BaseDL4JTest { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", outputLayer=" + outputLayer + " at index " + outputLayer; - Layer ol; - Layer secondLast; + LayerConfiguration ol; + LayerConfiguration secondLast; switch (outputLayer) { case 0: ol = new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build(); @@ -732,7 +730,7 @@ public class DTypeTests extends BaseDL4JTest { } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(WorkspaceMode.NONE) .inferenceWorkspaceMode(WorkspaceMode.NONE) .dataType(networkDtype) @@ -749,14 +747,14 @@ public class DTypeTests extends BaseDL4JTest { .layer(new Upsampling1D.Builder(2).build()) .layer(secondLast) .layer(ol) - .setInputType(InputType.recurrent(5, 10,RNNFormat.NCW)) + .inputType(InputType.recurrent(5, 10,RNNFormat.NCW)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); net.initGradientsView(); - assertEquals(networkDtype, net.params().dataType(), msg); + assertEquals(networkDtype, net.getModelParams().dataType(), msg); assertEquals(networkDtype, net.getFlattenedGradients().dataType(), msg); assertEquals(networkDtype, net.getUpdater(true).getStateViewArray().dataType(), msg); @@ -774,7 +772,7 @@ public class DTypeTests extends BaseDL4JTest { assertEquals(networkDtype, out.dataType(), msg); List ff = net.feedForward(in); for (int i = 0; i < ff.size(); i++) { - String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName()); + String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).getLayerConfiguration().getClass().getSimpleName()); assertEquals(networkDtype, ff.get(i).dataType(), s); } @@ -814,7 +812,7 @@ public class DTypeTests extends BaseDL4JTest { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .convolutionMode(ConvolutionMode.Same) .updater(new Adam(1e-2)) @@ -822,14 +820,14 @@ public class DTypeTests extends BaseDL4JTest { .layer(new SpaceToBatchLayer.Builder().blocks(1, 1).build()) .layer(new SpaceToDepthLayer.Builder().blocks(2).build()) .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.convolutional(28, 28, 5)) + .inputType(InputType.convolutional(28, 28, 5)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); net.initGradientsView(); - assertEquals(networkDtype, net.params().dataType(), msg); + assertEquals(networkDtype, net.getModelParams().dataType(), msg); assertEquals(networkDtype, net.getFlattenedGradients().dataType(), msg); assertEquals(networkDtype, net.getUpdater(true).getStateViewArray().dataType(), msg); @@ -840,7 +838,7 @@ public class DTypeTests extends BaseDL4JTest { assertEquals(networkDtype, out.dataType(), msg); List ff = net.feedForward(in); for (int i = 0; i < ff.size(); i++) { - String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? 
"input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName()); + String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).getLayerConfiguration().getClass().getSimpleName()); assertEquals(networkDtype, ff.get(i).dataType(), s); } @@ -878,8 +876,8 @@ public class DTypeTests extends BaseDL4JTest { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", outputLayer=" + outputLayer; - Layer ol; - Layer secondLast; + LayerConfiguration ol; + LayerConfiguration secondLast; switch (outputLayer) { case 0: ol = new RnnOutputLayer.Builder().nOut(5).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build(); @@ -897,7 +895,7 @@ public class DTypeTests extends BaseDL4JTest { throw new RuntimeException(); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .convolutionMode(ConvolutionMode.Same) .updater(new Adam(1e-2)) @@ -918,7 +916,7 @@ public class DTypeTests extends BaseDL4JTest { net.init(); net.initGradientsView(); - assertEquals(networkDtype, net.params().dataType(), msg); + assertEquals(networkDtype, net.getModelParams().dataType(), msg); assertEquals(networkDtype, net.getFlattenedGradients().dataType(), msg); assertEquals(networkDtype, net.getUpdater(true).getStateViewArray().dataType(), msg); @@ -982,12 +980,12 @@ public class DTypeTests extends BaseDL4JTest { int width = 6; int inputDepth = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .seed(123) .updater(new NoOp()) - .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6))) - .list() + .dist(new UniformDistribution(-6, 6)) + .layer(new PrimaryCapsules.Builder(primaryCapsDim, primarpCapsChannel) .kernelSize(3, 3) .stride(2, 2) @@ -996,7 +994,7 @@ public class DTypeTests extends BaseDL4JTest { .layer(new CapsuleStrengthLayer.Builder().build()) .layer(new ActivationLayer.Builder(new ActivationSoftmax()).build()) .layer(new LossLayer.Builder(new LossNegativeLogLikelihood()).build()) - .setInputType(InputType.convolutional(height, width, inputDepth)) + .inputType(InputType.convolutional(height, width, inputDepth)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -1013,7 +1011,7 @@ public class DTypeTests extends BaseDL4JTest { assertEquals(networkDtype, out.dataType(), msg); List ff = net.feedForward(in); for (int i = 0; i < ff.size(); i++) { - String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName()); + String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? 
"input" : net.getLayer(i - 1).getLayerConfiguration().getClass().getSimpleName()); assertEquals(networkDtype, ff.get(i).dataType(), s); } @@ -1052,11 +1050,11 @@ public class DTypeTests extends BaseDL4JTest { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test; - ComputationGraphConfiguration.GraphBuilder conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .seed(123) .updater(new NoOp()) - .weightInit(new WeightInitDistribution(new UniformDistribution(-6, 6))) + .dist(new UniformDistribution(-6, 6)) .graphBuilder() .addInputs("in") .setOutputs("out"); @@ -1144,7 +1142,7 @@ public class DTypeTests extends BaseDL4JTest { for (int test = 0; test < 8; test++) { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test; - ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder b = NeuralNetConfiguration.builder() .dataType(networkDtype) .seed(123) .updater(new NoOp()) @@ -1301,7 +1299,7 @@ public class DTypeTests extends BaseDL4JTest { for (int test = 0; test < 2; test++) { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test; - ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder b = NeuralNetConfiguration.builder() .dataType(networkDtype) .seed(123) .updater(new NoOp()) @@ -1395,7 +1393,7 @@ public class DTypeTests extends BaseDL4JTest { INDArray in = Nd4j.rand(networkDtype, new long[]{mb, nIn, tsLength}); INDArray labels = TestUtils.randomOneHot(mb, nOut); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .activation(Activation.TANH) .updater(new NoOp()) @@ -1408,7 +1406,7 @@ public class DTypeTests extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build()) .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(nIn)) + .inputType(InputType.recurrent(nIn)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -1418,7 +1416,7 @@ public class DTypeTests extends BaseDL4JTest { assertEquals( networkDtype, out.dataType(), msg); List ff = net.feedForward(in); for (int i = 0; i < ff.size(); i++) { - String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? "input" : net.getLayer(i - 1).conf().getLayer().getClass().getSimpleName()); + String s = msg + " - layer " + (i - 1) + " - " + (i == 0 ? 
"input" : net.getLayer(i - 1).getLayerConfiguration().getClass().getSimpleName()); assertEquals(networkDtype, ff.get(i).dataType(), s); } @@ -1482,7 +1480,7 @@ public class DTypeTests extends BaseDL4JTest { System.out.println("Starting test: " + name); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dataType(networkDtype) .activation(Activation.TANH) .updater(new NoOp()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/ComputationGraphTestRNN.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/ComputationGraphTestRNN.java index eb8c1cbcc..4197263b6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/ComputationGraphTestRNN.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/ComputationGraphTestRNN.java @@ -65,8 +65,8 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int timeSeriesLength = 12; - //4 layer network: 2 GravesLSTM + DenseLayer + RnnOutputLayer. Hence also tests preprocessors. - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + //4 layer network: 2 GravesLSTM + DenseLayerConfiguration + RnnOutputLayer. Hence also tests preprocessors. + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7) .activation(Activation.TANH) @@ -156,7 +156,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int timeSeriesLength = 6; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(7) .activation(Activation.TANH) .dist(new NormalDistribution(0, 0.5)).build(), "in") @@ -208,10 +208,10 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int timeSeriesLength = 12; - //4 layer network: 2 GravesLSTM + DenseLayer + RnnOutputLayer. Hence also tests preprocessors. + //4 layer network: 2 GravesLSTM + DenseLayerConfiguration + RnnOutputLayer. Hence also tests preprocessors. 
//Network architecture: lstm0 -> Dense -> RnnOutputLayer0 // and lstm1 -> Dense -> RnnOutputLayer1 - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in0", "in1") .addLayer("lstm0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(5).nOut(6) @@ -340,7 +340,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { int nIn = 5; int nOut = 4; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .trainingWorkspaceMode(WorkspaceMode.NONE).inferenceWorkspaceMode(WorkspaceMode.NONE) .graphBuilder() .addInputs("in") @@ -360,7 +360,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { .setOutputs("out").build(); assertEquals(BackpropType.Standard, conf.getBackpropType()); - ComputationGraphConfiguration confTBPTT = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration confTBPTT = NeuralNetConfiguration.builder().seed(12345) .trainingWorkspaceMode(WorkspaceMode.NONE).inferenceWorkspaceMode(WorkspaceMode.NONE) .graphBuilder() .addInputs("in") @@ -377,7 +377,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { .activation(Activation.SOFTMAX) .dist(new NormalDistribution(0, 0.5)).build(), "1") .setOutputs("out").backpropType(BackpropType.TruncatedBPTT) - .tBPTTForwardLength(timeSeriesLength).tBPTTBackwardLength(timeSeriesLength) + .tbpttFwdLength(timeSeriesLength).tbpttBackLength(timeSeriesLength) .setInputTypes(InputType.recurrent(nIn,timeSeriesLength,RNNFormat.NCW)) .build(); assertEquals(BackpropType.TruncatedBPTT, confTBPTT.getBackpropType()); @@ -391,9 +391,9 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { graphTBPTT.init(); graphTBPTT.clearTbpttState = false; - assertEquals(BackpropType.TruncatedBPTT, graphTBPTT.getConfiguration().getBackpropType()); - assertEquals(timeSeriesLength, graphTBPTT.getConfiguration().getTbpttFwdLength()); - assertEquals(timeSeriesLength, graphTBPTT.getConfiguration().getTbpttBackLength()); + assertEquals(BackpropType.TruncatedBPTT, graphTBPTT.getComputationGraphConfiguration().getBackpropType()); + assertEquals(timeSeriesLength, graphTBPTT.getComputationGraphConfiguration().getTbpttFwdLength()); + assertEquals(timeSeriesLength, graphTBPTT.getComputationGraphConfiguration().getTbpttBackLength()); INDArray inputData = Nd4j.rand(miniBatchSize, nIn, timeSeriesLength); INDArray labels = Nd4j.rand(miniBatchSize, nOut, timeSeriesLength); @@ -456,7 +456,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { int nTimeSlices = 20; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) @@ -473,7 +473,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { .dist(new NormalDistribution(0, 0.5)).build(), "1") .setOutputs("out").backpropType(BackpropType.TruncatedBPTT) .setInputTypes(InputType.recurrent(nIn,timeSeriesLength,RNNFormat.NCW)) - .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength).build(); + 
.tbpttBackLength(timeSeriesLength).tbpttFwdLength(timeSeriesLength).build(); Nd4j.getRandom().setSeed(12345); ComputationGraph graph = new ComputationGraph(conf); @@ -493,7 +493,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { int nIn = 5; int nOut = 4; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) @@ -509,7 +509,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { .activation(Activation.SOFTMAX) .dist(new NormalDistribution(0, 0.5)).build(), "1") .setOutputs("out").backpropType(BackpropType.TruncatedBPTT) - .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength) + .tbpttBackLength(tbpttLength).tbpttFwdLength(tbpttLength) .setInputTypes(InputType.recurrent(nIn,timeSeriesLength, RNNFormat.NCW)) .build(); @@ -520,9 +520,9 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { INDArray inputLong = Nd4j.rand(miniBatchSize, nIn, timeSeriesLength); INDArray labelsLong = Nd4j.rand(miniBatchSize, nOut, timeSeriesLength); - INDArray initialParams = graph.params().dup(); + INDArray initialParams = graph.getModelParams().dup(); graph.fit(new INDArray[] {inputLong}, new INDArray[] {labelsLong}); - INDArray afterParams = graph.params(); + INDArray afterParams = graph.getModelParams(); assertNotEquals(initialParams, afterParams); } @@ -530,13 +530,13 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { @Test public void testTbpttMasking() { //Simple "does it throw an exception" type test... - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .graphBuilder().addInputs("in") .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE) .activation(Activation.IDENTITY).nIn(1).nOut(1).build(), "in") - .setOutputs("out").backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(8) + .setOutputs("out").backpropType(BackpropType.TruncatedBPTT).tbpttFwdLength(8) .setInputTypes(InputType.recurrent(1,1,RNNFormat.NCW)) - .tBPTTBackwardLength(8).build(); + .tbpttBackLength(8).build(); ComputationGraph net = new ComputationGraph(conf); net.init(); @@ -553,12 +553,12 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { public void checkMaskArrayClearance() { for (boolean tbptt : new boolean[] {true, false}) { //Simple "does it throw an exception" type test... - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .graphBuilder().addInputs("in") .addLayer("out", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE) .activation(Activation.IDENTITY).nIn(1).nOut(1).build(), "in") .setOutputs("out").backpropType(tbptt ? 
BackpropType.TruncatedBPTT : BackpropType.Standard) - .tBPTTForwardLength(8).tBPTTBackwardLength(8).build(); + .tbpttFwdLength(8).tbpttBackLength(8).build(); ComputationGraph net = new ComputationGraph(conf); net.init(); @@ -616,7 +616,7 @@ public class ComputationGraphTestRNN extends BaseDL4JTest { int nHiddenUnits = 17; try { - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new org.deeplearning4j.nn.conf.layers.LSTM.Builder().nIn(nIn).nOut(nHiddenUnits).build(), "in") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphCNN.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphCNN.java index 95691fed6..4129592b6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphCNN.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphCNN.java @@ -57,7 +57,7 @@ public class TestCompGraphCNN extends BaseDL4JTest { protected static ComputationGraphConfiguration getMultiInputGraphConfig() { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("input") .setInputTypes(InputType.convolutional(32, 32, 3)) @@ -117,7 +117,7 @@ public class TestCompGraphCNN extends BaseDL4JTest { boolean orderOK = Arrays.equals(expOrder1, order) || Arrays.equals(expOrder2, order); assertTrue(orderOK); - INDArray params = graph.params(); + INDArray params = graph.getModelParams(); assertNotNull(params); // confirm param shape is what is expected @@ -129,7 +129,7 @@ public class TestCompGraphCNN extends BaseDL4JTest { // params are set graph.setParams(arr); - params = graph.params(); + params = graph.getModelParams(); assertEquals(arr, params); //Number of inputs and outputs: @@ -154,7 +154,7 @@ public class TestCompGraphCNN extends BaseDL4JTest { DataSet trainInput; ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .seed(123).graphBuilder().addInputs("input") .setInputTypes(InputType.convolutional(nChannels, imageWidth, diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphUnsupervised.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphUnsupervised.java index a17979bf2..2cf9e0db4 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphUnsupervised.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestCompGraphUnsupervised.java @@ -24,7 +24,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.EarlyTerminationDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.layers.variational.BernoulliReconstructionDistribution; @@ -42,7 +41,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.conditions.Conditions; import org.nd4j.linalg.learning.config.Adam; -import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -61,7 +59,7 @@ public 
class TestCompGraphUnsupervised extends BaseDL4JTest { for(WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(1e-3)) .weightInit(WeightInit.XAVIER) @@ -98,11 +96,11 @@ public class TestCompGraphUnsupervised extends BaseDL4JTest { Map paramsBefore = new HashMap<>(); //Pretrain first layer - for(Map.Entry e : cg.paramTable().entrySet()){ + for(Map.Entry e : cg.getParamTable().entrySet()){ paramsBefore.put(e.getKey(), e.getValue().dup()); } cg.pretrainLayer("vae1", ds); - for(Map.Entry e : cg.paramTable().entrySet()){ + for(Map.Entry e : cg.getParamTable().entrySet()){ if(e.getKey().startsWith("vae1")){ assertNotEquals(paramsBefore.get(e.getKey()), e.getValue()); } else { @@ -110,16 +108,16 @@ public class TestCompGraphUnsupervised extends BaseDL4JTest { } } - int count = Nd4j.getExecutioner().exec(new MatchCondition(cg.params(), Conditions.isNan())).getInt(0); + int count = Nd4j.getExecutioner().exec(new MatchCondition(cg.getModelParams(), Conditions.isNan())).getInt(0); assertEquals(0, count); //Pretrain second layer - for(Map.Entry e : cg.paramTable().entrySet()){ + for(Map.Entry e : cg.getParamTable().entrySet()){ paramsBefore.put(e.getKey(), e.getValue().dup()); } cg.pretrainLayer("vae2", ds); - for(Map.Entry e : cg.paramTable().entrySet()){ + for(Map.Entry e : cg.getParamTable().entrySet()){ if(e.getKey().startsWith("vae2")){ assertNotEquals(paramsBefore.get(e.getKey()), e.getValue()); } else { @@ -127,7 +125,7 @@ public class TestCompGraphUnsupervised extends BaseDL4JTest { } } - count = Nd4j.getExecutioner().exec(new MatchCondition(cg.params(), Conditions.isNan())).getInt(0); + count = Nd4j.getExecutioner().exec(new MatchCondition(cg.getModelParams(), Conditions.isNan())).getInt(0); assertEquals(0, count); } } @@ -137,13 +135,13 @@ public class TestCompGraphUnsupervised extends BaseDL4JTest { for(WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) { - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(1e-3)) .weightInit(WeightInit.XAVIER) .inferenceWorkspaceMode(wsm) .trainingWorkspaceMode(wsm) - .list() + .layer(new VariationalAutoencoder.Builder() .nIn(784) .nOut(32) @@ -168,8 +166,8 @@ public class TestCompGraphUnsupervised extends BaseDL4JTest { net.init(); ComputationGraph cg = net.toComputationGraph(); - cg.getConfiguration().setInferenceWorkspaceMode(wsm); - cg.getConfiguration().setTrainingWorkspaceMode(wsm); + cg.getComputationGraphConfiguration().setInferenceWorkspaceMode(wsm); + cg.getComputationGraphConfiguration().setTrainingWorkspaceMode(wsm); DataSetIterator ds = new EarlyTerminationDataSetIterator(new MnistDataSetIterator(1, true, 12345), 1); Nd4j.getRandom().setSeed(12345); net.pretrainLayer(0, ds); @@ -178,7 +176,7 @@ public class TestCompGraphUnsupervised extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); cg.pretrainLayer("0", ds); - assertEquals(net.params(), cg.params()); + assertEquals(net.getModelParams(), cg.getModelParams()); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestComputationGraphNetwork.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestComputationGraphNetwork.java index 7a918a674..46180da6d 100644 --- 
a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestComputationGraphNetwork.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestComputationGraphNetwork.java @@ -98,7 +98,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { public File testDir; private static ComputationGraphConfiguration getIrisGraphConfiguration() { - return new NeuralNetConfiguration.Builder().seed(12345) + return NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() .addInputs("input") .addLayer("firstLayer", new DenseLayer.Builder().nIn(4).nOut(5).build(), "input") @@ -106,9 +106,9 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { .setOutputs("outputLayer").build(); } - private static MultiLayerConfiguration getIrisMLNConfiguration() { - return new NeuralNetConfiguration.Builder().seed(12345) - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + private static NeuralNetConfiguration getIrisMLNConfiguration() { + return NeuralNetConfiguration.builder().seed(12345) + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new DenseLayer.Builder().nIn(4).nOut(5).build()) .layer(1, new OutputLayer.Builder().nIn(5).nOut(3).activation(Activation.SOFTMAX).build()).build(); } @@ -150,7 +150,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph graph = new ComputationGraph(configuration); graph.init(); - MultiLayerConfiguration mlc = getIrisMLNConfiguration(); + NeuralNetConfiguration mlc = getIrisMLNConfiguration(); MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -159,7 +159,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { DataSet ds = iris.next(); graph.setInput(0, ds.getFeatures()); - net.setParams(graph.params()); + net.setParams(graph.getModelParams()); Map activations = graph.feedForward(false); List feedForward = net.feedForward(ds.getFeatures()); @@ -184,7 +184,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int[] expOrder = new int[]{0, 1, 2}; assertArrayEquals(expOrder, order); //Only one valid order: 0 (input) -> 1 (firstlayer) -> 2 (outputlayer) - INDArray params = graph.params(); + INDArray params = graph.getModelParams(); assertNotNull(params); int nParams = getNumParams(); @@ -194,7 +194,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertEquals(nParams, arr.length()); graph.setParams(arr); - params = graph.params(); + params = graph.getModelParams(); assertEquals(arr, params); //Number of inputs and outputs: @@ -209,7 +209,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph graph = new ComputationGraph(configuration); graph.init(); - MultiLayerConfiguration mlc = getIrisMLNConfiguration(); + NeuralNetConfiguration mlc = getIrisMLNConfiguration(); MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -244,7 +244,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph graph = new ComputationGraph(configuration); graph.init(); - MultiLayerConfiguration mlc = getIrisMLNConfiguration(); + NeuralNetConfiguration mlc = getIrisMLNConfiguration(); MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -295,7 +295,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph graph = new ComputationGraph(configuration); graph.init(); - MultiLayerConfiguration mlnConfig = 
getIrisMLNConfiguration(); + NeuralNetConfiguration mlnConfig = getIrisMLNConfiguration(); MultiLayerNetwork net = new MultiLayerNetwork(mlnConfig); net.init(); @@ -315,8 +315,8 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { graph.fit(iris); //Check that parameters are equal for both models after fitting: - INDArray paramsMLN = net.params(); - INDArray paramsGraph = graph.params(); + INDArray paramsMLN = net.getModelParams(); + INDArray paramsGraph = graph.getModelParams(); assertNotEquals(params, paramsGraph); assertEquals(paramsMLN, paramsGraph); @@ -332,7 +332,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { MultiDataSetIterator iter = new RecordReaderMultiDataSetIterator.Builder(10).addReader("iris", rr) .addInput("iris", 0, 3).addOutputOneHot("iris", 4, 3).build(); - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .updater(new Sgd(0.1)) .graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out", @@ -377,7 +377,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph graph = new ComputationGraph(configuration); graph.init(); - MultiLayerConfiguration mlc = getIrisMLNConfiguration(); + NeuralNetConfiguration mlc = getIrisMLNConfiguration(); MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -401,14 +401,14 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { public void testPreprocessorAddition() { //Also check that nIns are set automatically //First: check FF -> RNN - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .setInputTypes(InputType.feedForward(5)) .addLayer("rnn", new GravesLSTM.Builder().nOut(5).build(), "in") .addLayer("out", new RnnOutputLayer.Builder().nOut(5).activation(Activation.SOFTMAX).build(), "rnn").setOutputs("out").build(); - assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf1.getVertices().get("rnn")).getLayerConf().getLayer()) + assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf1.getVertices().get("rnn")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getNIn()); - assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf1.getVertices().get("out")).getLayerConf().getLayer()) + assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf1.getVertices().get("out")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getNIn()); LayerVertex lv1 = (LayerVertex) conf1.getVertices().get("rnn"); @@ -417,15 +417,15 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertNull(lv2.getPreProcessor()); //Check RNN -> FF -> RNN - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .setInputTypes(InputType.recurrent(5)) .addLayer("ff", new DenseLayer.Builder().nOut(5).build(), "in") .addLayer("out", new RnnOutputLayer.Builder().nOut(5).activation(Activation.SOFTMAX).build(), "ff") .setOutputs("out").build(); - assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf2.getVertices().get("ff")).getLayerConf().getLayer()) + assertEquals(5, ((FeedForwardLayer) ((LayerVertex) 
conf2.getVertices().get("ff")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getNIn()); - assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf2.getVertices().get("out")).getLayerConf().getLayer()) + assertEquals(5, ((FeedForwardLayer) ((LayerVertex) conf2.getVertices().get("out")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getNIn()); lv1 = (LayerVertex) conf2.getVertices().get("ff"); @@ -434,7 +434,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertTrue(lv2.getPreProcessor() instanceof FeedForwardToRnnPreProcessor); //CNN -> Dense - ComputationGraphConfiguration conf3 = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf3 = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .setInputTypes(InputType.convolutional(28, 28, 1)) .addLayer("cnn", new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0).stride(2, 2) .nOut(3).build(), "in") //(28-2+0)/2+1 = 14 @@ -460,11 +460,11 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { LayerVertex lv4 = (LayerVertex) conf3.getVertices().get("out"); assertNull(lv4.getPreProcessor()); //Check nIns: - assertEquals(7 * 7 * 3, ((FeedForwardLayer) lv3.getLayerConf().getLayer()).getNIn()); + assertEquals(7 * 7 * 3, ((FeedForwardLayer) lv3.getNetConfiguration().getFlattenedLayerConfigurations().get(0)).getNIn()); //CNN->Dense, RNN->Dense, Dense->RNN ComputationGraphConfiguration conf4 = - new NeuralNetConfiguration.Builder().graphBuilder().addInputs("inCNN", "inRNN") + NeuralNetConfiguration.builder().graphBuilder().addInputs("inCNN", "inRNN") .setInputTypes(InputType.convolutional(28, 28, 1), InputType.recurrent(5)) .addLayer("cnn", new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0) .stride(2, 2).nOut(3).build(), "inCNN") //(28-2+0)/2+1 = 14 @@ -495,14 +495,14 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { LayerVertex lv5 = (LayerVertex) conf4.getVertices().get("out"); assertTrue(lv5.getPreProcessor() instanceof FeedForwardToRnnPreProcessor); //Check nIns: - assertEquals(7 * 7 * 3, ((FeedForwardLayer) lv3.getLayerConf().getLayer()).getNIn()); - assertEquals(5, ((FeedForwardLayer) lv4.getLayerConf().getLayer()).getNIn()); - assertEquals(20, ((FeedForwardLayer) lv5.getLayerConf().getLayer()).getNIn()); //10+10 out of the merge vertex -> 20 in to output layer vertex + assertEquals(7 * 7 * 3, ((FeedForwardLayer) lv3.getNetConfiguration().getFlattenedLayerConfigurations().get(0)).getNIn()); + assertEquals(5, ((FeedForwardLayer) lv4.getNetConfiguration().getFlattenedLayerConfigurations().get(0)).getNIn()); + assertEquals(20, ((FeedForwardLayer) lv5.getNetConfiguration().getFlattenedLayerConfigurations().get(0)).getNIn()); //10+10 out of the merge vertex -> 20 in to output layer vertex //Input to 2 CNN layers: ComputationGraphConfiguration conf5 = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("input") .setInputTypes(InputType.convolutional(28, 28, 1)) @@ -575,7 +575,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { public void testCompGraphUnderscores() { //Problem: underscores in names could be problematic for ComputationGraphUpdater, HistogramIterationListener - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() 
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() .addInputs("input") .addLayer("first_layer", new DenseLayer.Builder().nIn(4).nOut(5).build(), "input") @@ -594,7 +594,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testPreTraining() { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-6)) .l2(2e-4).graphBuilder().addInputs("in") @@ -636,7 +636,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph net = new ComputationGraph(conf); net.init(); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(10, 150); net.pretrain(iter); @@ -648,7 +648,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int nIn = 5; int nOut = 6; ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345).l1(0.01).l2(0.01) + NeuralNetConfiguration.builder().seed(12345).l1(0.01).l2(0.01) .updater(new Sgd(0.1)) .activation(Activation.TANH).weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in") @@ -660,7 +660,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { .setOutputs("2").build(); ComputationGraphConfiguration confNoReg = - new NeuralNetConfiguration.Builder().seed(12345).updater(new Sgd(0.1)).activation(Activation.TANH) + NeuralNetConfiguration.builder().seed(12345).updater(new Sgd(0.1)).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(20).build(), "in") .addLayer("1", new DenseLayer.Builder().nIn(20).nOut(30).build(), "0") @@ -675,7 +675,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph netNoReg = new ComputationGraph(confNoReg); netNoReg.init(); - netNoReg.setParams(net.params().dup()); + netNoReg.setParams(net.getModelParams().dup()); //Score single example, and compare to scoreExamples: INDArray input = Nd4j.rand(3, nIn); @@ -717,7 +717,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { INDArray outData = Nd4j.rand(3, 10); Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration standard = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + ComputationGraphConfiguration standard = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .trainingWorkspaceMode(ws).inferenceWorkspaceMode(ws) .seed(12345).graphBuilder().addInputs("in") .addLayer("l0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in") @@ -729,7 +729,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration external = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + ComputationGraphConfiguration external = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .trainingWorkspaceMode(ws).inferenceWorkspaceMode(ws) .seed(12345).graphBuilder().addInputs("in") .addLayer("l0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in").setOutputs("l0") @@ -771,7 +771,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { for(WorkspaceMode ws : WorkspaceMode.values()) { // System.out.println("***** WORKSPACE: " + ws); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() 
.updater(new Adam(0.01)) .trainingWorkspaceMode(ws) .inferenceWorkspaceMode(ws) @@ -819,7 +819,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int nIn = 2; int nOut = 4; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(4).activation(Activation.RELU).build(), "in") @@ -857,7 +857,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { expectedGradient.setGradientFor("output_W", Nd4j.ones(5, 3)); expectedGradient.setGradientFor("output_b", Nd4j.ones(1, 3)); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).graphBuilder() .addInputs("input").addLayer("first", new DenseLayer.Builder().nIn(4).nOut(5).build(), "input") .addLayer("output", new OutputLayer.Builder().nIn(5).nOut(3).activation(Activation.SOFTMAX).build(), "first") @@ -878,13 +878,13 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { net.setParam("first_b", Nd4j.ones(1, 5)); net.setParam("output_W", Nd4j.ones(5, 3)); net.setParam("output_b", Nd4j.ones(1, 3)); - INDArray actualParams = net.params(); + INDArray actualParams = net.getModelParams(); // Confirm params assertEquals(Nd4j.ones(1, 43), actualParams); net.update(expectedGradient); - actualParams = net.params(); + actualParams = net.getModelParams(); assertEquals(Nd4j.ones(1, 43).addi(1), actualParams); } @@ -893,7 +893,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { public void testCnnFlatInputType1() { //First: check conv input type. Expect: no preprocessor, nIn set appropriately - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .setInputTypes(InputType.convolutional(10, 8, 3)) .addLayer("layer", new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0).stride(1, 1) @@ -903,14 +903,14 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { .build(); LayerVertex lv = (LayerVertex) conf.getVertices().get("layer"); - FeedForwardLayer l = ((FeedForwardLayer) (lv).getLayerConf().getLayer()); + FeedForwardLayer l = ((FeedForwardLayer) (lv).getNetConfiguration().getFlattenedLayerConfigurations().get(0)); assertEquals(3, l.getNIn()); assertNull(lv.getPreProcessor()); //Check the equivalent config, but with flat conv data input instead //In this case, the only difference should be the addition of a preprocessor //First: check conv input type. 
Expect: no preprocessor, nIn set appropriately - conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .setInputTypes(InputType.convolutionalFlat(10, 8, 3)) .addLayer("layer", new ConvolutionLayer.Builder().kernelSize(2, 2).padding(0, 0).stride(1, 1) @@ -920,7 +920,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { .build(); lv = (LayerVertex) conf.getVertices().get("layer"); - l = ((FeedForwardLayer) (lv).getLayerConf().getLayer()); + l = ((FeedForwardLayer) (lv).getNetConfiguration().getFlattenedLayerConfigurations().get(0)); assertEquals(3, l.getNIn()); assertNotNull(lv.getPreProcessor()); InputPreProcessor preProcessor = lv.getPreProcessor(); @@ -932,7 +932,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //Finally, check configuration with a subsampling layer - conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .setInputTypes(InputType.convolutionalFlat(10, 8, 3)) .addLayer("l0", new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0) .build(), "in") @@ -945,7 +945,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //Check subsampling layer: lv = (LayerVertex) conf.getVertices().get("l0"); - SubsamplingLayer sl = ((SubsamplingLayer) (lv).getLayerConf().getLayer()); + SubsamplingLayer sl = ((SubsamplingLayer) (lv).getNetConfiguration().getFlattenedLayerConfigurations().get(0)); assertNotNull(lv.getPreProcessor()); preProcessor = lv.getPreProcessor(); assertTrue(preProcessor instanceof FeedForwardToCnnPreProcessor); @@ -955,7 +955,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertEquals(3, preproc.getNumChannels()); //Check dense layer lv = (LayerVertex) conf.getVertices().get("layer"); - l = ((FeedForwardLayer) (lv).getLayerConf().getLayer()); + l = ((FeedForwardLayer) (lv).getNetConfiguration().getFlattenedLayerConfigurations().get(0)); assertEquals(3, l.getNIn()); assertNull(lv.getPreProcessor()); @@ -970,7 +970,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { graph.init(); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration mlnConfig = getIrisMLNConfiguration(); + NeuralNetConfiguration mlnConfig = getIrisMLNConfiguration(); MultiLayerNetwork net = new MultiLayerNetwork(mlnConfig); net.init(); @@ -999,7 +999,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { for (OptimizationAlgorithm oa : oas) { // System.out.println(oa); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().optimizationAlgo(oa).graphBuilder() + NeuralNetConfiguration.builder().optimizationAlgo(oa).graphBuilder() .addInputs("input") .addLayer("first", new DenseLayer.Builder().nIn(4).nOut(5).build(), "input") .addLayer("output", new OutputLayer.Builder().nIn(5).nOut(3).activation(Activation.SOFTMAX).build(), @@ -1016,7 +1016,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testIterationCountAndPersistence() throws IOException { Nd4j.getRandom().setSeed(123); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) @@ -1033,15 +1033,15 @@ public 
class TestComputationGraphNetwork extends BaseDL4JTest { DataSetIterator iter = new IrisDataSetIterator(50, 150); - assertEquals(0, network.getConfiguration().getIterationCount()); + assertEquals(0, network.getComputationGraphConfiguration().getIterationCount()); network.fit(iter); - assertEquals(3, network.getConfiguration().getIterationCount()); + assertEquals(3, network.getComputationGraphConfiguration().getIterationCount()); iter.reset(); network.fit(iter); - assertEquals(6, network.getConfiguration().getIterationCount()); + assertEquals(6, network.getComputationGraphConfiguration().getIterationCount()); iter.reset(); network.fit(iter.next()); - assertEquals(7, network.getConfiguration().getIterationCount()); + assertEquals(7, network.getComputationGraphConfiguration().getIterationCount()); ByteArrayOutputStream baos = new ByteArrayOutputStream(); ModelSerializer.writeModel(network, baos, true); @@ -1049,12 +1049,12 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ByteArrayInputStream bais = new ByteArrayInputStream(asBytes); ComputationGraph net = ModelSerializer.restoreComputationGraph(bais, true); - assertEquals(7, net.getConfiguration().getIterationCount()); + assertEquals(7, net.getComputationGraphConfiguration().getIterationCount()); } @Test public void printSummary() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); ComputationGraphConfiguration conf = overallConf.graphBuilder().addInputs("inCentre", "inRight") @@ -1095,7 +1095,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testFeedForwardIncludeNonLayerVertices() { - ComputationGraphConfiguration c = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration c = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(5).nOut(5).build(), "in") .addLayer("1", new DenseLayer.Builder().nIn(5).nOut(5).build(), "in") .addVertex("merge", new MergeVertex(), "0", "1") @@ -1123,7 +1123,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //Users generally shouldn't do this, but multiple setOutputs calls should *replace* not *add* outputs - ComputationGraphConfiguration c = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration c = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("out", new OutputLayer.Builder().nIn(10).nOut(5).activation(Activation.SOFTMAX).build(), "in").setOutputs("out") .setOutputs("out").build(); @@ -1135,7 +1135,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { public void testDropoutValidation() { //At one point: this threw an exception due to incorrect validation for (boolean dropConnect : new boolean[]{false, true}) { - new NeuralNetConfiguration.Builder().weightNoise(new DropConnect(0.5)) + NeuralNetConfiguration.builder().weightNoise(new DropConnect(0.5)) .graphBuilder().setInputTypes(InputType.feedForward(1)).addInputs("input1") .addLayer("output", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(1).nOut(1) @@ -1151,7 +1151,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //Don't care about this being valid ComputationGraphConfiguration c = - new NeuralNetConfiguration.Builder().l1(0.5).l2(0.6).graphBuilder() + 
NeuralNetConfiguration.builder().l1(0.5).l2(0.6).graphBuilder() .addInputs("in") .addLayer("sub1", new SubsamplingLayer.Builder(2, 2).build(), "in") .addLayer("sub2", new Subsampling1DLayer.Builder(2).build(), "sub1") @@ -1178,7 +1178,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testErrorNoOutputLayer() { - ComputationGraphConfiguration c = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration c = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in").setOutputs("dense") .build(); @@ -1202,7 +1202,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //When a vertex supports only one input, and gets multiple inputs - we should automatically add a merge //vertex - NeuralNetConfiguration nnc = new NeuralNetConfiguration(); + NeuralNetConfiguration nnc = NeuralNetConfiguration.builder().build(); nnc.setLayer(new DenseLayer.Builder().build()); GraphVertex[] singleInputVertices = new GraphVertex[]{new L2NormalizeVertex(), new LayerVertex(nnc, null), new PoolHelperVertex(), new PreprocessorVertex(), new ReshapeVertex(1, 1), @@ -1210,7 +1210,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { new DuplicateToTimeSeriesVertex("in1"), new LastTimeStepVertex("in1")}; for (GraphVertex gv : singleInputVertices) { - ComputationGraphConfiguration c = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration c = NeuralNetConfiguration.builder().graphBuilder() .addInputs("in1", "in2").addVertex("gv", gv, "in1", "in2").setOutputs("gv").build(); boolean foundMerge = false; @@ -1238,7 +1238,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int depth = 3; INDArray img = Nd4j.ones(minibatch, depth, height, width); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("input") .addLayer("L1", new ConvolutionLayer.Builder(new int[]{1, 1}, new int[]{1, 1}, new int[]{0, 0}).nIn(depth).nOut(depth) @@ -1262,7 +1262,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testEpochCounter() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).build(), "in") @@ -1272,18 +1272,18 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph net = new ComputationGraph(conf); net.init(); - assertEquals(0, net.getConfiguration().getEpochCount()); + assertEquals(0, net.getComputationGraphConfiguration().getEpochCount()); DataSetIterator iter = new IrisDataSetIterator(150, 150); for( int i=0; i<4; i++ ){ - assertEquals(i, net.getConfiguration().getEpochCount()); + assertEquals(i, net.getComputationGraphConfiguration().getEpochCount()); net.fit(iter); - assertEquals(i+1, net.getConfiguration().getEpochCount()); + assertEquals(i+1, net.getComputationGraphConfiguration().getEpochCount()); } - assertEquals(4, net.getConfiguration().getEpochCount()); + assertEquals(4, net.getComputationGraphConfiguration().getEpochCount()); ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -1293,7 +1293,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { 
ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ComputationGraph restored = ModelSerializer.restoreComputationGraph(bais, true); - assertEquals(4, restored.getConfiguration().getEpochCount()); + assertEquals(4, restored.getComputationGraphConfiguration().getEpochCount()); } @Test @@ -1302,7 +1302,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int V_HEIGHT = 130; int V_NFRAMES = 150; ComputationGraphConfiguration confForArchitecture = - new NeuralNetConfiguration.Builder().seed(12345).l2(0.001) //l2 regularization on all layers + NeuralNetConfiguration.builder().seed(12345).l2(0.001) //l2 regularization on all layers .updater(new AdaGrad(0.4)).graphBuilder() .addInputs("in") .addLayer("layer0", new ConvolutionLayer.Builder(10, 10).nIn(3) //3 channels: RGB @@ -1331,7 +1331,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { .inputPreProcessor("layer3", new CnnToFeedForwardPreProcessor(7, 7, 10)) .inputPreProcessor("layer4", new FeedForwardToRnnPreProcessor()) .backpropType(BackpropType.TruncatedBPTT) - .tBPTTForwardLength(V_NFRAMES / 5).tBPTTBackwardLength(V_NFRAMES / 5).build(); + .tbpttFwdLength(V_NFRAMES / 5).tbpttBackLength(V_NFRAMES / 5).build(); ComputationGraph modelExpectedArch = new ComputationGraph(confForArchitecture); modelExpectedArch.init(); ComputationGraph modelMow = new TransferLearning.GraphBuilder(modelExpectedArch).setFeatureExtractor("layer2").build(); @@ -1347,7 +1347,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { public void testInputClearance() throws Exception { //Activations should be cleared - if not, it's possible for out of (workspace) scope arrays to be around // which can cause a crash - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .convolutionMode(ConvolutionMode.Same) .graphBuilder() .addInputs("in") @@ -1383,7 +1383,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { for(boolean allowDisconnected : new boolean[]{false, true}) { try { - ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder b = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("0", new DenseLayer.Builder().activation(Activation.SIGMOID).nOut(8).build(), "in") @@ -1414,7 +1414,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testLayerSize(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") @@ -1436,7 +1436,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertEquals(3, net.layerInputSize(0)); assertEquals(0, net.layerInputSize(1)); - assertEquals(((FeedForwardLayer)net.getLayer(2).conf().getLayer()).getNIn(), net.layerInputSize(2)); + assertEquals(((FeedForwardLayer)net.getLayer(2).getLayerConfiguration()).getNIn(), net.layerInputSize(2)); assertEquals(30, net.layerInputSize(3)); assertEquals(6, net.layerSize("0")); @@ -1446,14 +1446,14 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertEquals(3, net.layerInputSize("0")); assertEquals(0, net.layerInputSize("1")); - assertEquals(((FeedForwardLayer)net.getLayer(2).conf().getLayer()).getNIn(), net.layerInputSize("2")); + assertEquals(((FeedForwardLayer)net.getLayer(2).getLayerConfiguration()).getNIn(), net.layerInputSize("2")); assertEquals(30, 
net.layerInputSize("3")); } @Test public void testZeroParamNet() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new SubsamplingLayer.Builder().kernelSize(2,2).stride(2,2).build(), "in") @@ -1494,7 +1494,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { final String inputName = "input"; final String outputName = "output"; final String scaleName = "scale"; - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() //.inferenceWorkspaceMode(WorkspaceMode.NONE) .graphBuilder() .addInputs(inputName) @@ -1535,7 +1535,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { DataSet all = new IrisDataSetIterator(150,150).next(); DataSetIterator iter = new IrisDataSetIterator(5,150); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .graphBuilder() .addInputs("in") @@ -1558,7 +1558,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //Test for a simple net: - ComputationGraphConfiguration.GraphBuilder builder = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder builder = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in1", "in2") .layer("0", new DenseLayer.Builder().nOut(10).build(), "in1") @@ -1595,7 +1595,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testTopoSortSaving(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in1", "in2") .addLayer("l0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in1") @@ -1619,13 +1619,13 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { GraphIndices indices = cg.calculateIndices(); int[] order = cg.topologicalSortOrder(); - List strOrder = cg.getConfiguration().getTopologicalOrderStr(); + List strOrder = cg.getComputationGraphConfiguration().getTopologicalOrderStr(); INDArray[] out1 = cg.output(in); //Check it's the same after loading: ComputationGraph cg2 = TestUtils.testModelSerialization(cg); int[] order2 = cg2.topologicalSortOrder(); - List strOrder2 = cg.getConfiguration().getTopologicalOrderStr(); + List strOrder2 = cg.getComputationGraphConfiguration().getTopologicalOrderStr(); assertArrayEquals(order, order2); assertEquals(strOrder, strOrder2); @@ -1633,15 +1633,15 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { assertArrayEquals(out1, out2); //Delete the topological order, ensure it gets recreated properly: - ComputationGraphConfiguration conf3 = cg2.getConfiguration().clone(); + ComputationGraphConfiguration conf3 = cg2.getComputationGraphConfiguration().clone(); conf3.setTopologicalOrder(null); conf3.setTopologicalOrderStr(null); ComputationGraph cg3 = new ComputationGraph(conf3); cg3.init(); - cg3.setParams(cg2.params()); + cg3.setParams(cg2.getModelParams()); int[] order3 = cg3.topologicalSortOrder(); - List strOrder3 = cg.getConfiguration().getTopologicalOrderStr(); + List strOrder3 = cg.getComputationGraphConfiguration().getTopologicalOrderStr(); INDArray[] out3 = cg3.output(in); assertArrayEquals(order, order3); assertEquals(strOrder, strOrder3); @@ -1673,7 
+1673,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph g = new ComputationGraph(conf2); g.init(); - g.setParamTable(cg.paramTable()); + g.setParamTable(cg.getParamTable()); int[] origOrder = g.topologicalSortOrder(); INDArray[] out4 = g.output(in); @@ -1694,7 +1694,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //The fit methods should *not* do layerwise pretraining: - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") @@ -1712,7 +1712,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { exp.add(ComputationGraph.class); MultiLayerTest.CheckModelsListener listener = new MultiLayerTest.CheckModelsListener(); - net.setListeners(listener); + net.addTrainingListeners(listener); INDArray f = Nd4j.create(1,10); INDArray l = Nd4j.create(1,10); @@ -1742,7 +1742,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testAllowInputModification(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in1", "in2") @@ -1781,7 +1781,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testCompGraphDropoutOutputLayers(){ //https://github.com/deeplearning4j/deeplearning4j/issues/6326 - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dropOut(0.8) .graphBuilder() .addInputs("in1", "in2") @@ -1819,7 +1819,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testCompGraphDropoutOutputLayers2() { //https://github.com/deeplearning4j/deeplearning4j/issues/6326 - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dropOut(0.8) .graphBuilder() .addInputs("in1", "in2") @@ -1854,7 +1854,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testAddRemoveVertex() { - new NeuralNetConfiguration.Builder().graphBuilder() + NeuralNetConfiguration.builder().graphBuilder() .addVertex("toRemove", new ScaleVertex(0), "don't care") .addVertex("test", new ScaleVertex(0), "toRemove") .removeVertex("toRemove", true); @@ -1864,7 +1864,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testGetSetParamUnderscores(){ //Test get/set param with underscores in layer nome - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("layer_zero", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in") @@ -1874,7 +1874,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph cg = new ComputationGraph(conf); cg.init(); - cg.params().assign(Nd4j.linspace(1, 220, 220).reshape(1, -11)); + cg.getModelParams().assign(Nd4j.linspace(1, 220, 220).reshape(1, -11)); INDArray p0w = cg.getParam("layer_zero_W"); assertEquals(Nd4j.linspace(1, 100, 100).reshape('f', 10, 10), p0w); @@ -1890,7 +1890,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testOutputSpecificLayers(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = 
NeuralNetConfiguration.builder() .seed(12345) .graphBuilder() .addInputs("in") @@ -1918,7 +1918,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void singleInputElemVertex() { final InputType inputType = InputType.convolutional(10, 10, 2); - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() .graphBuilder() .setInputTypes(inputType) .addInputs("input") @@ -1935,7 +1935,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testCloneDropoutIndependence(){ - val modelConf = new NeuralNetConfiguration.Builder() + val modelConf = NeuralNetConfiguration.builder() .updater(new Adam(0.01)) .weightInit(WeightInit.XAVIER_UNIFORM) .biasInit(0) @@ -1968,8 +1968,8 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { ComputationGraph cg2 = model.clone(); - IDropout d1 = model.getLayer(0).conf().getLayer().getIDropout(); - IDropout d2 = cg2.getLayer(0).conf().getLayer().getIDropout(); + IDropout d1 = model.getLayer(0).getLayerConfiguration().getIDropout(); + IDropout d2 = cg2.getLayer(0).getLayerConfiguration().getIDropout(); assertNotSame(d1, d2); //Should not be same object! assertEquals(d1, d2); //But should be equal @@ -1982,7 +1982,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int hiddenSize = 100; int dataSize = 10; int seqLen = 5; - ComputationGraphConfiguration configuration = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration configuration = NeuralNetConfiguration.builder() .updater(new Adam()) .graphBuilder() .addInputs("x_emb") @@ -2021,7 +2021,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { //https://github.com/deeplearning4j/deeplearning4j/issues/6809#issuecomment-463892644 double lr = 1e-3; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Adam(lr)) @@ -2121,7 +2121,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { int outputSize = 6; int layerSize = 3; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(12345) .weightInit(WeightInit.XAVIER) @@ -2152,7 +2152,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testConv3dMergeVertex(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addLayer("l0", new Convolution3D.Builder().kernelSize(2,2,2).stride(1,1,1).nIn(3).nOut(3).dataFormat(Convolution3D.DataFormat.NCDHW).build(), "in") @@ -2172,7 +2172,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testDualEmbedding(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("e1", new EmbeddingLayer.Builder().nIn(10).nOut(5).build(), "in") @@ -2191,7 +2191,7 @@ public class TestComputationGraphNetwork extends BaseDL4JTest { @Test public void testMergeNchw() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() 
.convolutionMode(ConvolutionMode.Same) .graphBuilder() .addInputs("in") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestSetGetParameters.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestSetGetParameters.java index 0c17238db..685920d10 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestSetGetParameters.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestSetGetParameters.java @@ -42,7 +42,7 @@ public class TestSetGetParameters extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); //Create configuration. Doesn't matter if this doesn't actually work for forward/backward pass here - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in").addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in") .addLayer("1", new GravesLSTM.Builder().nIn(10).nOut(10).build(), "in") .addLayer("2", new GravesBidirectionalLSTM.Builder().nIn(10).nOut(10).build(), "in") @@ -56,7 +56,7 @@ public class TestSetGetParameters extends BaseDL4JTest { ComputationGraph net = new ComputationGraph(conf); net.init(); - INDArray params = net.params(); + INDArray params = net.getModelParams(); ComputationGraph net2 = new ComputationGraph(conf); @@ -65,16 +65,16 @@ public class TestSetGetParameters extends BaseDL4JTest { ComputationGraph net3 = new ComputationGraph(conf); net3.init(params, false); - assertEquals(params, net2.params()); - assertEquals(params, net3.params()); + assertEquals(params, net2.getModelParams()); + assertEquals(params, net3.getModelParams()); - assertNotSame(params, net2.params()); //Different objects due to clone - assertSame(params, net3.params()); //Same object due to clone + assertNotSame(params, net2.getModelParams()); //Different objects due to clone + assertSame(params, net3.getModelParams()); //Same object due to clone - Map paramsMap = net.paramTable(); - Map paramsMap2 = net2.paramTable(); - Map paramsMap3 = net3.paramTable(); + Map paramsMap = net.getParamTable(); + Map paramsMap2 = net2.getParamTable(); + Map paramsMap3 = net3.getParamTable(); for (String s : paramsMap.keySet()) { assertEquals(paramsMap.get(s), paramsMap2.get(s)); assertEquals(paramsMap.get(s), paramsMap3.get(s)); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestVariableLengthTSCG.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestVariableLengthTSCG.java index 96e1dcf12..7023e0039 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestVariableLengthTSCG.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/TestVariableLengthTSCG.java @@ -68,7 +68,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { for (int nExamples : miniBatchSizes) { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).graphBuilder().addInputs("in") .addLayer("0", new GravesLSTM.Builder().activation(Activation.TANH).nIn(2).nOut(2).build(), @@ -103,14 +103,14 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { net.setInput(0, in1); net.setLabel(0, labels1); net.computeGradientAndScore(); - double 
score1 = net.score(); + double score1 = net.getScore(); Gradient g1 = net.gradient(); net.setInput(0, in2); net.setLabel(0, labels2); net.setLayerMaskArrays(null, new INDArray[] {labelMask}); net.computeGradientAndScore(); - double score2 = net.score(); + double score2 = net.getScore(); Gradient g2 = net.gradient(); //Scores and gradients should be identical for two cases (given mask array) @@ -134,7 +134,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { } net.setLabel(0, labels2); net.computeGradientAndScore(); - double score2a = net.score(); + double score2a = net.getScore(); Gradient g2a = net.gradient(); assertEquals(score2, score2a, 1e-6); for (String s : g2map.keySet()) { @@ -158,7 +158,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { for (int nExamples : miniBatchSizes) { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(new NormalDistribution(0,2)) .updater(new Sgd(0.1)).seed(12345).graphBuilder().addInputs("in") @@ -200,7 +200,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { net.setInput(0, in1); net.setLabel(0, labels1); net.computeGradientAndScore(); - double score1 = net.score(); + double score1 = net.getScore(); Gradient g1 = net.gradient(); Map map = g1.gradientForVariable(); for (String s : map.keySet()) { @@ -211,7 +211,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { net.setLabel(0, labels2); net.setLayerMaskArrays(new INDArray[] {inputMask}, null); net.computeGradientAndScore(); - double score2 = net.score(); + double score2 = net.getScore(); Gradient g2 = net.gradient(); Map activations2 = net.feedForward(); @@ -236,7 +236,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { net.setInput(0, in2); net.setLayerMaskArrays(new INDArray[]{inputMask}, null); net.computeGradientAndScore(); - double score2a = net.score(); + double score2a = net.getScore(); Gradient g2a = net.gradient(); assertEquals(score2, score2a, 1e-12); for (String s : g2.gradientForVariable().keySet()) { @@ -300,7 +300,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { INDArray labels = Nd4j.ones(miniBatch, nOut, tsLength); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration.builder().seed(12345L) .graphBuilder() .addInputs("in").addLayer("0", new GravesLSTM.Builder().nIn(nIn).nOut(5) @@ -330,7 +330,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { net.setLabel(0, labels); net.computeGradientAndScore(); - double score = net.score(); + double score = net.getScore(); assertEquals(expScore, score, 0.1, msg); } @@ -370,7 +370,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { INDArray input = Nd4j.rand(miniBatch, nIn, tsLength); ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration.builder().seed(12345L) .graphBuilder() .addInputs("in").addLayer("0", new GravesLSTM.Builder().nIn(nIn).nOut(5) @@ -391,7 +391,7 @@ public class TestVariableLengthTSCG extends BaseDL4JTest { net.init(); ComputationGraphConfiguration conf2 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration.builder().seed(12345L) .graphBuilder() .addInputs("in").addLayer("0", new GravesLSTM.Builder().nIn(nIn).nOut(5) diff --git 
a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/graphnodes/TestGraphNodes.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/graphnodes/TestGraphNodes.java index ba3eb90bb..3ca1aa8bd 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/graphnodes/TestGraphNodes.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/graph/graphnodes/TestGraphNodes.java @@ -188,7 +188,7 @@ public class TestGraphNodes extends BaseDL4JTest { @Test public void testLastTimeStepVertex() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addVertex("lastTS", new LastTimeStepVertex("in"), "in") .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "lastTS").setOutputs("out") .build(); @@ -239,7 +239,7 @@ public class TestGraphNodes extends BaseDL4JTest { @Test public void testDuplicateToTimeSeriesVertex() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder() .addInputs("in2d", "in3d") .addVertex("duplicateTS", new DuplicateToTimeSeriesVertex("in3d"), "in2d") .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build(), "duplicateTS") @@ -313,7 +313,7 @@ public class TestGraphNodes extends BaseDL4JTest { null, null); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in1", "in2") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in1", "in2") .addVertex("stack", new org.deeplearning4j.nn.conf.graph.StackVertex(), "in1", "in2") .addLayer("1", new EmbeddingLayer.Builder().nIn(5).nOut(5).build(), "stack") .addVertex("unstack1", new org.deeplearning4j.nn.conf.graph.UnstackVertex(0, 2), "1") @@ -540,7 +540,7 @@ public class TestGraphNodes extends BaseDL4JTest { public void testJSON() { //The config here is non-sense, but that doesn't matter for config -> json -> config test ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addVertex("v1", new ElementWiseVertex(ElementWiseVertex.Op.Add), "in") .addVertex("v2", new org.deeplearning4j.nn.conf.graph.MergeVertex(), "in", "in") .addVertex("v3", new PreprocessorVertex( @@ -565,7 +565,7 @@ public class TestGraphNodes extends BaseDL4JTest { int numLabelClasses = 10; int numInputs = 5; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(WorkspaceMode.NONE) .inferenceWorkspaceMode(WorkspaceMode.NONE) .seed(123) //Random number generator seed for improved repeatability. Optional. 
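The hunks in this section all apply the same mechanical migration: the NeuralNetConfiguration.Builder / MultiLayerConfiguration pair is replaced by the static NeuralNetConfiguration.builder() entry point, and model accessors are renamed (params() -> getModelParams(), paramTable() -> getParamTable(), score() -> getScore(), conf().getLayer() -> getLayerConfiguration(), getConfiguration() -> getComputationGraphConfiguration(), setListeners(...) -> addTrainingListeners(...), setInputType(...) -> inputType(...)). A minimal sketch of the target style, assuming only the refactored names visible in these hunks and that the renamed accessors apply to MultiLayerNetwork as well as ComputationGraph; layer sizes and hyperparameters are illustrative, not taken from any one test:

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class BuilderMigrationSketch {
    public static void main(String[] args) {
        // Old style: new NeuralNetConfiguration.Builder()...list()...build() returning a MultiLayerConfiguration.
        // New style: static builder() entry point, layers added directly, build() returning a NeuralNetConfiguration.
        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .seed(123)
                .updater(new Sgd(0.1))
                .activation(Activation.IDENTITY)
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        net.addTrainingListeners(new ScoreIterationListener(1));              // was net.setListeners(...)

        INDArray params = net.getModelParams();                               // was net.params()
        DenseLayer l0 = (DenseLayer) net.getLayer(0).getLayerConfiguration(); // was net.getLayer(0).conf().getLayer()
    }
}

The same pattern recurs in every file diff below; the individual hunks differ only in which of these renames they touch.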
diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ActivationLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ActivationLayerTest.java index 14e169767..629fd7069 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ActivationLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ActivationLayerTest.java @@ -24,7 +24,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ActivationLayer; @@ -83,15 +82,17 @@ public class ActivationLayerTest extends BaseDL4JTest { DataSet next = iter.next(); // Run without separate activation layer - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) - .list() - .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(10).activation(Activation.RELU) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.LBFGS) + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) + .seed(123) + .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(10).activation(Activation.RELU) .weightInit(WeightInit.XAVIER).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + + .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) .activation(Activation.SOFTMAX).nIn(10).nOut(10).build()) - .build(); + .build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -99,7 +100,7 @@ public class ActivationLayerTest extends BaseDL4JTest { // Run with separate activation layer - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(10).activation(Activation.IDENTITY) @@ -152,7 +153,7 @@ public class ActivationLayerTest extends BaseDL4JTest { // Run without separate activation layer Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new AutoEncoder.Builder().nIn(nIn).nOut(layerSize).corruptionLevel(0.0) @@ -170,7 +171,7 @@ public class ActivationLayerTest extends BaseDL4JTest { // Run with separate activation layer Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new AutoEncoder.Builder().nIn(nIn).nOut(layerSize).corruptionLevel(0.0) @@ -214,7 +215,7 @@ public class ActivationLayerTest extends BaseDL4JTest { DataSet next = iter.next(); // Run without separate activation layer - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + 
NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20) @@ -222,7 +223,7 @@ public class ActivationLayerTest extends BaseDL4JTest { .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) .activation(Activation.SOFTMAX).nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -230,8 +231,8 @@ public class ActivationLayerTest extends BaseDL4JTest { // Run with separate activation layer - MultiLayerConfiguration conf2 = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .seed(123).list() .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20) @@ -243,7 +244,7 @@ public class ActivationLayerTest extends BaseDL4JTest { .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork network2 = new MultiLayerNetwork(conf2); network2.init(); @@ -271,7 +272,7 @@ public class ActivationLayerTest extends BaseDL4JTest { @Test public void testActivationInheritance() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .weightInit(WeightInit.XAVIER) .activation(Activation.RATIONALTANH) @@ -287,19 +288,19 @@ public class ActivationLayerTest extends BaseDL4JTest { MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); - assertNotNull(((ActivationLayer)network.getLayer(1).conf().getLayer()).getActivationFn()); + assertNotNull(((ActivationLayer)network.getLayer(1).getLayerConfiguration()).getActivationFn()); - assertTrue(((DenseLayer)network.getLayer(0).conf().getLayer()).getActivationFn() instanceof ActivationRationalTanh); - assertTrue(((ActivationLayer)network.getLayer(1).conf().getLayer()).getActivationFn() instanceof ActivationRationalTanh); - assertTrue(((ActivationLayer)network.getLayer(2).conf().getLayer()).getActivationFn() instanceof ActivationRationalTanh); - assertTrue(((ActivationLayer)network.getLayer(3).conf().getLayer()).getActivationFn() instanceof ActivationELU); - assertTrue(((OutputLayer)network.getLayer(4).conf().getLayer()).getActivationFn() instanceof ActivationSoftmax); + assertTrue(((DenseLayer)network.getLayer(0).getLayerConfiguration()).getActivationFn() instanceof ActivationRationalTanh); + assertTrue(((ActivationLayer)network.getLayer(1).getLayerConfiguration()).getActivationFn() instanceof ActivationRationalTanh); + assertTrue(((ActivationLayer)network.getLayer(2).getLayerConfiguration()).getActivationFn() instanceof ActivationRationalTanh); + assertTrue(((ActivationLayer)network.getLayer(3).getLayerConfiguration()).getActivationFn() instanceof ActivationELU); + assertTrue(((OutputLayer)network.getLayer(4).getLayerConfiguration()).getActivationFn() instanceof ActivationSoftmax); } @Test public void testActivationInheritanceCG() { - ComputationGraphConfiguration conf = new 
NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .weightInit(WeightInit.XAVIER) .activation(Activation.RATIONALTANH) @@ -317,13 +318,13 @@ public class ActivationLayerTest extends BaseDL4JTest { ComputationGraph network = new ComputationGraph(conf); network.init(); - assertNotNull(((ActivationLayer)network.getLayer("1").conf().getLayer()).getActivationFn()); + assertNotNull(((ActivationLayer)network.getLayer("1").getLayerConfiguration()).getActivationFn()); - assertTrue(((DenseLayer)network.getLayer("0").conf().getLayer()).getActivationFn() instanceof ActivationRationalTanh); - assertTrue(((ActivationLayer)network.getLayer("1").conf().getLayer()).getActivationFn() instanceof ActivationRationalTanh); - assertTrue(((ActivationLayer)network.getLayer("2").conf().getLayer()).getActivationFn() instanceof ActivationRationalTanh); - assertTrue(((ActivationLayer)network.getLayer("3").conf().getLayer()).getActivationFn() instanceof ActivationELU); - assertTrue(((OutputLayer)network.getLayer("4").conf().getLayer()).getActivationFn() instanceof ActivationSoftmax); + assertTrue(((DenseLayer)network.getLayer("0").getLayerConfiguration()).getActivationFn() instanceof ActivationRationalTanh); + assertTrue(((ActivationLayer)network.getLayer("1").getLayerConfiguration()).getActivationFn() instanceof ActivationRationalTanh); + assertTrue(((ActivationLayer)network.getLayer("2").getLayerConfiguration()).getActivationFn() instanceof ActivationRationalTanh); + assertTrue(((ActivationLayer)network.getLayer("3").getLayerConfiguration()).getActivationFn() instanceof ActivationELU); + assertTrue(((OutputLayer)network.getLayer("4").getLayerConfiguration()).getActivationFn() instanceof ActivationSoftmax); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/AutoEncoderTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/AutoEncoderTest.java index f841d1454..8b63b88b4 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/AutoEncoderTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/AutoEncoderTest.java @@ -47,7 +47,7 @@ public class AutoEncoderTest extends BaseDL4JTest { int in2Size = 15; int hiddenSize = 10; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .graphBuilder() .addInputs("in1", "in2") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/BaseLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/BaseLayerConfigurationTest.java similarity index 82% rename from cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/BaseLayerTest.java rename to cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/BaseLayerConfigurationTest.java index bc1b2db87..c481d20df 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/BaseLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/BaseLayerConfigurationTest.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.layers; import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.conf.layers.ConvolutionLayer; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -41,7 +40,7 @@ import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; -public class BaseLayerTest extends BaseDL4JTest { +public class BaseLayerConfigurationTest extends BaseDL4JTest { protected INDArray weight = Nd4j.create(new double[] {0.10, -0.20, -0.15, 0.05}, new int[] {2, 2}); protected INDArray bias = Nd4j.create(new double[] {0.5, 0.5}, new int[] {1, 2}); @@ -58,10 +57,10 @@ public class BaseLayerTest extends BaseDL4JTest { @Test public void testSetExistingParamsConvolutionSingleLayer() { Layer layer = configureSingleLayer(); - assertNotEquals(paramTable, layer.paramTable()); + assertNotEquals(paramTable, layer.getParamTable()); layer.setParamTable(paramTable); - assertEquals(paramTable, layer.paramTable()); + assertEquals(paramTable, layer.getParamTable()); } @@ -70,9 +69,9 @@ public class BaseLayerTest extends BaseDL4JTest { MultiLayerNetwork net = configureMultiLayer(); for (Layer layer : net.getLayers()) { - assertNotEquals(paramTable, layer.paramTable()); + assertNotEquals(paramTable, layer.getParamTable()); layer.setParamTable(paramTable); - assertEquals(paramTable, layer.paramTable()); + assertEquals(paramTable, layer.getParamTable()); } } @@ -81,12 +80,12 @@ public class BaseLayerTest extends BaseDL4JTest { int nIn = 2; int nOut = 2; - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new ConvolutionLayer.Builder().nIn(nIn).nOut(nOut).build()).build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - return conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); } @@ -94,7 +93,7 @@ public class BaseLayerTest extends BaseDL4JTest { int nIn = 2; int nOut = 2; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .layer(1, new OutputLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.SOFTMAX).build()).build(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CacheModeTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CacheModeTest.java index 7b55a4641..7898d35ad 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CacheModeTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CacheModeTest.java @@ -41,8 +41,8 @@ public class CacheModeTest extends BaseDL4JTest { @Test public void testConvCacheModeSimple(){ - MultiLayerConfiguration conf1 = getConf(CacheMode.NONE); - MultiLayerConfiguration conf2 = getConf(CacheMode.DEVICE); + NeuralNetConfiguration conf1 = getConf(CacheMode.NONE); + NeuralNetConfiguration conf2 = getConf(CacheMode.DEVICE); MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); net1.init(); @@ -56,14 +56,14 @@ public class CacheModeTest extends BaseDL4JTest { INDArray out2 = net2.output(in); assertEquals(out1, out2); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), 
net2.getModelParams()); net1.fit(in, labels); net2.fit(in, labels); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); } - private static MultiLayerConfiguration getConf(CacheMode cacheMode){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + private static NeuralNetConfiguration getConf(CacheMode cacheMode){ + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .inferenceWorkspaceMode(WorkspaceMode.ENABLED) .trainingWorkspaceMode(WorkspaceMode.ENABLED) @@ -73,7 +73,7 @@ public class CacheModeTest extends BaseDL4JTest { .layer(new ConvolutionLayer.Builder().nOut(3).build()) .layer(new ConvolutionLayer.Builder().nOut(3).build()) .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) + .inputType(InputType.convolutionalFlat(28, 28, 1)) .build(); return conf; @@ -84,8 +84,8 @@ public class CacheModeTest extends BaseDL4JTest { for(boolean graves : new boolean[]{true, false}) { - MultiLayerConfiguration conf1 = getConfLSTM(CacheMode.NONE, graves); - MultiLayerConfiguration conf2 = getConfLSTM(CacheMode.DEVICE, graves); + NeuralNetConfiguration conf1 = getConfLSTM(CacheMode.NONE, graves); + NeuralNetConfiguration conf2 = getConfLSTM(CacheMode.DEVICE, graves); MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); net1.init(); @@ -99,15 +99,15 @@ public class CacheModeTest extends BaseDL4JTest { INDArray out2 = net2.output(in); assertEquals(out1, out2); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); net1.fit(in, labels); net2.fit(in, labels); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); } } - private static MultiLayerConfiguration getConfLSTM(CacheMode cacheMode, boolean graves){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + private static NeuralNetConfiguration getConfLSTM(CacheMode cacheMode, boolean graves){ + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .inferenceWorkspaceMode(WorkspaceMode.ENABLED) .trainingWorkspaceMode(WorkspaceMode.ENABLED) @@ -145,14 +145,14 @@ public class CacheModeTest extends BaseDL4JTest { INDArray out2 = net2.outputSingle(in); assertEquals(out1, out2); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); net1.fit(new DataSet(in, labels)); net2.fit(new DataSet(in, labels)); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); } private static ComputationGraphConfiguration getConfCG(CacheMode cacheMode){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .inferenceWorkspaceMode(WorkspaceMode.ENABLED) .trainingWorkspaceMode(WorkspaceMode.ENABLED) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CenterLossOutputLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CenterLossOutputLayerTest.java index 73bd4c333..84f94928b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CenterLossOutputLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/CenterLossOutputLayerTest.java @@ -52,7 +52,7 @@ public class 
CenterLossOutputLayerTest extends BaseDL4JTest { private ComputationGraph getGraph(int numLabels, double lambda) { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .dist(new NormalDistribution(0, 1)).updater(new NoOp()) .graphBuilder().addInputs("input1") @@ -73,7 +73,7 @@ public class CenterLossOutputLayerTest extends BaseDL4JTest { int nChannels = 1; // Number of input channels int outputNum = 10; // The number of possible outcomes - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) // Training iterations as above + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) // Training iterations as above .l2(0.0005).weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)) .graphBuilder().addInputs("input") @@ -121,7 +121,7 @@ public class CenterLossOutputLayerTest extends BaseDL4JTest { graph.setInput(0, input); graph.setLabel(0, labels); graph.computeGradientAndScore(); - results[i] = graph.score(); + results[i] = graph.getScore(); } assertNotEquals(results[0], results[1]); @@ -137,7 +137,7 @@ public class CenterLossOutputLayerTest extends BaseDL4JTest { ComputationGraph net = getCNNMnistConfig(); net.init(); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); for (int i = 0; i < 50; i++) { net.fit(mnistTrain.next()); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/DropoutLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/DropoutLayerTest.java index 3aa7e37dd..80cf35543 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/DropoutLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/DropoutLayerTest.java @@ -25,7 +25,6 @@ import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -76,7 +75,7 @@ public class DropoutLayerTest extends BaseDL4JTest { @Test public void testDropoutLayerWithoutTraining() throws Exception { - MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder().seed(3648) + NeuralNetConfiguration confIntegrated = NeuralNetConfiguration.builder().seed(3648) .list().layer(0, new ConvolutionLayer.Builder(1, 1).stride(1, 1).nIn(1).nOut(1).dropOut(0.25) .activation(Activation.IDENTITY).weightInit(WeightInit.XAVIER) @@ -85,7 +84,7 @@ public class DropoutLayerTest extends BaseDL4JTest { .activation(Activation.SOFTMAX) .weightInit(WeightInit.XAVIER).dropOut(0.25) .nOut(4).build()) - .setInputType(InputType.convolutionalFlat(2, 2, 1)).build(); + .inputType(InputType.convolutionalFlat(2, 2, 1)).build(); MultiLayerNetwork netIntegrated = new MultiLayerNetwork(confIntegrated); netIntegrated.init(); @@ -94,8 +93,8 @@ public class DropoutLayerTest extends BaseDL4JTest { netIntegrated.getLayer(1).setParam("W", Nd4j.eye(4)); netIntegrated.getLayer(1).setParam("b", Nd4j.zeros(4, 1)); - MultiLayerConfiguration confSeparate 
= - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confSeparate = + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .seed(3648) .list().layer(0, @@ -109,7 +108,7 @@ public class DropoutLayerTest extends BaseDL4JTest { .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .nOut(4).build()) - .setInputType(InputType.convolutionalFlat(2, 2, 1)).build(); + .inputType(InputType.convolutionalFlat(2, 2, 1)).build(); MultiLayerNetwork netSeparate = new MultiLayerNetwork(confSeparate); netSeparate.init(); @@ -137,8 +136,8 @@ public class DropoutLayerTest extends BaseDL4JTest { List actTestSeparate = netSeparate.feedForward(in.dup(), false); //Check masks: - INDArray maskIntegrated = ((Dropout)netIntegrated.getLayer(0).conf().getLayer().getIDropout()).getMask(); - INDArray maskSeparate = ((Dropout)netSeparate.getLayer(0).conf().getLayer().getIDropout()).getMask(); + INDArray maskIntegrated = ((Dropout)netIntegrated.getLayer(0).getLayerConfiguration().getIDropout()).getMask(); + INDArray maskSeparate = ((Dropout)netSeparate.getLayer(0).getLayerConfiguration().getIDropout()).getMask(); assertEquals(maskIntegrated, maskSeparate); @@ -156,7 +155,7 @@ public class DropoutLayerTest extends BaseDL4JTest { DataSet next = iter.next(); // Run without separate activation layer - MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confIntegrated = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(10) @@ -173,7 +172,7 @@ public class DropoutLayerTest extends BaseDL4JTest { netIntegrated.fit(next); // Run with separate activation layer - MultiLayerConfiguration confSeparate = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confSeparate = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(10).activation(Activation.RELU) @@ -229,7 +228,7 @@ public class DropoutLayerTest extends BaseDL4JTest { // Run without separate activation layer Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration confIntegrated = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration confIntegrated = NeuralNetConfiguration.builder().seed(123) .list().layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20) .activation(Activation.TANH).weightInit(WeightInit.XAVIER) @@ -237,7 +236,7 @@ public class DropoutLayerTest extends BaseDL4JTest { .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).dropOut(0.5) .nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); // Run with separate activation layer Nd4j.getRandom().setSeed(12345); @@ -248,14 +247,14 @@ public class DropoutLayerTest extends BaseDL4JTest { Map preProcessorMap = new HashMap<>(); preProcessorMap.put(1, new CnnToFeedForwardPreProcessor(13, 13, 20)); - MultiLayerConfiguration confSeparate = new NeuralNetConfiguration.Builder().seed(123).list() + NeuralNetConfiguration confSeparate = NeuralNetConfiguration.builder().seed(123).list() .layer(0, new ConvolutionLayer.Builder(4, 4).stride(2, 2).nIn(1).nOut(20) .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build()) .layer(1, new 
DropoutLayer.Builder(0.5).build()) .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build()) .inputPreProcessors(preProcessorMap) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); Nd4j.getRandom().setSeed(12345); @@ -266,7 +265,7 @@ public class DropoutLayerTest extends BaseDL4JTest { MultiLayerNetwork netSeparate = new MultiLayerNetwork(confSeparate); netSeparate.init(); - assertEquals(netIntegrated.params(), netSeparate.params()); + assertEquals(netIntegrated.getModelParams(), netSeparate.getModelParams()); Nd4j.getRandom().setSeed(12345); netIntegrated.fit(next); @@ -274,7 +273,7 @@ public class DropoutLayerTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); netSeparate.fit(next); - assertEquals(netIntegrated.params(), netSeparate.params()); + assertEquals(netIntegrated.getModelParams(), netSeparate.getModelParams()); // check parameters assertEquals(netIntegrated.getLayer(0).getParam("W"), netSeparate.getLayer(0).getParam("W")); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerTest.java index c3543e167..20880d71a 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerTest.java @@ -23,9 +23,11 @@ package org.deeplearning4j.nn.layers; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer.Builder; +import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration; @@ -40,6 +42,7 @@ import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; import java.util.List; +import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -54,19 +57,20 @@ public class FrozenLayerTest extends BaseDL4JTest { public void testFrozen() { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); FineTuneConfiguration finetune = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)).build(); - MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(overallConf.clone().list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) - .layer(2, new DenseLayer.Builder().nIn(2).nOut(3).build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build()); + 
MultiLayerNetwork modelToFineTune = new MultiLayerNetwork( + (NeuralNetConfiguration) ((NeuralNetConfigurationBuilder)overallConf).clone().list() + .layer(0, new Builder().nIn(4).nOut(3).build()) + .layer(1, new Builder().nIn(3).nOut(2).build()) + .layer(2, new Builder().nIn(2).nOut(3).build()) + .layer(3, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build()); modelToFineTune.init(); List ff = modelToFineTune.feedForwardToLayer(2, randomData.getFeatures(), false); @@ -76,13 +80,14 @@ public class FrozenLayerTest extends BaseDL4JTest { .setFeatureExtractor(1).build(); INDArray paramsLastTwoLayers = - Nd4j.hstack(modelToFineTune.getLayer(2).params(), modelToFineTune.getLayer(3).params()); - MultiLayerNetwork notFrozen = new MultiLayerNetwork(overallConf.clone().list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(3).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build(), paramsLastTwoLayers); + Nd4j.hstack(modelToFineTune.getLayer(2).getParams(), modelToFineTune.getLayer(3).getParams()); + MultiLayerNetwork notFrozen = new MultiLayerNetwork( + (NeuralNetConfiguration) overallConf.clone() + .layer(0, new Builder().nIn(2).nOut(3).build()) + .layer(1, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build(), paramsLastTwoLayers); // assertEquals(modelNow.getLayer(2).conf(), notFrozen.getLayer(0).conf()); //Equal, other than names // assertEquals(modelNow.getLayer(3).conf(), notFrozen.getLayer(1).conf()); //Equal, other than names @@ -97,9 +102,9 @@ public class FrozenLayerTest extends BaseDL4JTest { modelNow.fit(randomData); } - INDArray expected = Nd4j.hstack(modelToFineTune.getLayer(0).params(), modelToFineTune.getLayer(1).params(), - notFrozen.params()); - INDArray act = modelNow.params(); + INDArray expected = Nd4j.hstack(modelToFineTune.getLayer(0).getParams(), modelToFineTune.getLayer(1).getParams(), + notFrozen.getModelParams()); + INDArray act = modelNow.getModelParams(); assertEquals(expected, act); } @@ -109,16 +114,17 @@ public class FrozenLayerTest extends BaseDL4JTest { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); - MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(overallConf.list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) - .layer(2, new DenseLayer.Builder().nIn(2).nOut(3).build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build()); + MultiLayerNetwork modelToFineTune = new MultiLayerNetwork( + (NeuralNetConfiguration) overallConf + .layer(0, new Builder().nIn(4).nOut(3).build()) + .layer(1, new Builder().nIn(3).nOut(2).build()) + .layer(2, new Builder().nIn(2).nOut(3).build()) + .layer(3, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build()); modelToFineTune.init(); INDArray asFrozenFeatures = modelToFineTune.feedForwardToLayer(2, randomData.getFeatures(), 
false).get(2); @@ -127,19 +133,19 @@ public class FrozenLayerTest extends BaseDL4JTest { MultiLayerNetwork clonedModel = modelNow.clone(); //Check json - assertEquals(modelNow.getLayerWiseConfigurations().toJson(), clonedModel.getLayerWiseConfigurations().toJson()); + assertEquals(modelNow.getNetConfiguration().toJson(), clonedModel.getNetConfiguration().toJson()); //Check params - assertEquals(modelNow.params(), clonedModel.params()); + assertEquals(modelNow.getModelParams(), clonedModel.getModelParams()); MultiLayerNetwork notFrozen = new MultiLayerNetwork( - overallConf.list().layer(0, new DenseLayer.Builder().nIn(2).nOut(3).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build(), - Nd4j.hstack(modelToFineTune.getLayer(2).params(), modelToFineTune.getLayer(3).params())); + (NeuralNetConfiguration) overallConf.layer(0, new Builder().nIn(2).nOut(3).build()) + .layer(1, new OutputLayer.Builder( + LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build(), + Nd4j.hstack(modelToFineTune.getLayer(2).getParams(), modelToFineTune.getLayer(3).getParams())); int i = 0; while (i < 5) { @@ -149,10 +155,10 @@ public class FrozenLayerTest extends BaseDL4JTest { i++; } - INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer(0).params(), - modelToFineTune.getLayer(1).params(), notFrozen.params()); - assertEquals(expectedParams, modelNow.params()); - assertEquals(expectedParams, clonedModel.params()); + INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer(0).getParams(), + modelToFineTune.getLayer(1).getParams(), notFrozen.getModelParams()); + assertEquals(expectedParams, modelNow.getModelParams()); + assertEquals(expectedParams, clonedModel.getModelParams()); } @@ -161,7 +167,7 @@ public class FrozenLayerTest extends BaseDL4JTest { public void testFrozenCompGraph() { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); ComputationGraph modelToFineTune = new ComputationGraph(overallConf.graphBuilder().addInputs("layer0In") @@ -193,8 +199,8 @@ public class FrozenLayerTest extends BaseDL4JTest { .setOutputs("layer1").build()); notFrozen.init(); - notFrozen.setParams(Nd4j.hstack(modelToFineTune.getLayer("layer2").params(), - modelToFineTune.getLayer("layer3").params())); + notFrozen.setParams(Nd4j.hstack(modelToFineTune.getLayer("layer2").getParams(), + modelToFineTune.getLayer("layer3").getParams())); int i = 0; while (i < 5) { @@ -203,8 +209,8 @@ public class FrozenLayerTest extends BaseDL4JTest { i++; } - assertEquals(Nd4j.hstack(modelToFineTune.getLayer("layer0").params(), - modelToFineTune.getLayer("layer1").params(), notFrozen.params()), modelNow.params()); + assertEquals(Nd4j.hstack(modelToFineTune.getLayer("layer0").getParams(), + modelToFineTune.getLayer("layer1").getParams(), notFrozen.getModelParams()), modelNow.getModelParams()); } @Test @@ -212,7 +218,7 @@ public class FrozenLayerTest extends BaseDL4JTest { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder 
overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); ComputationGraph modelToFineTune = new ComputationGraph(overallConf.graphBuilder().addInputs("layer0In") @@ -235,10 +241,10 @@ public class FrozenLayerTest extends BaseDL4JTest { ComputationGraph clonedModel = modelNow.clone(); //Check json - assertEquals(clonedModel.getConfiguration().toJson(), modelNow.getConfiguration().toJson()); + assertEquals(clonedModel.getComputationGraphConfiguration().toJson(), modelNow.getComputationGraphConfiguration().toJson()); //Check params - assertEquals(modelNow.params(), clonedModel.params()); + assertEquals(modelNow.getModelParams(), clonedModel.getModelParams()); ComputationGraph notFrozen = new ComputationGraph(overallConf.graphBuilder().addInputs("layer0In") .addLayer("layer0", new DenseLayer.Builder().nIn(2).nOut(3).build(), "layer0In") @@ -250,8 +256,8 @@ public class FrozenLayerTest extends BaseDL4JTest { "layer0") .setOutputs("layer1").build()); notFrozen.init(); - notFrozen.setParams(Nd4j.hstack(modelToFineTune.getLayer("layer2").params(), - modelToFineTune.getLayer("layer3").params())); + notFrozen.setParams(Nd4j.hstack(modelToFineTune.getLayer("layer2").getParams(), + modelToFineTune.getLayer("layer3").getParams())); int i = 0; @@ -262,10 +268,10 @@ public class FrozenLayerTest extends BaseDL4JTest { i++; } - INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer("layer0").params(), - modelToFineTune.getLayer("layer1").params(), notFrozen.params()); - assertEquals(expectedParams, modelNow.params()); - assertEquals(expectedParams, clonedModel.params()); + INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer("layer0").getParams(), + modelToFineTune.getLayer("layer1").getParams(), notFrozen.getModelParams()); + assertEquals(expectedParams, modelNow.getModelParams()); + assertEquals(expectedParams, clonedModel.getModelParams()); } @@ -273,7 +279,7 @@ public class FrozenLayerTest extends BaseDL4JTest { public void testFrozenLayerInstantiation() { //We need to be able to instantitate frozen layers from JSON etc, and have them be the same as if // they were initialized via the builder - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) @@ -283,7 +289,7 @@ public class FrozenLayerTest extends BaseDL4JTest { .nOut(10).build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).list().layer(0, + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345).list().layer(0, new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(new DenseLayer.Builder().nIn(10).nOut(10) .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())) .layer(1, new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer( @@ -299,11 +305,11 @@ public class FrozenLayerTest extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); String json = conf2.toJson(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(json); assertEquals(conf2, fromJson); @@ -323,7 
+329,7 @@ public class FrozenLayerTest extends BaseDL4JTest { //We need to be able to instantitate frozen layers from JSON etc, and have them be the same as if // they were initialized via the builder - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).build(), "in") @@ -335,7 +341,7 @@ public class FrozenLayerTest extends BaseDL4JTest { "1") .setOutputs("2").build(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer.Builder() .layer(new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) @@ -356,7 +362,7 @@ public class FrozenLayerTest extends BaseDL4JTest { ComputationGraph net2 = new ComputationGraph(conf2); net2.init(); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); String json = conf2.toJson(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackpropTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackpropTest.java index dce5daebd..d47973a89 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackpropTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackpropTest.java @@ -22,17 +22,13 @@ package org.deeplearning4j.nn.layers; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; -import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration; -import org.deeplearning4j.nn.transferlearning.TransferLearning; import org.deeplearning4j.nn.weights.WeightInit; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; @@ -42,8 +38,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.util.List; - import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -55,7 +49,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { public void testFrozenWithBackpropLayerInstantiation() { //We need to be able to instantitate frozen layers from JSON etc, and have them be the same as if // they were initialized via the builder - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new 
DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) @@ -65,7 +59,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { .nOut(10).build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).list().layer(0, + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345).list().layer(0, new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop(new DenseLayer.Builder().nIn(10).nOut(10) .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build())) .layer(1, new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop( @@ -81,11 +75,11 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); String json = conf2.toJson(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(json); assertEquals(conf2, fromJson); @@ -105,7 +99,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { //We need to be able to instantitate frozen layers from JSON etc, and have them be the same as if // they were initialized via the builder - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).build(), "in") @@ -117,7 +111,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { "1") .setOutputs("2").build(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop( new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH) @@ -136,7 +130,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { ComputationGraph net2 = new ComputationGraph(conf2); net2.init(); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); String json = conf2.toJson(); @@ -160,7 +154,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); DataSet randomData = new DataSet(Nd4j.rand(100, 4), Nd4j.rand(100, 1)); - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Sgd(2)) @@ -176,19 +170,19 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { MultiLayerNetwork network = new MultiLayerNetwork(conf1); network.init(); - INDArray unfrozenLayerParams = network.getLayer(0).params().dup(); - INDArray frozenLayerParams1 = network.getLayer(1).params().dup(); - INDArray frozenLayerParams2 = network.getLayer(2).params().dup(); - INDArray frozenOutputLayerParams = network.getLayer(3).params().dup(); + INDArray unfrozenLayerParams = network.getLayer(0).getParams().dup(); + INDArray frozenLayerParams1 = network.getLayer(1).getParams().dup(); + INDArray 
frozenLayerParams2 = network.getLayer(2).getParams().dup(); + INDArray frozenOutputLayerParams = network.getLayer(3).getParams().dup(); for (int i = 0; i < 100; i++) { network.fit(randomData); } - assertNotEquals(unfrozenLayerParams, network.getLayer(0).params()); - assertEquals(frozenLayerParams1, network.getLayer(1).params()); - assertEquals(frozenLayerParams2, network.getLayer(2).params()); - assertEquals(frozenOutputLayerParams, network.getLayer(3).params()); + assertNotEquals(unfrozenLayerParams, network.getLayer(0).getParams()); + assertEquals(frozenLayerParams1, network.getLayer(1).getParams()); + assertEquals(frozenLayerParams2, network.getLayer(2).getParams()); + assertEquals(frozenOutputLayerParams, network.getLayer(3).getParams()); } @@ -212,7 +206,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { String unfrozenLayer1 = unfrozenBranchName + "1"; String unfrozenBranch2 = unfrozenBranchName + "Output"; - ComputationGraphConfiguration computationGraphConf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration computationGraphConf = NeuralNetConfiguration.builder() .updater(new Sgd(2.0)) .seed(12345) .graphBuilder() @@ -234,19 +228,19 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { ComputationGraph computationGraph = new ComputationGraph(computationGraphConf); computationGraph.init(); - INDArray unfrozenLayerParams = computationGraph.getLayer(frozenBranchUnfrozenLayer0).params().dup(); - INDArray frozenLayerParams1 = computationGraph.getLayer(frozenBranchFrozenLayer1).params().dup(); - INDArray frozenLayerParams2 = computationGraph.getLayer(frozenBranchFrozenLayer2).params().dup(); - INDArray frozenOutputLayerParams = computationGraph.getLayer(frozenBranchOutput).params().dup(); + INDArray unfrozenLayerParams = computationGraph.getLayer(frozenBranchUnfrozenLayer0).getParams().dup(); + INDArray frozenLayerParams1 = computationGraph.getLayer(frozenBranchFrozenLayer1).getParams().dup(); + INDArray frozenLayerParams2 = computationGraph.getLayer(frozenBranchFrozenLayer2).getParams().dup(); + INDArray frozenOutputLayerParams = computationGraph.getLayer(frozenBranchOutput).getParams().dup(); for (int i = 0; i < 100; i++) { computationGraph.fit(randomData); } - assertNotEquals(unfrozenLayerParams, computationGraph.getLayer(frozenBranchUnfrozenLayer0).params()); - assertEquals(frozenLayerParams1, computationGraph.getLayer(frozenBranchFrozenLayer1).params()); - assertEquals(frozenLayerParams2, computationGraph.getLayer(frozenBranchFrozenLayer2).params()); - assertEquals(frozenOutputLayerParams, computationGraph.getLayer(frozenBranchOutput).params()); + assertNotEquals(unfrozenLayerParams, computationGraph.getLayer(frozenBranchUnfrozenLayer0).getParams()); + assertEquals(frozenLayerParams1, computationGraph.getLayer(frozenBranchFrozenLayer1).getParams()); + assertEquals(frozenLayerParams2, computationGraph.getLayer(frozenBranchFrozenLayer2).getParams()); + assertEquals(frozenOutputLayerParams, computationGraph.getLayer(frozenBranchOutput).getParams()); } @@ -258,7 +252,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); DataSet randomData = new DataSet(Nd4j.rand(100, 4), Nd4j.rand(100, 1)); - MultiLayerConfiguration confSgd = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confSgd = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Sgd(2)) @@ -269,7 +263,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { .layer(3,new 
OutputLayer.Builder(LossFunctions.LossFunction.MSE).updater(new Sgd(0.0)).biasUpdater(new Sgd(0.0)).activation(Activation.TANH).nIn(2).nOut(1).build()) .build(); - MultiLayerConfiguration confFrozen = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confFrozen = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .updater(new Sgd(2)) @@ -281,17 +275,17 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { .build(); MultiLayerNetwork frozenNetwork = new MultiLayerNetwork(confFrozen); frozenNetwork.init(); - INDArray unfrozenLayerParams = frozenNetwork.getLayer(0).params().dup(); - INDArray frozenLayerParams1 = frozenNetwork.getLayer(1).params().dup(); - INDArray frozenLayerParams2 = frozenNetwork.getLayer(2).params().dup(); - INDArray frozenOutputLayerParams = frozenNetwork.getLayer(3).params().dup(); + INDArray unfrozenLayerParams = frozenNetwork.getLayer(0).getParams().dup(); + INDArray frozenLayerParams1 = frozenNetwork.getLayer(1).getParams().dup(); + INDArray frozenLayerParams2 = frozenNetwork.getLayer(2).getParams().dup(); + INDArray frozenOutputLayerParams = frozenNetwork.getLayer(3).getParams().dup(); MultiLayerNetwork sgdNetwork = new MultiLayerNetwork(confSgd); sgdNetwork.init(); - INDArray unfrozenSgdLayerParams = sgdNetwork.getLayer(0).params().dup(); - INDArray frozenSgdLayerParams1 = sgdNetwork.getLayer(1).params().dup(); - INDArray frozenSgdLayerParams2 = sgdNetwork.getLayer(2).params().dup(); - INDArray frozenSgdOutputLayerParams = sgdNetwork.getLayer(3).params().dup(); + INDArray unfrozenSgdLayerParams = sgdNetwork.getLayer(0).getParams().dup(); + INDArray frozenSgdLayerParams1 = sgdNetwork.getLayer(1).getParams().dup(); + INDArray frozenSgdLayerParams2 = sgdNetwork.getLayer(2).getParams().dup(); + INDArray frozenSgdOutputLayerParams = sgdNetwork.getLayer(3).getParams().dup(); for (int i = 0; i < 100; i++) { frozenNetwork.fit(randomData); @@ -300,10 +294,10 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { sgdNetwork.fit(randomData); } - assertEquals(frozenNetwork.getLayer(0).params(), sgdNetwork.getLayer(0).params()); - assertEquals(frozenNetwork.getLayer(1).params(), sgdNetwork.getLayer(1).params()); - assertEquals(frozenNetwork.getLayer(2).params(), sgdNetwork.getLayer(2).params()); - assertEquals(frozenNetwork.getLayer(3).params(), sgdNetwork.getLayer(3).params()); + assertEquals(frozenNetwork.getLayer(0).getParams(), sgdNetwork.getLayer(0).getParams()); + assertEquals(frozenNetwork.getLayer(1).getParams(), sgdNetwork.getLayer(1).getParams()); + assertEquals(frozenNetwork.getLayer(2).getParams(), sgdNetwork.getLayer(2).getParams()); + assertEquals(frozenNetwork.getLayer(3).getParams(), sgdNetwork.getLayer(3).getParams()); } @@ -326,7 +320,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { String unfrozenLayer1 = unfrozenBranchName + "1"; String unfrozenBranch2 = unfrozenBranchName + "Output"; - ComputationGraphConfiguration computationGraphConf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration computationGraphConf = NeuralNetConfiguration.builder() .updater(new Sgd(2.0)) .seed(12345) .graphBuilder() @@ -347,7 +341,7 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { .setOutputs(frozenBranchOutput) .build(); - ComputationGraphConfiguration computationSgdGraphConf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration computationSgdGraphConf = NeuralNetConfiguration.builder() .updater(new Sgd(2.0)) .seed(12345) .graphBuilder() @@ 
-366,17 +360,17 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { ComputationGraph frozenComputationGraph = new ComputationGraph(computationGraphConf); frozenComputationGraph.init(); - INDArray unfrozenLayerParams = frozenComputationGraph.getLayer(frozenBranchUnfrozenLayer0).params().dup(); - INDArray frozenLayerParams1 = frozenComputationGraph.getLayer(frozenBranchFrozenLayer1).params().dup(); - INDArray frozenLayerParams2 = frozenComputationGraph.getLayer(frozenBranchFrozenLayer2).params().dup(); - INDArray frozenOutputLayerParams = frozenComputationGraph.getLayer(frozenBranchOutput).params().dup(); + INDArray unfrozenLayerParams = frozenComputationGraph.getLayer(frozenBranchUnfrozenLayer0).getParams().dup(); + INDArray frozenLayerParams1 = frozenComputationGraph.getLayer(frozenBranchFrozenLayer1).getParams().dup(); + INDArray frozenLayerParams2 = frozenComputationGraph.getLayer(frozenBranchFrozenLayer2).getParams().dup(); + INDArray frozenOutputLayerParams = frozenComputationGraph.getLayer(frozenBranchOutput).getParams().dup(); ComputationGraph sgdComputationGraph = new ComputationGraph(computationSgdGraphConf); sgdComputationGraph.init(); - INDArray unfrozenSgdLayerParams = sgdComputationGraph.getLayer(frozenBranchUnfrozenLayer0).params().dup(); - INDArray frozenSgdLayerParams1 = sgdComputationGraph.getLayer(frozenBranchFrozenLayer1).params().dup(); - INDArray frozenSgdLayerParams2 = sgdComputationGraph.getLayer(frozenBranchFrozenLayer2).params().dup(); - INDArray frozenSgdOutputLayerParams = sgdComputationGraph.getLayer(frozenBranchOutput).params().dup(); + INDArray unfrozenSgdLayerParams = sgdComputationGraph.getLayer(frozenBranchUnfrozenLayer0).getParams().dup(); + INDArray frozenSgdLayerParams1 = sgdComputationGraph.getLayer(frozenBranchFrozenLayer1).getParams().dup(); + INDArray frozenSgdLayerParams2 = sgdComputationGraph.getLayer(frozenBranchFrozenLayer2).getParams().dup(); + INDArray frozenSgdOutputLayerParams = sgdComputationGraph.getLayer(frozenBranchOutput).getParams().dup(); for (int i = 0; i < 100; i++) { frozenComputationGraph.fit(randomData); @@ -385,10 +379,10 @@ public class FrozenLayerWithBackpropTest extends BaseDL4JTest { sgdComputationGraph.fit(randomData); } - assertEquals(frozenComputationGraph.getLayer(frozenBranchUnfrozenLayer0).params(), sgdComputationGraph.getLayer(frozenBranchUnfrozenLayer0).params()); - assertEquals(frozenComputationGraph.getLayer(frozenBranchFrozenLayer1).params(), sgdComputationGraph.getLayer(frozenBranchFrozenLayer1).params()); - assertEquals(frozenComputationGraph.getLayer(frozenBranchFrozenLayer2).params(), sgdComputationGraph.getLayer(frozenBranchFrozenLayer2).params()); - assertEquals(frozenComputationGraph.getLayer(frozenBranchOutput).params(), sgdComputationGraph.getLayer(frozenBranchOutput).params()); + assertEquals(frozenComputationGraph.getLayer(frozenBranchUnfrozenLayer0).getParams(), sgdComputationGraph.getLayer(frozenBranchUnfrozenLayer0).getParams()); + assertEquals(frozenComputationGraph.getLayer(frozenBranchFrozenLayer1).getParams(), sgdComputationGraph.getLayer(frozenBranchFrozenLayer1).getParams()); + assertEquals(frozenComputationGraph.getLayer(frozenBranchFrozenLayer2).getParams(), sgdComputationGraph.getLayer(frozenBranchFrozenLayer2).getParams()); + assertEquals(frozenComputationGraph.getLayer(frozenBranchOutput).getParams(), sgdComputationGraph.getLayer(frozenBranchOutput).getParams()); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/OutputLayerTest.java 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/OutputLayerTest.java index 232a9a46e..6e2132d92 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/OutputLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/OutputLayerTest.java @@ -34,7 +34,6 @@ import org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.optimize.listeners.ScoreIterationListener; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; @@ -57,7 +56,7 @@ public class OutputLayerTest extends BaseDL4JTest { @Test public void testSetParams() { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT) .updater(new Sgd(1e-1)) .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(4).nOut(3) @@ -65,13 +64,13 @@ public class OutputLayerTest extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - OutputLayer l = (OutputLayer) conf.getLayer().instantiate(conf, + OutputLayer l = (OutputLayer) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, Collections.singletonList(new ScoreIterationListener(1)), 0, params, true, params.dataType()); - params = l.params(); - l.setParams(params); - assertEquals(params, l.params()); + params = l.getModelParams(); + l.setParamsTable(params); + assertEquals(params, l.getModelParams()); } @Test @@ -94,7 +93,7 @@ public class OutputLayerTest extends BaseDL4JTest { } } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize) .dist(new NormalDistribution(0, 1)).activation(Activation.TANH) .updater(new NoOp()).build()) @@ -118,7 +117,7 @@ public class OutputLayerTest extends BaseDL4JTest { //As above, but for RnnOutputLayer. Expect all activations etc. 
to be 3d - MultiLayerConfiguration confRnn = new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration confRnn = NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize) .dist(new NormalDistribution(0, 1)).activation(Activation.TANH) .updater(new NoOp()).build()) @@ -175,7 +174,7 @@ public class OutputLayerTest extends BaseDL4JTest { } INDArray labels2d = proc.backprop(labels3d, miniBatchSize, LayerWorkspaceMgr.noWorkspaces()); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize) .dist(new NormalDistribution(0, 1)) .activation(Activation.TANH).updater(new NoOp()).build()) @@ -192,7 +191,7 @@ public class OutputLayerTest extends BaseDL4JTest { INDArray out2d = mln.feedForward(input).get(2); INDArray out3d = proc.preProcess(out2d, miniBatchSize, LayerWorkspaceMgr.noWorkspaces()); - MultiLayerConfiguration confRnn = new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration confRnn = NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize) .dist(new NormalDistribution(0, 1)) .activation(Activation.TANH).updater(new NoOp()).build()) @@ -218,8 +217,8 @@ public class OutputLayerTest extends BaseDL4JTest { //However: OutputLayer version has miniBatch*timeSeriesLength "examples" (after reshaping) //RnnOutputLayer has miniBatch examples //Hence: expect difference in scores by factor of timeSeriesLength - double score = mln.score() * timeSeriesLength; - double scoreRNN = mlnRnn.score(); + double score = mln.getScore() * timeSeriesLength; + double scoreRNN = mlnRnn.getScore(); assertFalse(Double.isNaN(score)); assertFalse(Double.isNaN(scoreRNN)); @@ -235,7 +234,7 @@ public class OutputLayerTest extends BaseDL4JTest { RnnOutputLayer rnnol = (RnnOutputLayer) mlnRnn.getOutputLayer(); //assertArrayEquals(rnnol.getInput().shape(),new int[]{miniBatchSize,layerSize,timeSeriesLength}); - //Input may be set by BaseLayer methods. Thus input may end up as reshaped 2d version instead of original 3d version. + //Input may be set by BaseLayerConfiguration methods. Thus input may end up as reshaped 2d version instead of original 3d version. //Not ideal, but everything else works. 
assertArrayEquals(rnnol.getLabels().shape(), new long[] {miniBatchSize, nOut, timeSeriesLength}); @@ -271,8 +270,8 @@ public class OutputLayerTest extends BaseDL4JTest { int nOut = 6; int miniBatchSize = 3; - MultiLayerConfiguration conf1 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf1 = + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .list() .layer(new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH) @@ -288,8 +287,8 @@ public class OutputLayerTest extends BaseDL4JTest { mln.init(); - MultiLayerConfiguration conf2 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf2 = + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .list() .layer(new LSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH) @@ -304,7 +303,7 @@ public class OutputLayerTest extends BaseDL4JTest { MultiLayerNetwork mln2 = new MultiLayerNetwork(conf2); mln2.init(); - mln2.setParams(mln.params()); + mln2.setParams(mln.getModelParams()); INDArray in = Nd4j.rand(miniBatchSize, nIn, timeSeriesLength); @@ -331,7 +330,7 @@ public class OutputLayerTest extends BaseDL4JTest { mln2.computeGradientAndScore(); assertEquals(mln.gradient().gradient(), mln2.gradient().gradient()); - assertEquals(mln.score(), mln2.score(), 1e-6); + assertEquals(mln.getScore(), mln2.getScore(), 1e-6); TestUtils.testModelSerialization(mln); } @@ -348,8 +347,8 @@ public class OutputLayerTest extends BaseDL4JTest { //Check that (A+identity) is equal to (identity+A), for activation A //i.e., should get same output and weight gradients for both - MultiLayerConfiguration conf1 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf1 = + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) .inferenceWorkspaceMode(ws) @@ -364,8 +363,8 @@ public class OutputLayerTest extends BaseDL4JTest { .build()) .build(); - MultiLayerConfiguration conf2 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf2 = + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) .inferenceWorkspaceMode(ws) @@ -387,7 +386,7 @@ public class OutputLayerTest extends BaseDL4JTest { mln2.init(); - mln2.setParams(mln.params()); + mln2.setParams(mln.getModelParams()); INDArray in = Nd4j.rand(3, 3, 5, 5); @@ -408,7 +407,7 @@ public class OutputLayerTest extends BaseDL4JTest { mln.computeGradientAndScore(); mln2.computeGradientAndScore(); - assertEquals(mln.score(), mln2.score(), 1e-6); + assertEquals(mln.getScore(), mln2.getScore(), 1e-6); assertEquals(mln.gradient().gradient(), mln2.gradient().gradient()); //Also check computeScoreForExamples @@ -438,7 +437,7 @@ public class OutputLayerTest extends BaseDL4JTest { //i.e., should get same output and weight gradients for both ComputationGraphConfiguration conf1 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) .inferenceWorkspaceMode(ws) @@ -456,7 +455,7 @@ public class OutputLayerTest extends BaseDL4JTest { .build(); ComputationGraphConfiguration conf2 = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) .inferenceWorkspaceMode(ws) @@ -480,7 +479,7 @@ public class OutputLayerTest extends BaseDL4JTest { graph2.init(); - 
graph2.setParams(graph.params()); + graph2.setParams(graph.getModelParams()); INDArray in = Nd4j.rand(3, 3, 5, 5); @@ -501,7 +500,7 @@ public class OutputLayerTest extends BaseDL4JTest { graph.computeGradientAndScore(); graph2.computeGradientAndScore(); - assertEquals(graph.score(), graph2.score(), 1e-6); + assertEquals(graph.getScore(), graph2.getScore(), 1e-6); assertEquals(graph.gradient().gradient(), graph2.gradient().gradient()); //Also check computeScoreForExamples @@ -524,8 +523,8 @@ public class OutputLayerTest extends BaseDL4JTest { public void testCnnOutputLayerSoftmax(){ //Check that softmax is applied channels-wise - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L) .updater(new NoOp()) .convolutionMode(ConvolutionMode.Same) .list() @@ -555,19 +554,19 @@ public class OutputLayerTest extends BaseDL4JTest { @Test public void testOutputLayerDefaults(){ - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.builder().list() .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder().nIn(10).nOut(10).build()) .build(); - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.builder().list() .layer(new org.deeplearning4j.nn.conf.layers.LossLayer.Builder().build()) .build(); - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.builder().list() .layer(new org.deeplearning4j.nn.conf.layers.CnnLossLayer.Builder().build()) .build(); - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.builder().list() .layer(new org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer.Builder().build()) .build(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/RepeatVectorTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/RepeatVectorTest.java index 3e526e774..a62ccdcf0 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/RepeatVectorTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/RepeatVectorTest.java @@ -32,8 +32,6 @@ import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.common.primitives.Pair; -import java.util.Arrays; - import static org.junit.jupiter.api.Assertions.*; public class RepeatVectorTest extends BaseDL4JTest { @@ -42,10 +40,10 @@ public class RepeatVectorTest extends BaseDL4JTest { private Layer getRepeatVectorLayer() { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .dataType(DataType.DOUBLE) .layer(new RepeatVector.Builder(REPEAT).build()).build(); - return conf.getLayer().instantiate(conf, null, 0, + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, null, false, DataType.DOUBLE); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/SeedTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/SeedTest.java index 4d46d5066..f1e64f204 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/SeedTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/SeedTest.java @@ -50,22 +50,22 @@ public class SeedTest extends BaseDL4JTest { .activation(Activation.SIGMOID).build(); NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().layer(layerType).seed(123).build(); + 
NeuralNetConfiguration.builder().layer(layerType).seed(123).build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(Nd4j.create(1, numParams)); layer.fit(data.getFeatures(), LayerWorkspaceMgr.noWorkspaces()); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - double score = layer.score(); - INDArray parameters = layer.params(); + double score = layer.getScore(); + INDArray parameters = layer.getParams(); layer.setParams(parameters); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - double score2 = layer.score(); - assertEquals(parameters, layer.params()); + double score2 = layer.getScore(); + assertEquals(parameters, layer.getParams()); assertEquals(score, score2, 1e-4); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/TestDropout.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/TestDropout.java index 67f66fb21..e17653219 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/TestDropout.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/TestDropout.java @@ -20,26 +20,20 @@ package org.deeplearning4j.nn.layers; -import lombok.val; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.distribution.UniformDistribution; -import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.api.iter.NdIndexIterator; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.lang.reflect.Field; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -50,12 +44,12 @@ public class TestDropout extends BaseDL4JTest { @Test public void testDropoutSimple() throws Exception { //Testing dropout with a single layer - //Layer input: values should be set to either 0.0 or 2.0x original value + //ILayer input: values should be set to either 0.0 or 2.0x original value int nIn = 8; int nOut = 8; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Sgd()) .dropOut(0.5).list() .layer(0, new OutputLayer.Builder().activation(Activation.IDENTITY) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsNetMNISTTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsNetMNISTTest.java index 18c285baf..6b307a68c 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsNetMNISTTest.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsNetMNISTTest.java @@ -25,7 +25,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.IOException; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ActivationLayer; @@ -55,7 +54,7 @@ public class CapsNetMNISTTest extends BaseDL4JTest { @Test public void testCapsNetOnMNIST(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(123) .updater(new Adam()) .list() @@ -72,7 +71,7 @@ public class CapsNetMNISTTest extends BaseDL4JTest { .layer(new CapsuleStrengthLayer.Builder().build()) .layer(new ActivationLayer.Builder(new ActivationSoftmax()).build()) .layer(new LossLayer.Builder(new LossNegativeLogLikelihood()).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) + .inputType(InputType.convolutionalFlat(28, 28, 1)) .build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleLayerTest.java index 70e503c42..4536b915b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleLayerTest.java @@ -26,7 +26,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.CapsuleLayer; @@ -81,11 +80,11 @@ public class CapsuleLayerTest extends BaseDL4JTest { @Test public void testLayer(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(123) .list() .layer(new CapsuleLayer.Builder(10, 16, 3).build()) - .setInputType(InputType.recurrent(10, 8)) + .inputType(InputType.recurrent(10, 8)) .build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleStrengthLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleStrengthLayerTest.java index fac472d68..388d380dc 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleStrengthLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/CapsuleStrengthLayerTest.java @@ -24,7 +24,6 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.CapsuleStrengthLayer; @@ -52,11 +51,11 @@ public class CapsuleStrengthLayerTest extends 
BaseDL4JTest { @Test public void testLayer(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(123) .list() .layer(new CapsuleStrengthLayer.Builder().build()) - .setInputType(InputType.recurrent(5, 8)) + .inputType(InputType.recurrent(5, 8)) .build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/PrimaryCapsulesTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/PrimaryCapsulesTest.java index 5840ec85f..12f63e7ec 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/PrimaryCapsulesTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/capsule/PrimaryCapsulesTest.java @@ -26,7 +26,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.PrimaryCapsules; @@ -106,7 +105,7 @@ public class PrimaryCapsulesTest extends BaseDL4JTest { @Test public void testLayer(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(123) .list() .layer(new PrimaryCapsules.Builder(8, 10) @@ -114,7 +113,7 @@ public class PrimaryCapsulesTest extends BaseDL4JTest { .stride(4, 4) .useLeakyReLU(0.5) .build()) - .setInputType(InputType.convolutional(20, 20, 20)) + .inputType(InputType.convolutional(20, 20, 20)) .build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvDataFormatTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvDataFormatTests.java index 7c07bfeb2..6f24ff226 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvDataFormatTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvDataFormatTests.java @@ -758,8 +758,8 @@ public class ConvDataFormatTests extends BaseDL4JTest { } } - private MultiLayerNetwork getNetWithLayer(Layer layer, CNN2DFormat format, ConvolutionMode cm, InputType inputType) { - NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder() + private MultiLayerNetwork getNetWithLayer(LayerConfiguration layer, CNN2DFormat format, ConvolutionMode cm, InputType inputType) { + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder() .dataType(this.dataType) .seed(12345) .convolutionMode(cm) @@ -774,7 +774,7 @@ public class ConvDataFormatTests extends BaseDL4JTest { .layer(layer) .layer(new OutputLayer.Builder().nOut(10) .activation(Activation.SOFTMAX).build()) - .setInputType(inputType != null ? inputType : InputType.convolutional(12, 12, 3, format)); + .inputType(inputType != null ? 
inputType : InputType.convolutional(12, 12, 3, format)); if(format == CNN2DFormat.NHWC && !(layer instanceof GlobalPoolingLayer)){ //Add a preprocessor due to the differences in how NHWC and NCHW activations are flattened @@ -799,7 +799,7 @@ public class ConvDataFormatTests extends BaseDL4JTest { } private MultiLayerNetwork getCnnLossNet(CNN2DFormat format, boolean setOnLayerAlso, ConvolutionMode cm){ - NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder() .seed(12345) .convolutionMode(cm) .list() @@ -819,7 +819,7 @@ public class ConvDataFormatTests extends BaseDL4JTest { .activation(Activation.SOFTMAX).build()); } - builder.setInputType(InputType.convolutional(12, 12, 3, format)); + builder.inputType(InputType.convolutional(12, 12, 3, format)); MultiLayerNetwork net = new MultiLayerNetwork(builder.build()); net.init(); @@ -845,9 +845,9 @@ public class ConvDataFormatTests extends BaseDL4JTest { public static void testHelper(TestCase tc) { - tc.net2.params().assign(tc.net1.params()); - tc.net3.params().assign(tc.net1.params()); - tc.net4.params().assign(tc.net1.params()); + tc.net2.getModelParams().assign(tc.net1.getModelParams()); + tc.net3.getModelParams().assign(tc.net1.getModelParams()); + tc.net4.getModelParams().assign(tc.net1.getModelParams()); //Test forward pass: INDArray inNCHW = tc.inNCHW; @@ -909,9 +909,9 @@ public class ConvDataFormatTests extends BaseDL4JTest { tc.net3.fit(inNHWC, tc.labelsNHWC); tc.net4.fit(inNHWC, tc.labelsNHWC); - assertEquals(tc.net1.params(), tc.net2.params(), tc.msg); - assertEquals(tc.net1.params(), tc.net3.params(), tc.msg); - assertEquals(tc.net1.params(), tc.net4.params(), tc.msg); + assertEquals(tc.net1.getModelParams(), tc.net2.getModelParams(), tc.msg); + assertEquals(tc.net1.getModelParams(), tc.net3.getModelParams(), tc.msg); + assertEquals(tc.net1.getModelParams(), tc.net4.getModelParams(), tc.msg); //Test serialization MultiLayerNetwork net1a = TestUtils.testModelSerialization(tc.net1); @@ -984,24 +984,24 @@ public class ConvDataFormatTests extends BaseDL4JTest { for(CNN2DFormat df : CNN2DFormat.values()) { for(int i = 0; i < 4; i++) { - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder() - .list(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder b = NeuralNetConfiguration.builder(); + switch (i){ case 0: b.layer(new ConvolutionLayer.Builder().kernelSize(2,2).nIn(3).nOut(3).dataFormat(df).build()); - b.setInputType(InputType.convolutional(12,12,3,df)); + b.inputType(InputType.convolutional(12,12,3,df)); break; case 1: b.layer(new DepthwiseConvolution2D.Builder().kernelSize(2,2).nIn(3).nOut(3).dataFormat(df).build()); - b.setInputType(InputType.convolutional(12,12,3,df)); + b.inputType(InputType.convolutional(12,12,3,df)); break; case 2: b.layer(new Deconvolution2D.Builder().dataFormat(df).kernelSize(2,2).nIn(3).nOut(3).build()); - b.setInputType(InputType.convolutional(12,12,3,df)); + b.inputType(InputType.convolutional(12,12,3,df)); break; case 3: b.layer(new SeparableConvolution2D.Builder().dataFormat(df).kernelSize(2,2).nIn(3).nOut(3).build()); - b.setInputType(InputType.convolutional(12,12,3,df)); + b.inputType(InputType.convolutional(12,12,3,df)); break; } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Convolution3DTest.java 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Convolution3DTest.java index d282690bb..c8137f4a6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Convolution3DTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Convolution3DTest.java @@ -34,8 +34,6 @@ import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.factory.Nd4j; -import java.util.Arrays; - import static org.junit.jupiter.api.Assertions.*; /** @@ -86,15 +84,15 @@ public class Convolution3DTest extends BaseDL4JTest { } private Layer getConvolution3DLayer(ConvolutionMode mode) { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123) .layer(new Convolution3D.Builder().kernelSize(kernelSize).nIn(nChannelsIn).nOut(nChannelsOut) .dataFormat(Convolution3D.DataFormat.NCDHW).convolutionMode(mode).hasBias(false) .build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.ones(1, numParams); - return conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); } public INDArray getData() throws Exception { diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerSetupTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerSetupTest.java index 246dfee5b..1af476e5e 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerSetupTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerSetupTest.java @@ -27,8 +27,8 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -71,10 +71,10 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testConvolutionLayerSetup() { - MultiLayerConfiguration.Builder builder = inComplete(); - builder.setInputType(InputType.convolutionalFlat(28, 28, 1)); - MultiLayerConfiguration completed = complete().build(); - MultiLayerConfiguration test = builder.build(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = inComplete(); + builder.inputType(InputType.convolutionalFlat(28, 28, 1)); + NeuralNetConfiguration completed = complete().build(); + NeuralNetConfiguration test = builder.build(); assertEquals(completed, test); } @@ -90,7 +90,7 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { int seed = 123; //setup the network - MultiLayerConfiguration.Builder builder = new 
NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l1(1e-1).l2(2e-4).dropOut(0.5).miniBatch(true) .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() .layer(0, new ConvolutionLayer.Builder(5, 5).nOut(5).dropOut(0.5).weightInit(WeightInit.XAVIER) @@ -106,7 +106,7 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { .nOut(outputNum).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutional(numRows, numColumns, nChannels)); + .inputType(InputType.convolutional(numRows, numColumns, nChannels)); DataSet d = new DataSet(Nd4j.rand(10, nChannels, numRows, numColumns), FeatureUtil.toOutcomeMatrix(new int[] {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 6)); @@ -119,10 +119,10 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testMnistLenet() throws Exception { - MultiLayerConfiguration.Builder incomplete = incompleteMnistLenet(); - incomplete.setInputType(InputType.convolutionalFlat(28, 28, 1)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder incomplete = incompleteMnistLenet(); + incomplete.inputType(InputType.convolutionalFlat(28, 28, 1)); - MultiLayerConfiguration testConf = incomplete.build(); + NeuralNetConfiguration testConf = incomplete.build(); assertEquals(800, ((FeedForwardLayer) testConf.getConf(4).getLayer()).getNIn()); assertEquals(500, ((FeedForwardLayer) testConf.getConf(5).getLayer()).getNIn()); @@ -141,9 +141,9 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { INDArray labels = Nd4j.rand(10, 2); DataSet next = new DataSet(in, labels); - NeuralNetConfiguration.ListBuilder builder = (NeuralNetConfiguration.ListBuilder) incompleteLFW(); - builder.setInputType(InputType.convolutional(28, 28, 3)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = incompleteLFW(); + builder.inputType(InputType.convolutional(28, 28, 3)); + NeuralNetConfiguration conf = builder.build(); ConvolutionLayer layer2 = (ConvolutionLayer) conf.getConf(2).getLayer(); assertEquals(6, layer2.getNIn()); @@ -163,10 +163,10 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { reader.initialize(new FileSplit(new File(rootDir))); DataSetIterator recordReader = new RecordReaderDataSetIterator(reader, 10, 1, labels.size()); labels.remove("lfwtest"); - NeuralNetConfiguration.ListBuilder builder = (NeuralNetConfiguration.ListBuilder) incompleteLRN(); - builder.setInputType(InputType.convolutional(28, 28, 3)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = incompleteLRN(); + builder.inputType(InputType.convolutional(28, 28, 3)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); ConvolutionLayer layer2 = (ConvolutionLayer) conf.getConf(3).getLayer(); assertEquals(6, layer2.getNIn()); @@ -174,70 +174,70 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { } - public MultiLayerConfiguration.Builder incompleteLRN() { - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder().seed(3) - .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() - .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - new int[] {5, 5}).nOut(6).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder( - new int[] {2, 2}).build()) - .layer(2, new LocalResponseNormalization.Builder().build()) - 
.layer(3, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - new int[] {5, 5}).nOut(6).build()) - .layer(4, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder( - new int[] {2, 2}).build()) - .layer(5, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(2) - .activation(Activation.SOFTMAX).build()); + public NeuralNetConfiguration.NeuralNetConfigurationBuilder incompleteLRN() { + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder().seed(3) + .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() + .layer(0, new ConvolutionLayer.Builder( + new int[] {5, 5}).nOut(6).build()) + .layer(1, new SubsamplingLayer.Builder( + new int[] {2, 2}).build()) + .layer(2, new LocalResponseNormalization.Builder().build()) + .layer(3, new ConvolutionLayer.Builder( + new int[] {5, 5}).nOut(6).build()) + .layer(4, new SubsamplingLayer.Builder( + new int[] {2, 2}).build()) + .layer(5, new OutputLayer.Builder( + LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(2) + .activation(Activation.SOFTMAX).build()); return builder; } - public MultiLayerConfiguration.Builder incompleteLFW() { - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder().seed(3) - .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() - .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - new int[] {5, 5}).nOut(6).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder( - new int[] {2, 2}).build()) - .layer(2, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - new int[] {5, 5}).nOut(6).build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder( - new int[] {2, 2}).build()) - .layer(4, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).activation(Activation.SOFTMAX) - .nOut(2).build()); + public NeuralNetConfiguration.NeuralNetConfigurationBuilder incompleteLFW() { + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder().seed(3) + .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() + .layer(0, new ConvolutionLayer.Builder( + new int[] {5, 5}).nOut(6).build()) + .layer(1, new SubsamplingLayer.Builder( + new int[] {2, 2}).build()) + .layer(2, new ConvolutionLayer.Builder( + new int[] {5, 5}).nOut(6).build()) + .layer(3, new SubsamplingLayer.Builder( + new int[] {2, 2}).build()) + .layer(4, new OutputLayer.Builder( + LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).activation(Activation.SOFTMAX) + .nOut(2).build()); return builder; } - public MultiLayerConfiguration.Builder incompleteMnistLenet() { - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder().seed(3) - .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() - .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - new int[] {5, 5}).nIn(1).nOut(20).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder( - new int[] {2, 2}, new int[] {2, 2}).build()) - .layer(2, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - new int[] {5, 5}).nIn(20).nOut(50).build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.SubsamplingLayer.Builder( - new int[] {2, 2}, new int[] {2, 2}).build()) - .layer(4, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nOut(500) - .build()) - .layer(5, new 
org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) - .activation(Activation.SOFTMAX).nOut(10) - .build()); + public NeuralNetConfiguration.NeuralNetConfigurationBuilder incompleteMnistLenet() { + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder().seed(3) + .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() + .layer(0, new ConvolutionLayer.Builder( + new int[] {5, 5}).nIn(1).nOut(20).build()) + .layer(1, new SubsamplingLayer.Builder( + new int[] {2, 2}, new int[] {2, 2}).build()) + .layer(2, new ConvolutionLayer.Builder( + new int[] {5, 5}).nIn(20).nOut(50).build()) + .layer(3, new SubsamplingLayer.Builder( + new int[] {2, 2}, new int[] {2, 2}).build()) + .layer(4, new DenseLayer.Builder().nOut(500) + .build()) + .layer(5, new OutputLayer.Builder( + LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) + .activation(Activation.SOFTMAX).nOut(10) + .build()); return builder; } - public MultiLayerConfiguration mnistLenet() { - MultiLayerConfiguration builder = - new NeuralNetConfiguration.Builder().seed(3) + public NeuralNetConfiguration mnistLenet() { + NeuralNetConfiguration builder = + NeuralNetConfiguration.builder().seed(3) .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( new int[] {5, 5}).nIn(1).nOut(6).build()) @@ -254,12 +254,12 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { return builder; } - public MultiLayerConfiguration.Builder inComplete() { + public NeuralNetConfiguration.NeuralNetConfigurationBuilder inComplete() { int nChannels = 1; int outputNum = 10; int seed = 123; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(new int[] {10, 10}, new int[] {2, 2}).nIn(nChannels).nOut(6).build()) @@ -274,14 +274,14 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { } - public MultiLayerConfiguration.Builder complete() { + public NeuralNetConfiguration.NeuralNetConfigurationBuilder complete() { final int numRows = 28; final int numColumns = 28; int nChannels = 1; int outputNum = 10; int seed = 123; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder(new int[] {10, 10}, new int[] {2, 2}).nIn(nChannels).nOut(6).build()) @@ -301,15 +301,15 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testDeconvolution() { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() //out = stride * (in-1) + filter - 2*pad -> 2 * (28-1) + 2 - 0 = 56 -> 56x56x3 .layer(0, new Deconvolution2D.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(56-2+2*1)/2+1 = 29 -> 29x29x3 .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) .layer(2, new 
OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); @@ -324,13 +324,13 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testSubSamplingWithPadding() { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() .layer(0, new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14 .layer(1, new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(14-2+2)/2+1 = 8 -> 8x8x3 .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); @@ -345,13 +345,13 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testUpsampling() { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14 .layer(new Upsampling2D.Builder().size(3).build()) // 14 * 3 = 42! .layer(new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); @@ -368,13 +368,13 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { int[] blocks = new int[] {2, 2}; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14 .layer(new SpaceToBatchLayer.Builder(blocks).build()) // Divide space dimensions by blocks, i.e. 
14/2 = 7 .layer(new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); @@ -389,15 +389,15 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { int blocks = 2; - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() //(28-2+0)/2+1 = 14 -> 14x14x3 out .layer(new ConvolutionLayer.Builder(2, 2).padding(0, 0).stride(2, 2).nIn(1).nOut(3).build()) // Divide space dimensions by blocks, i.e. 14/2 = 7 -> 7x7x12 out (3x2x2 depth) .layer(new SpaceToDepthLayer.Builder(blocks, SpaceToDepthLayer.DataFormat.NCHW).build()) .layer(new OutputLayer.Builder().nIn(3 * 2 * 2).nOut(3).activation(Activation.SOFTMAX).build()) // nIn of the next layer gets multiplied by 2*2. - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); @@ -415,7 +415,7 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { DataSet next = iter.next(); // Run with separate activation layer - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .weightInit(WeightInit.XAVIER).list() .layer(0, new ConvolutionLayer.Builder(new int[] {1, 1}, new int[] {1, 1}).nIn(1).nOut(6) @@ -428,7 +428,7 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { .layer(5, new ActivationLayer.Builder().activation(Activation.RELU).build()) .layer(6, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -447,16 +447,16 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testSeparableConv2D() { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() .layer( new SeparableConvolution2D.Builder(2, 2) .depthMultiplier(2) .padding(0, 0) .stride(2, 2).nIn(1).nOut(3).build()) //(28-2+0)/2+1 = 14 .layer( new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) //(14-2+2)/2+1 = 8 -> 8x8x3 .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); @@ -471,7 +471,7 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { @Test public void testDeconv2D() { - 
MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list() //out = stride * (in-1) + filter - 2*pad -> 2 * (28-1) + 2 - 0 = 56 -> 56x56x3 .layer( new Deconvolution2D.Builder(2, 2) .padding(0, 0) @@ -479,9 +479,9 @@ public class ConvolutionLayerSetupTest extends BaseDL4JTest { //(56-2+2*1)/2+1 = 29 -> 29x29x3 .layer( new SubsamplingLayer.Builder().kernelSize(2, 2).padding(1, 1).stride(2, 2).build()) .layer(2, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, 1)); + .inputType(InputType.convolutional(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); assertNotNull(conf.getInputPreProcess(2)); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerTest.java index 6b68d6cea..4b5458b15 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayerTest.java @@ -29,20 +29,16 @@ import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.Convolution1DLayer; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; import org.deeplearning4j.nn.conf.layers.*; -import org.deeplearning4j.nn.modelimport.keras.KerasModelImport; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; -import org.deeplearning4j.nn.weights.WeightInitNormal; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.junit.jupiter.api.Test; -import org.nd4j.enums.RnnDataFormat; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.impl.ActivationSoftmax; import org.nd4j.linalg.api.buffer.DataType; @@ -59,8 +55,6 @@ import org.nd4j.linalg.learning.config.Nesterovs; import org.nd4j.linalg.lossfunctions.LossFunctions; import org.nd4j.linalg.lossfunctions.impl.LossMCXENT; -import java.io.File; -import java.util.Arrays; import java.util.List; import static org.junit.jupiter.api.Assertions.*; @@ -77,7 +71,7 @@ public class ConvolutionLayerTest extends BaseDL4JTest { @Test public void testTwdFirstLayer() throws Exception { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).l2(2e-4) .updater(new Nesterovs(0.9)).dropOut(0.5) .list().layer(0, @@ -94,10 +88,10 @@ public class ConvolutionLayerTest extends BaseDL4JTest { .dropOut(0.5).build()) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS) //output layer 
.nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)); + .inputType(InputType.convolutionalFlat(28, 28, 1)); DataSetIterator iter = new MnistDataSetIterator(10, 10); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); DataSet ds = iter.next(); @@ -118,21 +112,21 @@ public class ConvolutionLayerTest extends BaseDL4JTest { int kernelWidth = 3; DataSet trainInput; - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder() - .seed(123) - .list() - .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth).stride(1, 1) - .nOut(2).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(1, new SubsamplingLayer.Builder() - .poolingType(SubsamplingLayer.PoolingType.MAX) - .kernelSize(imageHeight - kernelHeight, 1).stride(1, 1).build()) - .layer(2, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(imageHeight, imageWidth, nChannels)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder() + .seed(123) + .list() + .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth).stride(1, 1) + .nOut(2).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(1, new SubsamplingLayer.Builder() + .poolingType(SubsamplingLayer.PoolingType.MAX) + .kernelSize(imageHeight - kernelHeight, 1).stride(1, 1).build()) + .layer(2, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).build()) + .inputType(InputType.convolutionalFlat(imageHeight, imageWidth, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); @@ -155,9 +149,9 @@ public class ConvolutionLayerTest extends BaseDL4JTest { long batchSize = 1; INDArray arr = Nd4j.randn(batchSize,vectorLength,timeSteps); - MultiLayerConfiguration build = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration build = NeuralNetConfiguration.builder().seed(seed) .activation(Activation.RELU) - .weightInit(new WeightInitNormal()) // better init + .weightInit(WeightInit.NORMAL) // better init .updater(new Adam(learningRate)) .list() // block 1 @@ -172,7 +166,7 @@ public class ConvolutionLayerTest extends BaseDL4JTest { .layer(new RnnLossLayer.Builder().dataFormat(RNNFormat.NCW) .activation(new ActivationSoftmax()) .lossFunction(new LossMCXENT()).build()) - .setInputType(InputType.recurrent(vectorLength,timeSteps,RNNFormat.NCW)) + .inputType(InputType.recurrent(vectorLength,timeSteps,RNNFormat.NCW)) .build(); MultiLayerNetwork network = new MultiLayerNetwork(build); @@ -196,18 +190,18 @@ public class ConvolutionLayerTest extends BaseDL4JTest { int kernelWidth = imageWidth + 1; DataSet trainInput; - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder() - .seed(123) - .list() - .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth) //(img-kernel+2*padding)/stride + 1: must be >= 1. 
Therefore: with p=0, kernel <= img size - .stride(1, 1).nOut(2).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(1, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(imageHeight, imageWidth, nChannels)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder() + .seed(123) + .list() + .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth) //(img-kernel+2*padding)/stride + 1: must be >= 1. Therefore: with p=0, kernel <= img size + .stride(1, 1).nOut(2).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(1, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).build()) + .inputType(InputType.convolutionalFlat(imageHeight, imageWidth, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); @@ -232,19 +226,19 @@ public class ConvolutionLayerTest extends BaseDL4JTest { int kernelWidth = imageWidth; DataSet trainInput; - MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder() - .seed(123) - .list() - .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth).stride(1, 0) - .nOut(2).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(1, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).build()) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder() + .seed(123) + .list() + .layer(0, new ConvolutionLayer.Builder(kernelHeight, kernelWidth).stride(1, 0) + .nOut(2).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(1, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(imageHeight, imageWidth, nChannels)); + .inputType(InputType.convolutional(imageHeight, imageWidth, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); @@ -260,11 +254,11 @@ public class ConvolutionLayerTest extends BaseDL4JTest { public void testCNNBiasInit() { ConvolutionLayer cnn = new ConvolutionLayer.Builder().nIn(1).nOut(3).biasInit(1).build(); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().layer(cnn).build(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer(cnn).build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); assertEquals(1, layer.getParam("b").size(0)); } @@ -321,11 +315,11 @@ public class ConvolutionLayerTest extends BaseDL4JTest { ConvolutionLayer layer = new ConvolutionLayer.Builder(kernelSize, stride, padding).nIn(nIn).nOut(nOut) .activation(Activation.SIGMOID).build(); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().layer(layer).build(); + NeuralNetConfiguration conf = 
NeuralNetConfiguration.builder().layer(layer).build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - return conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); } public Layer getMNISTConfig() { @@ -454,10 +448,10 @@ public class ConvolutionLayerTest extends BaseDL4JTest { MultiLayerNetwork net = getCNNMLNConfig(true, false); - INDArray paramsOrig = net.params().dup(); + INDArray paramsOrig = net.getModelParams().dup(); net.setParams(paramsOrig); - INDArray params2 = net.params(); + INDArray params2 = net.getModelParams(); assertEquals(paramsOrig, params2); } @@ -695,17 +689,17 @@ public class ConvolutionLayerTest extends BaseDL4JTest { int outputNum = 10; int seed = 123; - MultiLayerConfiguration.Builder conf = - new NeuralNetConfiguration.Builder().seed(seed) - .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list() - .layer(0, new ConvolutionLayer.Builder(new int[] {10, 10}).nOut(6).build()) - .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, - new int[] {2, 2}).stride(1, 1).build()) - .layer(2, new OutputLayer.Builder( - LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) - .nOut(outputNum).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder conf = + NeuralNetConfiguration.builder().seed(seed) + .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list() + .layer(0, new ConvolutionLayer.Builder(new int[] {10, 10}).nOut(6).build()) + .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, + new int[] {2, 2}).stride(1, 1).build()) + .layer(2, new OutputLayer.Builder( + LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) + .nOut(outputNum).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).build()) + .inputType(InputType.convolutionalFlat(28, 28, 1)); MultiLayerNetwork model = new MultiLayerNetwork(conf.build()); model.init(); @@ -718,14 +712,14 @@ public class ConvolutionLayerTest extends BaseDL4JTest { @Test public void test1dInputType(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .convolutionMode(ConvolutionMode.Same) .list() .layer(new Convolution1DLayer.Builder().nOut(3).kernelSize(2).activation(Activation.TANH).build()) .layer(new Subsampling1DLayer.Builder().kernelSize(2).stride(2).build()) .layer(new Upsampling1D.Builder().size(2).build()) .layer(new RnnOutputLayer.Builder().nOut(7).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.recurrent(10)) + .inputType(InputType.recurrent(10)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -751,7 +745,7 @@ public class ConvolutionLayerTest extends BaseDL4JTest { @Test public void testDeconvBadInput(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new Deconvolution2D.Builder().nIn(5).nOut(3).build()) .build(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/LocallyConnectedLayerTest.java 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/LocallyConnectedLayerTest.java index e4921b555..c39a785c1 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/LocallyConnectedLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/LocallyConnectedLayerTest.java @@ -25,8 +25,8 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -64,7 +64,7 @@ public class LocallyConnectedLayerTest extends BaseDL4JTest { @Test public void test2dForward(){ - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).l2(2e-4) .updater(new Nesterovs(0.9)).dropOut(0.5) .list() @@ -77,9 +77,9 @@ public class LocallyConnectedLayerTest extends BaseDL4JTest { .build()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS) //output layer .nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 3)); + .inputType(InputType.convolutionalFlat(28, 28, 3)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -91,7 +91,7 @@ public class LocallyConnectedLayerTest extends BaseDL4JTest { @Test public void test1dForward(){ - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).l2(2e-4) .updater(new Nesterovs(0.9)).dropOut(0.5) .list() @@ -104,9 +104,9 @@ public class LocallyConnectedLayerTest extends BaseDL4JTest { .build()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS) //output layer .nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.recurrent(3, 8)); + .inputType(InputType.recurrent(3, 8)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -132,7 +132,7 @@ public class LocallyConnectedLayerTest extends BaseDL4JTest { for (int test = 0; test < 2; test++) { String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype + ", test=" + test; - ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder b = NeuralNetConfiguration.builder() .dataType(networkDtype) .seed(123) .updater(new NoOp()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SpaceToDepthTest.java 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SpaceToDepthTest.java index 0ee4e322f..1c47e1b2d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SpaceToDepthTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SpaceToDepthTest.java @@ -31,8 +31,6 @@ import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import java.util.Arrays; - import static org.junit.jupiter.api.Assertions.*; public class SpaceToDepthTest extends BaseDL4JTest { @@ -61,10 +59,10 @@ public class SpaceToDepthTest extends BaseDL4JTest { } private Layer getSpaceToDepthLayer() { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123) .layer(new SpaceToDepthLayer.Builder(blockSize, dataFormat).build()).build(); - return conf.getLayer().instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); } @Test diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SubsamplingLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SubsamplingLayerTest.java index 75434a4c3..4cc8341cc 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SubsamplingLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/SubsamplingLayerTest.java @@ -24,8 +24,8 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; @@ -44,8 +44,6 @@ import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.factory.Nd4j; -import java.util.Arrays; - import static org.junit.jupiter.api.Assertions.*; /** @@ -170,11 +168,11 @@ public class SubsamplingLayerTest extends BaseDL4JTest { ////////////////////////////////////////////////////////////////////////////////// private Layer getSubsamplingLayer(SubsamplingLayer.PoolingType pooling) { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123) .layer(new SubsamplingLayer.Builder(pooling, new int[] {2, 2}).build()).build(); - return conf.getLayer().instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); } public INDArray getData() throws Exception { @@ -214,23 +212,23 @@ public class SubsamplingLayerTest extends BaseDL4JTest { int kernelWidth = 3; DataSet trainInput; - 
MultiLayerConfiguration.Builder builder = - new NeuralNetConfiguration.Builder().seed(123).list() - .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( - kernelHeight, kernelWidth).stride(1, 1).nOut(2) - .activation(Activation.RELU).weightInit( - WeightInit.XAVIER) - .build()) - .layer(1, new SubsamplingLayer.Builder() - .poolingType(SubsamplingLayer.PoolingType.MAX) - .kernelSize(imageHeight - kernelHeight + 2, 1) //imageHeight-kernelHeight+1 is ok: full height - .stride(1, 1).build()) - .layer(2, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).build()) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = + NeuralNetConfiguration.builder().seed(123).list() + .layer(0, new org.deeplearning4j.nn.conf.layers.ConvolutionLayer.Builder( + kernelHeight, kernelWidth).stride(1, 1).nOut(2) + .activation(Activation.RELU).weightInit( + WeightInit.XAVIER) + .build()) + .layer(1, new SubsamplingLayer.Builder() + .poolingType(SubsamplingLayer.PoolingType.MAX) + .kernelSize(imageHeight - kernelHeight + 2, 1) //imageHeight-kernelHeight+1 is ok: full height + .stride(1, 1).build()) + .layer(2, new OutputLayer.Builder().nOut(classes).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(imageHeight, imageWidth, nChannels)); + .inputType(InputType.convolutional(imageHeight, imageWidth, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/TestConvolutionModes.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/TestConvolutionModes.java index 35ba6d924..8cdc85768 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/TestConvolutionModes.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/TestConvolutionModes.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.exception.DL4JException; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.LayerVertex; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -79,7 +78,7 @@ public class TestConvolutionModes extends BaseDL4JTest { inputData.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 9), NDArrayIndex.interval(0, 9)).assign(origData); - Layer layer; + LayerConfiguration layer; if (isSubsampling) { layer = new SubsamplingLayer.Builder().kernelSize(3, 3).stride(3, 3).padding(0, 0) .build(); @@ -90,15 +89,15 @@ public class TestConvolutionModes extends BaseDL4JTest { MultiLayerNetwork net = null; try { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(cm).list() .layer(0, layer).layer(1, new OutputLayer.Builder() .activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT) .nOut(3).build()) - .setInputType(InputType.convolutional(inSize, inSize, + .inputType(InputType.convolutional(inSize, inSize, inDepth)) .build(); @@ -158,7 +157,7 @@ 
public class TestConvolutionModes extends BaseDL4JTest { inputData.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 9), NDArrayIndex.interval(0, 9)).assign(origData); - Layer layer; + LayerConfiguration layer; if (isSubsampling) { layer = new SubsamplingLayer.Builder().kernelSize(3, 3).stride(3, 3).padding(0, 0) .build(); @@ -169,7 +168,7 @@ public class TestConvolutionModes extends BaseDL4JTest { ComputationGraph net = null; try { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER).convolutionMode(cm).graphBuilder() .addInputs("in").addLayer("0", layer, "in") .addLayer("1", new OutputLayer.Builder() @@ -210,7 +209,7 @@ public class TestConvolutionModes extends BaseDL4JTest { @Test public void testGlobalLocalConfig() { for (ConvolutionMode cm : new ConvolutionMode[] {ConvolutionMode.Strict, ConvolutionMode.Truncate}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(cm).list() .layer(0, new ConvolutionLayer.Builder().kernelSize(3, 3).stride(3, 3).padding(0, 0) .nIn(3).nOut( @@ -258,7 +257,7 @@ public class TestConvolutionModes extends BaseDL4JTest { public void testGlobalLocalConfigCompGraph() { for (ConvolutionMode cm : new ConvolutionMode[] {ConvolutionMode.Strict, ConvolutionMode.Truncate, ConvolutionMode.Same}) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(cm).graphBuilder().addInputs("in") .addLayer("0", new ConvolutionLayer.Builder().kernelSize(3, 3).stride(3, 3).padding(0, 0) .nIn(3).nOut( @@ -288,28 +287,28 @@ public class TestConvolutionModes extends BaseDL4JTest { .activation(Activation.SOFTMAX).nOut(3).build(), "7") .setOutputs("8").build(); - assertEquals(cm, ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer()) + assertEquals(cm, ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("0")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); assertEquals(ConvolutionMode.Strict, - ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer()) + ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("1")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); assertEquals(ConvolutionMode.Truncate, - ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer()) + ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("2")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); assertEquals(ConvolutionMode.Same, - ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("3")).getLayerConf().getLayer()) + ((ConvolutionLayer) ((LayerVertex) conf.getVertices().get("3")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); - assertEquals(cm, ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("4")).getLayerConf().getLayer()) + assertEquals(cm, ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("4")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); assertEquals(ConvolutionMode.Strict, - ((SubsamplingLayer) 
((LayerVertex) conf.getVertices().get("5")).getLayerConf().getLayer()) + ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("5")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); assertEquals(ConvolutionMode.Truncate, - ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("6")).getLayerConf().getLayer()) + ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("6")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); assertEquals(ConvolutionMode.Same, - ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("7")).getLayerConf().getLayer()) + ((SubsamplingLayer) ((LayerVertex) conf.getVertices().get("7")).getNetConfiguration().getFlattenedLayerConfigurations().get(0)) .getConvolutionMode()); } } @@ -437,15 +436,15 @@ public class TestConvolutionModes extends BaseDL4JTest { int kH = 3; int kW = 3; - Layer[] l = new Layer[2]; + LayerConfiguration[] l = new LayerConfiguration[2]; l[0] = new ConvolutionLayer.Builder().nOut(4).kernelSize(kH, kW).stride(sH, sW).build(); l[1] = new SubsamplingLayer.Builder().kernelSize(kH, kW).stride(sH, sW).build(); for (int i = 0; i < l.length; i++) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().convolutionMode(ConvolutionMode.Same) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().convolutionMode(ConvolutionMode.Same) .list().layer(0, l[i]).layer(1, new OutputLayer.Builder().nOut(3).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(inH, inW, inDepth)).build(); + .inputType(InputType.convolutional(inH, inW, inDepth)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling1DTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling1DTest.java index 277b43c31..064464d67 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling1DTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling1DTest.java @@ -36,8 +36,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.common.primitives.Pair; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import java.util.Arrays; - import static org.junit.jupiter.api.Assertions.*; /** @@ -106,10 +104,10 @@ public class Upsampling1DTest extends BaseDL4JTest { private Layer getUpsampling1DLayer() { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123) .layer(new Upsampling1D.Builder(size).build()).build(); - return conf.getLayer().instantiate(conf, null, 0, + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling2DTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling2DTest.java index e1d46f911..286259904 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling2DTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/convolution/Upsampling2DTest.java @@ -36,8 +36,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.common.primitives.Pair; import 
org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import java.util.Arrays; - import static org.junit.jupiter.api.Assertions.*; /** @@ -110,10 +108,10 @@ public class Upsampling2DTest extends BaseDL4JTest { private Layer getUpsamplingLayer() { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123) .layer(new Upsampling2D.Builder(size).build()).build(); - return conf.getLayer().instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); + return conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, null, true, Nd4j.defaultFloatingPointType()); } public INDArray getData() throws Exception { diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomActivation.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomActivation.java index 2f837fc2f..94994ea47 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomActivation.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomActivation.java @@ -21,21 +21,14 @@ package org.deeplearning4j.nn.layers.custom; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.layers.custom.testclasses.CustomActivation; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.introspect.AnnotatedClass; -import com.fasterxml.jackson.databind.jsontype.NamedType; - -import java.util.Collection; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -46,7 +39,7 @@ public class TestCustomActivation extends BaseDL4JTest { public void testCustomActivationFn() { //Second: let's create a MultiLayerCofiguration with one, and check JSON and YAML config actually works... 
- MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).activation(new CustomActivation()).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(10).nOut(10).build()) .build(); @@ -56,10 +49,10 @@ public class TestCustomActivation extends BaseDL4JTest { // System.out.println(json); - MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration confFromJson = NeuralNetConfiguration.fromJson(json); assertEquals(conf, confFromJson); - MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml); + NeuralNetConfiguration confFromYaml = NeuralNetConfiguration.fromYaml(yaml); assertEquals(conf, confFromYaml); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomLayers.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomLayers.java index a0de7f2df..75e48861d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomLayers.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/TestCustomLayers.java @@ -22,10 +22,8 @@ package org.deeplearning4j.nn.layers.custom; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; -import org.deeplearning4j.nn.conf.layers.Layer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.custom.testclasses.CustomLayer; @@ -39,13 +37,6 @@ import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.lossfunctions.LossFunctions; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.introspect.AnnotatedClass; -import com.fasterxml.jackson.databind.jsontype.NamedType; - -import java.util.Collection; -import java.util.HashSet; -import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -54,8 +45,8 @@ public class TestCustomLayers extends BaseDL4JTest { @Test public void testJsonMultiLayerNetwork() { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new CustomLayer(3.14159)).layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) @@ -67,10 +58,10 @@ public class TestCustomLayers extends BaseDL4JTest { // System.out.println(json); - MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration confFromJson = NeuralNetConfiguration.fromJson(json); assertEquals(conf, confFromJson); - MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml); + NeuralNetConfiguration confFromYaml = NeuralNetConfiguration.fromYaml(yaml); assertEquals(conf, confFromYaml); } @@ -78,7 +69,7 @@ public class TestCustomLayers extends BaseDL4JTest { public void testJsonComputationGraph() { 
//ComputationGraph with a custom layer; check JSON and YAML config actually works... - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder() .addInputs("in").addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in") .addLayer("1", new CustomLayer(3.14159), "0").addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX) @@ -103,7 +94,7 @@ public class TestCustomLayers extends BaseDL4JTest { public void checkInitializationFF() { //Actually create a network with a custom layer; check initialization and forward pass - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(9).nOut(10).build()).layer(1, new CustomLayer(3.14159)) //hard-coded nIn/nOut of 10 .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(10).nOut(11).build()) .build(); @@ -125,8 +116,8 @@ public class TestCustomLayers extends BaseDL4JTest { @Test public void testCustomOutputLayerMLN() { //Second: let's create a MultiLayerCofiguration with one, and check JSON and YAML config actually works... - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new CustomOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) @@ -138,10 +129,10 @@ public class TestCustomLayers extends BaseDL4JTest { // System.out.println(json); - MultiLayerConfiguration confFromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration confFromJson = NeuralNetConfiguration.fromJson(json); assertEquals(conf, confFromJson); - MultiLayerConfiguration confFromYaml = MultiLayerConfiguration.fromYaml(yaml); + NeuralNetConfiguration confFromYaml = NeuralNetConfiguration.fromYaml(yaml); assertEquals(conf, confFromYaml); //Third: check initialization @@ -152,8 +143,8 @@ public class TestCustomLayers extends BaseDL4JTest { assertTrue(net.getLayer(1) instanceof CustomOutputLayerImpl); //Fourth: compare to an equivalent standard output layer (should be identical) - MultiLayerConfiguration conf2 = - new NeuralNetConfiguration.Builder().seed(12345).weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf2 = + NeuralNetConfiguration.builder().seed(12345).weightInit(WeightInit.XAVIER) .list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) @@ -163,7 +154,7 @@ public class TestCustomLayers extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - assertEquals(net2.params(), net.params()); + assertEquals(net2.getModelParams(), net.getModelParams()); INDArray testFeatures = Nd4j.rand(1, 10); INDArray testLabels = Nd4j.zeros(1, 10); @@ -178,7 +169,7 @@ public class TestCustomLayers extends BaseDL4JTest { @Test public void testCustomOutputLayerCG() { //Create a ComputationGraphConfiguration with custom output layer, and check JSON and YAML config actually works... 
- ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in").addLayer("1", new CustomOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10) @@ -205,7 +196,7 @@ public class TestCustomLayers extends BaseDL4JTest { assertTrue(net.getLayer(1) instanceof CustomOutputLayerImpl); //Fourth: compare to an equivalent standard output layer (should be identical) - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345) .graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in").addLayer("1", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(10).nOut(10) @@ -216,7 +207,7 @@ public class TestCustomLayers extends BaseDL4JTest { ComputationGraph net2 = new ComputationGraph(conf2); net2.init(); - assertEquals(net2.params(), net.params()); + assertEquals(net2.getModelParams(), net.getModelParams()); INDArray testFeatures = Nd4j.rand(1, 10); INDArray testLabels = Nd4j.zeros(1, 10); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayer.java index 1eacc4d20..4fafcde0b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayer.java @@ -27,6 +27,7 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.params.DefaultParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; @@ -53,13 +54,14 @@ public class CustomLayer extends FeedForwardLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - CustomLayerImpl ret = new CustomLayerImpl(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + CustomLayerImpl ret = new CustomLayerImpl(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayerImpl.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayerImpl.java index e0f582a52..38a7d215b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayerImpl.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomLayerImpl.java @@ -21,11 +21,12 @@ package org.deeplearning4j.nn.layers.custom.testclasses; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.layers.BaseLayer; import org.nd4j.linalg.api.buffer.DataType; public class CustomLayerImpl extends BaseLayer { - public CustomLayerImpl(NeuralNetConfiguration conf, DataType dataType) { + public CustomLayerImpl(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayer.java index 88972c96a..350f24f4b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayer.java @@ -29,7 +29,7 @@ import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.BaseOutputLayer; -import org.deeplearning4j.nn.conf.layers.OutputLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.params.DefaultParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; @@ -52,13 +52,14 @@ public class CustomOutputLayer extends BaseOutputLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - CustomOutputLayerImpl ret = new CustomOutputLayerImpl(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + CustomOutputLayerImpl ret = new CustomOutputLayerImpl(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayerImpl.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayerImpl.java index 349adba9d..f48f35038 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayerImpl.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/custom/testclasses/CustomOutputLayerImpl.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.layers.custom.testclasses; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.layers.BaseOutputLayer; import org.deeplearning4j.nn.workspace.ArrayType; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; @@ -28,7 +29,7 @@ import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; public class 
CustomOutputLayerImpl extends BaseOutputLayer { - public CustomOutputLayerImpl(NeuralNetConfiguration conf, DataType dataType) { + public CustomOutputLayerImpl(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/dense/DenseTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/dense/DenseTest.java index 25c8074a8..01cc7f2dd 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/dense/DenseTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/dense/DenseTest.java @@ -24,7 +24,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.eval.Evaluation; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -52,11 +51,11 @@ public class DenseTest extends BaseDL4JTest { public void testDenseBiasInit() { DenseLayer build = new DenseLayer.Builder().nIn(1).nOut(3).biasInit(1).build(); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().layer(build).build(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer(build).build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, Nd4j.defaultFloatingPointType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, Nd4j.defaultFloatingPointType()); assertEquals(1, layer.getParam("b").size(0)); } @@ -73,7 +72,7 @@ public class DenseTest extends BaseDL4JTest { DataSet test = iter.next(); - assertEquals(model.params(), model2.params()); + assertEquals(model.getModelParams(), model2.getModelParams()); Evaluation eval = new Evaluation(); INDArray output = model.output(test.getFeatures()); @@ -100,7 +99,7 @@ public class DenseTest extends BaseDL4JTest { DataSet test = iter.next(); - assertEquals(model.params(), model2.params()); + assertEquals(model.getModelParams(), model2.getModelParams()); Evaluation eval = new Evaluation(); INDArray output = model.output(test.getFeatures()); @@ -124,7 +123,7 @@ public class DenseTest extends BaseDL4JTest { int outputNum = 3; long seed = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(seed) .updater(new Sgd(1e-3)).l1(0.3).l2(1e-3).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(numInputs).nOut(3) .activation(Activation.TANH).weightInit(WeightInit.XAVIER).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/embedding/EmbeddingLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/embedding/EmbeddingLayerTest.java index 259a38382..742f38a2d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/embedding/EmbeddingLayerTest.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/feedforward/embedding/EmbeddingLayerTest.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -47,7 +46,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Random; @@ -60,7 +58,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { public void testEmbeddingLayerConfig() { for (boolean hasBias : new boolean[]{true, false}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(0, new EmbeddingLayer.Builder().hasBias(hasBias).nIn(10).nOut(5).build()) .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).activation(Activation.SOFTMAX).build()) .build(); @@ -71,8 +69,8 @@ public class EmbeddingLayerTest extends BaseDL4JTest { Layer l0 = net.getLayer(0); assertEquals(org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingLayer.class, l0.getClass()); - assertEquals(10, ((FeedForwardLayer) l0.conf().getLayer()).getNIn()); - assertEquals(5, ((FeedForwardLayer) l0.conf().getLayer()).getNOut()); + assertEquals(10, ((FeedForwardLayer) l0.getLayerConfiguration()).getNIn()); + assertEquals(5, ((FeedForwardLayer) l0.getLayerConfiguration()).getNOut()); INDArray weights = l0.getParam(DefaultParamInitializer.WEIGHT_KEY); INDArray bias = l0.getParam(DefaultParamInitializer.BIAS_KEY); @@ -92,7 +90,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { int nout = 4; for (boolean hasBias : new boolean[]{true, false}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(new EmbeddingSequenceLayer.Builder().hasBias(hasBias) .inputLength(inputLength).nIn(nIn).nOut(embeddingDim).build()) .layer(new RnnOutputLayer.Builder().nIn(embeddingDim).nOut(nout).activation(Activation.SOFTMAX).build()) @@ -104,8 +102,8 @@ public class EmbeddingLayerTest extends BaseDL4JTest { Layer l0 = net.getLayer(0); assertEquals(org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingSequenceLayer.class, l0.getClass()); - assertEquals(10, ((FeedForwardLayer) l0.conf().getLayer()).getNIn()); - assertEquals(5, ((FeedForwardLayer) l0.conf().getLayer()).getNOut()); + assertEquals(10, ((FeedForwardLayer) l0.getLayerConfiguration()).getNIn()); + assertEquals(5, ((FeedForwardLayer) l0.getLayerConfiguration()).getNOut()); INDArray weights = l0.getParam(DefaultParamInitializer.WEIGHT_KEY); INDArray bias = l0.getParam(DefaultParamInitializer.BIAS_KEY); @@ -124,7 +122,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { int embeddingDim = 5; int nOut = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(new 
EmbeddingSequenceLayer.Builder().inputLength(inputLength) .hasBias(true).nIn(nClassesIn).nOut(embeddingDim).build()) .layer(new RnnOutputLayer.Builder().nIn(embeddingDim).nOut(nOut).activation(Activation.SOFTMAX).build()) @@ -155,12 +153,12 @@ public class EmbeddingLayerTest extends BaseDL4JTest { int embeddingDim = 5; int nOut = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(new EmbeddingSequenceLayer.Builder().inputLength(1) .hasBias(true).nIn(nClassesIn).nOut(embeddingDim).build()) .layer(new RnnOutputLayer.Builder().nIn(embeddingDim).nOut(nOut).activation(Activation.SOFTMAX).build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).activation(Activation.IDENTITY).build()) .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).activation(Activation.SOFTMAX).build()) .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) @@ -171,7 +169,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.init(); net2.init(); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); int batchSize = 3; INDArray inEmbedding = Nd4j.create(batchSize, 1); @@ -200,15 +198,15 @@ public class EmbeddingLayerTest extends BaseDL4JTest { @Test public void testEmbeddingForwardPass() { //With the same parameters, embedding layer should have same activations as the equivalent one-hot representation - // input with a DenseLayer + // input with a DenseLayerConfiguration int nClassesIn = 10; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build()) .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).activation(Activation.SOFTMAX).build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(0, new DenseLayer.Builder().nIn(nClassesIn).nOut(5).activation(Activation.IDENTITY).build()) .layer(1, new OutputLayer.Builder().nIn(5).nOut(4).activation(Activation.SOFTMAX).build()) .build(); @@ -218,7 +216,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.init(); net2.init(); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); int batchSize = 3; INDArray inEmbedding = Nd4j.create(batchSize, 1); @@ -243,16 +241,16 @@ public class EmbeddingLayerTest extends BaseDL4JTest { @Test public void testEmbeddingBackwardPass() { //With the same parameters, embedding layer should have same activations as the equivalent one-hot representation - // input with a DenseLayer + // input with a DenseLayerConfiguration int nClassesIn = 10; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build()).layer(1, new 
OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(4) .activation(Activation.SOFTMAX).build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH) + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().activation(Activation.TANH) .weightInit(WeightInit.XAVIER).list() .layer(new DenseLayer.Builder().nIn(nClassesIn).nOut(5).activation(Activation.IDENTITY).build()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(5).nOut(4) @@ -264,7 +262,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.init(); net2.init(); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); int batchSize = 3; INDArray inEmbedding = Nd4j.create(batchSize, 1); @@ -289,7 +287,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.computeGradientAndScore(); net2.computeGradientAndScore(); - assertEquals(net2.score(), net.score(), 1e-6); + assertEquals(net2.getScore(), net.getScore(), 1e-6); Map gradient = net.gradient().gradientForVariable(); Map gradient2 = net2.gradient().gradientForVariable(); @@ -308,16 +306,16 @@ public class EmbeddingLayerTest extends BaseDL4JTest { int nOut = 4; int inputLength = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(new EmbeddingSequenceLayer.Builder().inputLength(inputLength) .hasBias(true).nIn(nClassesIn).nOut(embeddingDim).build()) .layer(new RnnOutputLayer.Builder().nIn(embeddingDim).nOut(nOut).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.recurrent(nClassesIn,inputLength,RNNFormat.NCW)) + .inputType(InputType.recurrent(nClassesIn,inputLength,RNNFormat.NCW)) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH).list() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().activation(Activation.TANH).list() .layer(new DenseLayer.Builder().nIn(nClassesIn).nOut(embeddingDim).activation(Activation.IDENTITY).build()) .layer(new RnnOutputLayer.Builder().nIn(embeddingDim).nOut(nOut).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.recurrent(nClassesIn,inputLength,RNNFormat.NCW)) + .inputType(InputType.recurrent(nClassesIn,inputLength,RNNFormat.NCW)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -325,7 +323,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.init(); net2.init(); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); int batchSize = 3; INDArray inEmbedding = Nd4j.create(batchSize, 1); @@ -351,7 +349,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net2.computeGradientAndScore(); // System.out.println(net.score() + "\t" + net2.score()); - assertEquals(net2.score(), net.score(), 1e-6); + assertEquals(net2.getScore(), net.getScore(), 1e-6); Map gradient = net.gradient().gradientForVariable(); Map gradient2 = net2.gradient().gradientForVariable(); @@ -368,7 +366,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { int batchSize = 3; int timeSeriesLength = 8; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().activation(Activation.TANH) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().activation(Activation.TANH) .dataType(DataType.DOUBLE) .list() .layer(0, new EmbeddingLayer.Builder().hasBias(true).nIn(nClassesIn).nOut(5).build()) @@ -377,9 +375,9 @@ 
public class EmbeddingLayerTest extends BaseDL4JTest { .activation(Activation.SOFTMAX).build()) .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) .inputPreProcessor(1, new FeedForwardToRnnPreProcessor()) - .setInputType(InputType.recurrent(nClassesIn,timeSeriesLength, RNNFormat.NCW)) + .inputType(InputType.recurrent(nClassesIn,timeSeriesLength, RNNFormat.NCW)) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().activation(Activation.TANH) + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .dataType(DataType.DOUBLE) .list() @@ -389,7 +387,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { .activation(Activation.SOFTMAX).build()) .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) .inputPreProcessor(1, new FeedForwardToRnnPreProcessor()) - .setInputType(InputType.recurrent(nClassesIn,timeSeriesLength, RNNFormat.NCW)) + .inputType(InputType.recurrent(nClassesIn,timeSeriesLength, RNNFormat.NCW)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -397,7 +395,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.init(); net2.init(); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); INDArray inEmbedding = Nd4j.create(batchSize, 1, timeSeriesLength); INDArray inOneHot = Nd4j.create(batchSize, nClassesIn, timeSeriesLength); @@ -424,7 +422,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net2.computeGradientAndScore(); // System.out.println(net.score() + "\t" + net2.score()); - assertEquals(net2.score(), net.score(), 1e-5); + assertEquals(net2.getScore(), net.getScore(), 1e-5); Map gradient = net.gradient().gradientForVariable(); Map gradient2 = net2.gradient().gradientForVariable(); @@ -452,7 +450,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { for (int nExamples : miniBatchSizes) { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new EmbeddingLayer.Builder().hasBias(true).activation(Activation.TANH).nIn(numInputClasses) @@ -463,13 +461,13 @@ public class EmbeddingLayerTest extends BaseDL4JTest { .nOut(4).build()) .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) .inputPreProcessor(2, new FeedForwardToRnnPreProcessor()) - .setInputType(InputType.recurrent(numInputClasses,timeSeriesLength, RNNFormat.NCW)) + .inputType(InputType.recurrent(numInputClasses,timeSeriesLength, RNNFormat.NCW)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(numInputClasses).nOut(5) @@ -480,13 +478,13 @@ public class EmbeddingLayerTest extends BaseDL4JTest { .nOut(4).build()) .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) .inputPreProcessor(2, new FeedForwardToRnnPreProcessor()) - .setInputType(InputType.recurrent(numInputClasses,timeSeriesLength, RNNFormat.NCW)) + .inputType(InputType.recurrent(numInputClasses,timeSeriesLength, RNNFormat.NCW)) .build(); MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - 
net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); INDArray inEmbedding = Nd4j.zeros(nExamples, 1, timeSeriesLength); INDArray inDense = Nd4j.zeros(nExamples, numInputClasses, timeSeriesLength); @@ -525,7 +523,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net2.computeGradientAndScore(); // System.out.println(net.score() + "\t" + net2.score()); - assertEquals(net2.score(), net.score(), 1e-5); + assertEquals(net2.getScore(), net.getScore(), 1e-5); Map gradients = net.gradient().gradientForVariable(); Map gradients2 = net2.gradient().gradientForVariable(); @@ -553,7 +551,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { el = new EmbeddingLayer.Builder().weightInit(new WordVectorsMockup()).build(); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345).list() .layer(el) .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(3).nOut(3).build()) @@ -577,7 +575,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { esl = new EmbeddingSequenceLayer.Builder().weightInit(new WordVectorsMockup()).build(); } - conf = new NeuralNetConfiguration.Builder() + conf = NeuralNetConfiguration.builder() .seed(12345).list() .layer(esl) .layer(new GlobalPoolingLayer()) @@ -614,7 +612,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { for (int nExamples : miniBatchSizes) { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new EmbeddingSequenceLayer.Builder().hasBias(true).activation(Activation.TANH).nIn(numInputClasses) @@ -623,12 +621,12 @@ public class EmbeddingLayerTest extends BaseDL4JTest { .layer(2, new LSTM.Builder().activation(Activation.TANH).nIn(4).nOut(3).build()) .layer(3, new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(3) .nOut(4).build()) - .setInputType(InputType.recurrent(numInputClasses,timeSeriesLength,RNNFormat.NCW)).build(); + .inputType(InputType.recurrent(numInputClasses,timeSeriesLength,RNNFormat.NCW)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(numInputClasses).nOut(5) @@ -637,12 +635,12 @@ public class EmbeddingLayerTest extends BaseDL4JTest { .layer(2, new LSTM.Builder().activation(Activation.TANH).nIn(4).nOut(3).dataFormat(RNNFormat.NCW).build()) .layer(3, new RnnOutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(3) .nOut(4).build()) - .setInputType(InputType.recurrent(numInputClasses,1,RNNFormat.NCW)).build(); + .inputType(InputType.recurrent(numInputClasses,1,RNNFormat.NCW)).build(); MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); INDArray inEmbedding = Nd4j.zeros(inLabelDtype, inputRank == 2 ? 
new long[]{nExamples, timeSeriesLength} : new long[]{nExamples, 1, timeSeriesLength}); INDArray inDense = Nd4j.zeros(inLabelDtype, nExamples, numInputClasses, timeSeriesLength); @@ -680,7 +678,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.computeGradientAndScore(); net2.computeGradientAndScore(); - assertEquals(net2.score(), net.score(), 1e-5); + assertEquals(net2.getScore(), net.getScore(), 1e-5); Map gradients = net.gradient().gradientForVariable(); Map gradients2 = net2.gradient().gradientForVariable(); @@ -722,7 +720,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { @Test public void testEmbeddingDefaultActivation(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new EmbeddingLayer.Builder().nIn(10).nOut(10).build()) .layer(new EmbeddingSequenceLayer.Builder().nIn(10).nOut(10).build()) @@ -747,7 +745,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { for (boolean seq : new boolean[]{false, true}) { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(seq ? @@ -758,7 +756,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net.init(); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(seq ? @@ -769,7 +767,7 @@ public class EmbeddingLayerTest extends BaseDL4JTest { net2.init(); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf3 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf3 = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(seq ? @@ -779,9 +777,9 @@ public class EmbeddingLayerTest extends BaseDL4JTest { MultiLayerNetwork net3 = new MultiLayerNetwork(conf3); net3.init(); - INDArray p1 = net.params(); - INDArray p2 = net2.params(); - INDArray p3 = net3.params(); + INDArray p1 = net.getModelParams(); + INDArray p2 = net2.getModelParams(); + INDArray p3 = net3.getModelParams(); boolean eq = p1.equalsWithEps(p2, 1e-4); String str = (seq ? 
"EmbeddingSequenceLayer" : "EmbeddingLayer") + " - " + wi; assertTrue(eq, str + " p1/p2 params not equal"); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/BatchNormalizationTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/BatchNormalizationTest.java index c4950d3c4..eb76c88f2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/BatchNormalizationTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/BatchNormalizationTest.java @@ -29,7 +29,6 @@ import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -129,14 +128,14 @@ public class BatchNormalizationTest extends BaseDL4JTest { b.lockGammaBeta(true).gamma(gamma).beta(beta); } BatchNormalization bN = b.build(); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().layer(bN).build(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer(bN).build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = null; if (numParams > 0) { params = Nd4j.create(1, numParams); } - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params == null ? Nd4j.defaultFloatingPointType() : params.dataType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params == null ? 
Nd4j.defaultFloatingPointType() : params.dataType()); if (numParams > 0) { layer.setBackpropGradientsViewArray(Nd4j.create(1, numParams)); } @@ -365,7 +364,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { DataSet next = iter.next(); // Run with separate activation layer - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(10).weightInit(WeightInit.XAVIER) @@ -397,7 +396,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { DataSetIterator iter = new MnistDataSetIterator(2, 2); DataSet next = iter.next(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) .list() .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER) @@ -406,7 +405,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { .layer(2, new ActivationLayer.Builder().activation(Activation.RELU).build()) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -422,7 +421,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { //Serialize the batch norm network (after training), and make sure we get same activations out as before // i.e., make sure state is properly stored - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER) @@ -433,7 +432,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { .layer(4, new BatchNormalization.Builder().build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -461,9 +460,9 @@ public class BatchNormalizationTest extends BaseDL4JTest { public void testGradientAndUpdaters() throws Exception { //Global mean/variance are part of the parameter vector. 
Expect 0 gradient, and no-op updater for these - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(Updater.RMSPROP).seed(12345).list() + .updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()).seed(12345).list() .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY).build()) .layer(1, new BatchNormalization.Builder().build()) @@ -472,7 +471,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { .layer(4, new BatchNormalization.Builder().build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -519,9 +518,9 @@ public class BatchNormalizationTest extends BaseDL4JTest { for(boolean useLogStd : new boolean[]{true, false}) { //First, Mnist data as 2d input (NOT taking into account convolution property) - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(Updater.RMSPROP).seed(12345) + .updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()).seed(12345) .list().layer(0, new BatchNormalization.Builder().nIn(10).nOut(10).eps(1e-5).decay(0.95) .useLogStd(useLogStd).build()) @@ -586,13 +585,13 @@ public class BatchNormalizationTest extends BaseDL4JTest { //Check that the internal global mean/variance estimate is approximately correct //First, Mnist data as 2d input (NOT taking into account convolution property) - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(Updater.RMSPROP).seed(12345).list() + .updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()).seed(12345).list() .layer(0, new BatchNormalization.Builder().nIn(3).nOut(3).eps(1e-5).decay(0.95).useLogStd(useLogStd).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY).nOut(10).build()) - .setInputType(InputType.convolutional(5, 5, 3)).build(); + .inputType(InputType.convolutional(5, 5, 3)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -649,24 +648,24 @@ public class BatchNormalizationTest extends BaseDL4JTest { //Check that the internal global mean/variance estimate is approximately correct //First, Mnist data as 2d input (NOT taking into account convolution property) - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(Updater.RMSPROP).seed(12345).list() + .updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()).seed(12345).list() .layer(0, new BatchNormalization.Builder().nIn(3).nOut(3).eps(1e-5).decay(0.95).useLogStd(false).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY).nOut(10).build()) - .setInputType(InputType.convolutional(5, 5, 
3)).build(); + .inputType(InputType.convolutional(5, 5, 3)).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .updater(Updater.RMSPROP).seed(12345).list() + .updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()).seed(12345).list() .layer(0, new BatchNormalization.Builder().nIn(3).nOut(3).eps(1e-5).decay(0.95).useLogStd(true).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).weightInit(WeightInit.XAVIER) .activation(Activation.IDENTITY).nOut(10).build()) - .setInputType(InputType.convolutional(5, 5, 3)).build(); + .inputType(InputType.convolutional(5, 5, 3)).build(); MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); @@ -691,7 +690,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { @Test public void testBatchNorm() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(1e-3)) .activation(Activation.TANH) @@ -700,7 +699,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { .layer(new BatchNormalization()) .layer(new ConvolutionLayer.Builder().nOut(5).kernelSize(2, 2).build()) .layer(new OutputLayer.Builder().activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).nOut(10).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) + .inputType(InputType.convolutionalFlat(28, 28, 1)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -728,7 +727,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { for (boolean rnn : new boolean[]{true, false}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same) @@ -737,7 +736,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { new Convolution1DLayer.Builder().kernelSize(3).stride(1).nOut(3).build()) .layer(new BatchNormalization()) .layer(new RnnOutputLayer.Builder().nOut(3).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build()) - .setInputType(InputType.recurrent(3)) + .inputType(InputType.recurrent(3)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -757,7 +756,7 @@ public class BatchNormalizationTest extends BaseDL4JTest { @Test public void testInputValidation() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new BatchNormalization.Builder().nIn(10).nOut(10).build()) .build(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/LocalResponseTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/LocalResponseTest.java index e876b736b..c2f8cb3c4 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/LocalResponseTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/normalization/LocalResponseTest.java @@ -25,7 +25,6 @@ import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.Layer; import 
org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -110,7 +109,7 @@ public class LocalResponseTest extends BaseDL4JTest { @BeforeEach public void doBefore() { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).seed(123) .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build()) .build(); @@ -140,7 +139,7 @@ public class LocalResponseTest extends BaseDL4JTest { public void testRegularization() { // Confirm a structure with regularization true will not throw an error - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).l1(0.2) .l2(0.1).seed(123) .layer(new LocalResponseNormalization.Builder().k(2).n(5).alpha(1e-4).beta(0.75).build()) @@ -149,7 +148,7 @@ public class LocalResponseTest extends BaseDL4JTest { @Test public void testMultiCNNLayer() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list() .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(6).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) @@ -159,7 +158,7 @@ public class LocalResponseTest extends BaseDL4JTest { .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(10) .build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)).build(); + .inputType(InputType.convolutionalFlat(28, 28, 1)).build(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -203,7 +202,7 @@ public class LocalResponseTest extends BaseDL4JTest { } LocalResponseNormalization lrn = new LocalResponseNormalization.Builder().build(); - NeuralNetConfiguration nnc = new NeuralNetConfiguration.Builder().layer(lrn).build(); + NeuralNetConfiguration nnc = NeuralNetConfiguration.builder().layer(lrn).build(); org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization layer = (org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization) lrn.instantiate(nnc, null, 0, null, false, Nd4j.defaultFloatingPointType()); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/objdetect/TestYolo2OutputLayer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/objdetect/TestYolo2OutputLayer.java index c732ab366..5ef6fb110 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/objdetect/TestYolo2OutputLayer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/objdetect/TestYolo2OutputLayer.java @@ -34,7 +34,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -88,7 +87,7 @@ public class TestYolo2OutputLayer extends BaseDL4JTest { INDArray bbPrior = Nd4j.rand(b, 2).muliRowVector(Nd4j.create(new double[]{w, h})); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .l2(0.01) .list() .layer(new ConvolutionLayer.Builder().nIn(depth).nOut(depth).kernelSize(1,1).build()) @@ -177,7 +176,7 @@ public class TestYolo2OutputLayer extends BaseDL4JTest { INDArray bbPrior = Nd4j.rand(b, 2).muliRowVector(Nd4j.create(new double[]{w, h})); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new ConvolutionLayer.Builder().nIn(1).nOut(1).kernelSize(1,1).build()) .layer(new Yolo2OutputLayer.Builder() @@ -335,7 +334,7 @@ public class TestYolo2OutputLayer extends BaseDL4JTest { //Check IOU calculation - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new ConvolutionLayer.Builder().kernelSize(3,3).stride(1,1).nIn(3).nOut(3).build()) .layer(new Yolo2OutputLayer.Builder() @@ -495,7 +494,7 @@ public class TestYolo2OutputLayer extends BaseDL4JTest { DataSetIterator iter = new RecordReaderDataSetIterator(rr,1,1,1,true); iter.setPreProcessor(new ImagePreProcessingScaler()); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .convolutionMode(ConvolutionMode.Same) .updater(new Adam(2e-3)) .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue) @@ -510,12 +509,12 @@ public class TestYolo2OutputLayer extends BaseDL4JTest { .layer(new Yolo2OutputLayer.Builder() .boundingBoxPriors(bbPriors) .build()) - .setInputType(InputType.convolutional(h,w,c)) + .inputType(InputType.convolutional(h,w,c)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new ScoreIterationListener(100)); + net.addTrainingListeners(new ScoreIterationListener(100)); int nEpochs = 1000; DataSet ds = iter.next(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ocnn/OCNNOutputLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ocnn/OCNNOutputLayerTest.java index e9f76dfc2..a52716589 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ocnn/OCNNOutputLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/ocnn/OCNNOutputLayerTest.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.layers.ocnn; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.gradientcheck.GradientCheckUtil; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -35,7 +34,6 @@ import org.junit.jupiter.api.io.TempDir; import org.nd4j.linalg.activations.impl.ActivationIdentity; import org.nd4j.linalg.activations.impl.ActivationReLU; import org.nd4j.linalg.activations.impl.ActivationSigmoid; -import org.nd4j.linalg.api.buffer.DataBuffer; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; import 
org.nd4j.linalg.dataset.DataSet; @@ -43,10 +41,7 @@ import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.preprocessor.NormalizerStandardize; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.Adam; -import org.nd4j.linalg.learning.config.Nesterovs; import org.nd4j.linalg.learning.config.NoOp; -import org.nd4j.linalg.schedule.ScheduleType; -import org.nd4j.linalg.schedule.StepSchedule; import java.io.File; import java.util.UUID; @@ -84,13 +79,13 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { if (doLearningFirst) { //Run a number of iterations of learning network.setInput(arr); - network.setListeners(new ScoreIterationListener(1)); + network.addTrainingListeners(new ScoreIterationListener(1)); network.computeGradientAndScore(); - double scoreBefore = network.score(); + double scoreBefore = network.getScore(); for (int j = 0; j < 10; j++) network.fit(ds); network.computeGradientAndScore(); - double scoreAfter = network.score(); + double scoreAfter = network.getScore(); //Can't test in 'characteristic mode of operation' if not learning String msg = "testLayer() - score did not (sufficiently) decrease during learning - activationFn=" + "relu" + ", lossFn=" + "ocnn" + ", " + "sigmoid" @@ -104,7 +99,7 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { + "ocnn" + "sigmoid" + ", doLearningFirst=" + doLearningFirst); for (int j = 0; j < network.getnLayers(); j++) - System.out.println("Layer " + j + " # params: " + network.getLayer(j).numParams()); + System.out.println("ILayer " + j + " # params: " + network.getLayer(j).numParams()); } boolean gradOK = GradientCheckUtil.checkGradients(network, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR, @@ -128,7 +123,7 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { DataSet filtered = next.filterBy(new int[]{0, 1}); for (int i = 0; i < 10; i++) { network.setEpochCount(i); - network.getLayerWiseConfigurations().setEpochCount(i); + network.getNetConfiguration().setEpochCount(i); network.fit(filtered); } @@ -152,7 +147,7 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { tmpFile.deleteOnExit(); MultiLayerNetwork multiLayerNetwork = ModelSerializer.restoreMultiLayerNetwork(tmpFile); - assertEquals(network.params(),multiLayerNetwork.params()); + assertEquals(network.getModelParams(),multiLayerNetwork.getModelParams()); assertEquals(network.numParams(),multiLayerNetwork.numParams()); } @@ -170,7 +165,7 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { private MultiLayerNetwork getSingleLayer() { int numHidden = 2; - MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration configuration = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .miniBatch(true) @@ -182,8 +177,9 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { // 1e-2, // 0.1, // 20)).build()) - .list(new DenseLayer.Builder().activation(new ActivationReLU()) - .nIn(4).nOut(2).build(), + .layer(new DenseLayer.Builder().activation(new ActivationReLU()) + .nIn(4).nOut(2).build()) + .layer( new org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer.Builder() .nIn(2).activation(new ActivationSigmoid()).initialRValue(0.1) .nu(0.1) @@ -191,16 +187,17 @@ public class OCNNOutputLayerTest extends BaseDL4JTest { .build(); MultiLayerNetwork network = new MultiLayerNetwork(configuration); network.init(); - network.setListeners(new ScoreIterationListener(1)); + network.addTrainingListeners(new ScoreIterationListener(1)); return network; } public 
MultiLayerNetwork getGradientCheckNetwork(int numHidden) { - MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration configuration = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(42).updater(new NoOp()).miniBatch(false) - .list(new DenseLayer.Builder().activation(new ActivationIdentity()).nIn(4).nOut(4).build(), + .layer(new DenseLayer.Builder().activation(new ActivationIdentity()).nIn(4).nOut(4).build()) + .layer( new org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer.Builder().nIn(4) .nu(0.002).activation(new ActivationSigmoid()) .hiddenLayerSize(numHidden).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingMaskingTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingMaskingTests.java index a7f3d1867..86d695f3d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingMaskingTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingMaskingTests.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.layers.pooling; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.layers.*; @@ -59,7 +58,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { for (int miniBatchSize : minibatchSizes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .dist(new NormalDistribution(0, 1.0)).seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH) @@ -123,7 +122,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM}; for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2) .stride(height, 1).activation(Activation.TANH).build()) @@ -186,7 +185,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM}; for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width) .stride(1, width).activation(Activation.TANH).build()) @@ -250,7 +249,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM}; for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = 
NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(height, 2) .stride(height, 1).activation(Activation.TANH).build()) @@ -309,7 +308,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM}; for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width) .stride(1, width).activation(Activation.TANH).build()) @@ -368,7 +367,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM}; for (PoolingType pt : poolingTypes) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, 2) .stride(1, 1).activation(Activation.TANH).build()) @@ -434,7 +433,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { for(PoolingType pt : PoolingType.values()) { //System.out.println("Net: " + networkDtype + ", mask: " + dt + ", pt=" + pt); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new GlobalPoolingLayer(pt)) .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build()) @@ -447,7 +446,7 @@ public class GlobalPoolingMaskingTests extends BaseDL4JTest { net.output(in, false, mask, null); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .list() .layer(new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/BidirectionalTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/BidirectionalTest.java index e785b36e5..101a55edb 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/BidirectionalTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/BidirectionalTest.java @@ -85,7 +85,7 @@ public class BidirectionalTest extends BaseDL4JTest { //Bidirectional(GravesLSTM) and GravesBidirectionalLSTM should be equivalent, given equivalent params //Note that GravesBidirectionalLSTM implements ADD mode only - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .trainingWorkspaceMode(wsm) @@ -98,7 +98,7 @@ public class BidirectionalTest extends BaseDL4JTest { .nIn(10).nOut(10).build()) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = 
NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .trainingWorkspaceMode(wsm) @@ -124,7 +124,7 @@ public class BidirectionalTest extends BaseDL4JTest { assertEquals(n1, n2); } - net2.setParams(net1.params()); //Assuming exact same layout here... + net2.setParams(net1.getModelParams()); //Assuming exact same layout here... INDArray in; if (rnnDataFormat == NCW){ @@ -154,7 +154,7 @@ public class BidirectionalTest extends BaseDL4JTest { net2.computeGradientAndScore(); //Ensure scores are equal: - assertEquals(net1.score(), net2.score(), 1e-6); + assertEquals(net1.getScore(), net2.getScore(), 1e-6); //Ensure gradients are equal: Gradient g1 = net1.gradient(); @@ -174,8 +174,8 @@ public class BidirectionalTest extends BaseDL4JTest { net1.fit(in, labels); net2.fit(in, labels); - INDArray p1 = net1.params(); - INDArray p2 = net2.params(); + INDArray p1 = net1.getModelParams(); + INDArray p2 = net2.getModelParams(); assertEquals(p1, p2); } } @@ -189,7 +189,7 @@ public class BidirectionalTest extends BaseDL4JTest { //Bidirectional(GravesLSTM) and GravesBidirectionalLSTM should be equivalent, given equivalent params //Note that GravesBidirectionalLSTM implements ADD mode only - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .updater(new Adam()) @@ -204,7 +204,7 @@ public class BidirectionalTest extends BaseDL4JTest { .setOutputs("2") .build(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .updater(new Adam()) @@ -232,7 +232,7 @@ public class BidirectionalTest extends BaseDL4JTest { assertEquals(n1, n2); } - net2.setParams(net1.params()); //Assuming exact same layout here... + net2.setParams(net1.getModelParams()); //Assuming exact same layout here... 
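For reference, the pattern applied in the hunks above is the core of this refactor: configurations are built via NeuralNetConfiguration.builder() (instead of new NeuralNetConfiguration.Builder(), and the result is a NeuralNetConfiguration rather than a MultiLayerConfiguration), and parameters/score are read through getModelParams()/getScore() instead of params()/score(). The following is a minimal sketch of that usage, assuming the API exactly as it appears in the changed lines of this patch; the wrapper class and main method are illustrative only and not part of the patch.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.lossfunctions.LossFunctions;

// Hypothetical example class, not part of this patch.
public class BuilderMigrationSketch {
    public static void main(String[] args) {
        // New style: NeuralNetConfiguration.builder() replaces new NeuralNetConfiguration.Builder().
        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .seed(12345)
                .activation(Activation.TANH)
                .list()
                .layer(new DenseLayer.Builder().nIn(10).nOut(10).build())
                .layer(new OutputLayer.Builder().nIn(10).nOut(10)
                        .activation(Activation.SOFTMAX)
                        .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        // Accessors renamed throughout this patch:
        INDArray params = net.getModelParams(); // was net.params()
        double score = net.getScore();          // was net.score()
        System.out.println(params.length() + " params, score=" + score);
    }
}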
INDArray in = Nd4j.rand(3, 10, 5); @@ -253,7 +253,7 @@ public class BidirectionalTest extends BaseDL4JTest { net2.computeGradientAndScore(); //Ensure scores are equal: - assertEquals(net1.score(), net2.score(), 1e-6); + assertEquals(net1.getScore(), net2.getScore(), 1e-6); //Ensure gradients are equal: Gradient g1 = net1.gradient(); @@ -273,8 +273,8 @@ public class BidirectionalTest extends BaseDL4JTest { net1.fit(new DataSet(in, labels)); net2.fit(new DataSet(in, labels)); - INDArray p1 = net1.params(); - INDArray p2 = net2.params(); + INDArray p1 = net1.getModelParams(); + INDArray p2 = net2.getModelParams(); assertEquals(p1, p2); } } @@ -288,7 +288,7 @@ public class BidirectionalTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .trainingWorkspaceMode(wsm) @@ -340,7 +340,7 @@ public class BidirectionalTest extends BaseDL4JTest { net1.computeGradientAndScore(); net2.computeGradientAndScore(); - assertEquals(net1.score(), net2.score(), 1e-6); + assertEquals(net1.getScore(), net2.getScore(), 1e-6); assertEquals(net1.gradient().gradient(), net2.gradient().gradient()); } } @@ -354,7 +354,7 @@ public class BidirectionalTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .trainingWorkspaceMode(wsm) @@ -403,7 +403,7 @@ public class BidirectionalTest extends BaseDL4JTest { net1.computeGradientAndScore(); net2.computeGradientAndScore(); - assertEquals(net1.score(), net2.score(), 1e-6); + assertEquals(net1.getScore(), net2.getScore(), 1e-6); assertEquals(net1.gradient().gradient(), net2.gradient().gradient()); } } @@ -422,7 +422,7 @@ public class BidirectionalTest extends BaseDL4JTest { INDArray in = Nd4j.rand(inshape); for (Bidirectional.Mode m : modes) { - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) @@ -436,7 +436,7 @@ public class BidirectionalTest extends BaseDL4JTest { MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); net1.init(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) @@ -548,7 +548,7 @@ public class BidirectionalTest extends BaseDL4JTest { for (Bidirectional.Mode m : modes) { - ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf1 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) @@ -564,7 +564,7 @@ public class BidirectionalTest extends BaseDL4JTest { ComputationGraph net1 = new ComputationGraph(conf1); net1.init(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) @@ -680,7 +680,7 @@ public class BidirectionalTest extends BaseDL4JTest { int in = 2; int out = 2; - 
ComputationGraphConfiguration.GraphBuilder builder = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder builder = NeuralNetConfiguration.builder() .updater(new Adam(0.01)) .activation(Activation.RELU) .graphBuilder() diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTMTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTMTest.java index bd1291216..be04304b6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTMTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTMTest.java @@ -24,7 +24,6 @@ import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.CacheMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; @@ -64,15 +63,15 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { int nIn = 13; int nHiddenUnits = 17; - final NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder().nIn(nIn) .nOut(nHiddenUnits).dataFormat(rnnDataFormat).activation(Activation.TANH).build()) .build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); final GravesBidirectionalLSTM layer = - (GravesBidirectionalLSTM) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + (GravesBidirectionalLSTM) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); //Data: has shape [miniBatchSize,nIn,timeSeriesLength]; //Output/activations has shape [miniBatchsize,nHiddenUnits,timeSeriesLength]; @@ -130,17 +129,17 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { INDArray inputData = (rnnDataFormat == RNNFormat.NCW)?Nd4j.ones(miniBatchSize, nIn, timeSeriesLength): Nd4j.ones(miniBatchSize, timeSeriesLength, nIn); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder().nIn(nIn) .nOut(lstmNHiddenUnits).dataFormat(rnnDataFormat) .dist(new UniformDistribution(0, 1)).activation(Activation.TANH).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); GravesBidirectionalLSTM lstm = - (GravesBidirectionalLSTM) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); - lstm.setBackpropGradientsViewArray(Nd4j.create(1, conf.getLayer().initializer().numParams(conf))); + (GravesBidirectionalLSTM) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); + lstm.setBackpropGradientsViewArray(Nd4j.create(1, conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf))); //Set input, do a 
forward pass: lstm.activate(inputData, false, LayerWorkspaceMgr.noWorkspaces()); assertNotNull(lstm.input()); @@ -202,21 +201,21 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { final int miniBatchSize = 4; final int timeSeriesLength = 7; - final NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder().nIn(nIn) .nOut(layerSize) .dist(new UniformDistribution(0, 1)).activation(Activation.TANH).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); final GravesBidirectionalLSTM lstm = - (GravesBidirectionalLSTM) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + (GravesBidirectionalLSTM) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); final INDArray input = Nd4j.rand(miniBatchSize, nIn, timeSeriesLength); lstm.setInput(input, LayerWorkspaceMgr.noWorkspaces()); - final INDArray fwdPassFalse = LSTMHelpers.activateHelper(lstm, lstm.conf(), new ActivationSigmoid(), + final INDArray fwdPassFalse = LSTMHelpers.activateHelper(lstm, lstm.getNetConfiguration(), new ActivationSigmoid(), lstm.input(), lstm.getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_FORWARDS), lstm.getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS), @@ -224,7 +223,7 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { false, true, GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS, null, true, null, CacheMode.NONE, LayerWorkspaceMgr.noWorkspaces(), true).fwdPassOutput; - final INDArray[] fwdPassTrue = LSTMHelpers.activateHelper(lstm, lstm.conf(), new ActivationSigmoid(), + final INDArray[] fwdPassTrue = LSTMHelpers.activateHelper(lstm, lstm.getNetConfiguration(), new ActivationSigmoid(), lstm.input(), lstm.getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_FORWARDS), lstm.getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS), @@ -260,16 +259,16 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); - final NeuralNetConfiguration confBidirectional = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration confBidirectional = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder().nIn(nIn) .nOut(layerSize).dataFormat(rnnDataFormat) .dist(new UniformDistribution(-0.1, 0.1)).activation(Activation.TANH).build()) .build(); - long numParams = confBidirectional.getLayer().initializer().numParams(confBidirectional); + long numParams = confBidirectional.getFlattenedLayerConfigurations().get(0).initializer().numParams(confBidirectional); INDArray params = Nd4j.create(1, numParams); - final GravesBidirectionalLSTM bidirectionalLSTM = (GravesBidirectionalLSTM) confBidirectional.getLayer() + final GravesBidirectionalLSTM bidirectionalLSTM = (GravesBidirectionalLSTM) confBidirectional.getFlattenedLayerConfigurations().get(0) .instantiate(confBidirectional, null, 0, params, true, params.dataType()); @@ -278,9 +277,9 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { final INDArray act1 = bidirectionalLSTM.activate(sig, false, LayerWorkspaceMgr.noWorkspaces()); - params = 
bidirectionalLSTM.params(); + params = bidirectionalLSTM.getModelParams(); - bidirectionalLSTM.setParams(params); + bidirectionalLSTM.setParamsTable(params); final INDArray act2 = bidirectionalLSTM.activate(sig, false, LayerWorkspaceMgr.noWorkspaces()); @@ -300,31 +299,31 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); final NeuralNetConfiguration confBidirectional = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder() .nIn(nIn).nOut(layerSize).dataFormat(rnnDataFormat) .dist(new UniformDistribution(-0.1, 0.1)) .activation(Activation.TANH).updater(new NoOp()).build()) .build(); - final NeuralNetConfiguration confForwards = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration confForwards = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(layerSize).dataFormat(rnnDataFormat) .weightInit(WeightInit.ZERO).activation(Activation.TANH).build()) .build(); - long numParams = confForwards.getLayer().initializer().numParams(confForwards); + long numParams = confForwards.getFlattenedLayerConfigurations().get(0).initializer().numParams(confForwards); INDArray params = Nd4j.create(1, numParams); - long numParamsBD = confBidirectional.getLayer().initializer().numParams(confBidirectional); + long numParamsBD = confBidirectional.getFlattenedLayerConfigurations().get(0).initializer().numParams(confBidirectional); INDArray paramsBD = Nd4j.create(1, numParamsBD); - final GravesBidirectionalLSTM bidirectionalLSTM = (GravesBidirectionalLSTM) confBidirectional.getLayer() + final GravesBidirectionalLSTM bidirectionalLSTM = (GravesBidirectionalLSTM) confBidirectional.getFlattenedLayerConfigurations().get(0) .instantiate(confBidirectional, null, 0, paramsBD, true, params.dataType()); final GravesLSTM forwardsLSTM = - (GravesLSTM) confForwards.getLayer().instantiate(confForwards, null, 0, params, true, params.dataType()); + (GravesLSTM) confForwards.getFlattenedLayerConfigurations().get(0).instantiate(confForwards, null, 0, params, true, params.dataType()); bidirectionalLSTM.setBackpropGradientsViewArray( - Nd4j.create(1, confBidirectional.getLayer().initializer().numParams(confBidirectional))); + Nd4j.create(1, confBidirectional.getFlattenedLayerConfigurations().get(0).initializer().numParams(confBidirectional))); forwardsLSTM.setBackpropGradientsViewArray( - Nd4j.create(1, confForwards.getLayer().initializer().numParams(confForwards))); + Nd4j.create(1, confForwards.getFlattenedLayerConfigurations().get(0).initializer().numParams(confForwards))); final INDArray sig = (rnnDataFormat == RNNFormat.NCW)?Nd4j.rand(miniBatchSize, nIn, timeSeriesLength): @@ -501,7 +500,7 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { @Test public void testSerialization() { - final MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + final NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new AdaGrad(0.1)) .l2(0.001) @@ -520,7 +519,7 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { final String json1 = conf1.toJson(); - final MultiLayerConfiguration conf2 = MultiLayerConfiguration.fromJson(json1); + final NeuralNetConfiguration conf2 = NeuralNetConfiguration.fromJson(json1); final String json2 = conf1.toJson(); @@ -532,7 +531,7 @@ public class 
GravesBidirectionalLSTMTest extends BaseDL4JTest { public void testGateActivationFnsSanityCheck() { for (String gateAfn : new String[] {"sigmoid", "hardsigmoid"}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .seed(12345).list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM.Builder() @@ -546,8 +545,8 @@ public class GravesBidirectionalLSTMTest extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals(gateAfn, ((org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) net.getLayer(0).conf() - .getLayer()).getGateActivationFn().toString()); + assertEquals(gateAfn, ((org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) net.getLayer(0).getNetConfiguration() + .getFlattenedLayerConfigurations().get(0)).getGateActivationFn().toString()); INDArray in = Nd4j.rand(3, 2, 5); INDArray labels = Nd4j.rand(3, 2, 5); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTMTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTMTest.java index 679066755..791ff8fa6 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTMTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTMTest.java @@ -24,7 +24,6 @@ import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.common.config.DL4JClassLoading; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; import org.deeplearning4j.nn.gradient.Gradient; @@ -59,14 +58,14 @@ public class GravesLSTMTest extends BaseDL4JTest { int nHiddenUnits = 17; NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn) .nOut(nHiddenUnits).activation(Activation.TANH).build()) .build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - GravesLSTM layer = (GravesLSTM) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + GravesLSTM layer = (GravesLSTM) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); //Data: has shape [miniBatchSize,nIn,timeSeriesLength]; //Output/activations has shape [miniBatchsize,nHiddenUnits,timeSeriesLength]; @@ -104,16 +103,16 @@ public class GravesLSTMTest extends BaseDL4JTest { INDArray inputData = Nd4j.ones(miniBatchSize, nIn, timeSeriesLength); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn) .nOut(lstmNHiddenUnits) .dist(new UniformDistribution(0, 1)).activation(Activation.TANH).build()) .build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - GravesLSTM lstm = 
(GravesLSTM) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); - lstm.setBackpropGradientsViewArray(Nd4j.create(1, conf.getLayer().initializer().numParams(conf))); + GravesLSTM lstm = (GravesLSTM) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); + lstm.setBackpropGradientsViewArray(Nd4j.create(1, conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf))); //Set input, do a forward pass: lstm.activate(inputData, false, LayerWorkspaceMgr.noWorkspaces()); assertNotNull(lstm.input()); @@ -155,15 +154,15 @@ public class GravesLSTMTest extends BaseDL4JTest { int miniBatchSize = 4; int timeSeriesLength = 7; - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(layerSize) .dist(new UniformDistribution(0, 1)) .activation(Activation.TANH).build()) .build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - GravesLSTM lstm = (GravesLSTM) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + GravesLSTM lstm = (GravesLSTM) conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); INDArray input = Nd4j.rand(miniBatchSize, nIn, timeSeriesLength); lstm.setInput(input, LayerWorkspaceMgr.noWorkspaces()); @@ -197,7 +196,7 @@ public class GravesLSTMTest extends BaseDL4JTest { public void testSingleExample() { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().activation(Activation.TANH) @@ -254,7 +253,7 @@ public class GravesLSTMTest extends BaseDL4JTest { public void testGateActivationFnsSanityCheck() { for (String gateAfn : new String[] {"sigmoid", "hardsigmoid"}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .seed(12345).list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder() @@ -268,7 +267,7 @@ public class GravesLSTMTest extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertEquals(gateAfn, ((org.deeplearning4j.nn.conf.layers.GravesLSTM) net.getLayer(0).conf().getLayer()) + assertEquals(gateAfn, ((org.deeplearning4j.nn.conf.layers.GravesLSTM) net.getLayer(0).getLayerConfiguration()) .getGateActivationFn().toString()); INDArray in = Nd4j.rand(3, 2, 5); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/MaskZeroLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/MaskZeroLayerTest.java index f1fa71ab2..1a3bcbc65 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/MaskZeroLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/MaskZeroLayerTest.java @@ -23,13 +23,11 @@ package org.deeplearning4j.nn.layers.recurrent; import 
org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.layers.LSTM; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import org.deeplearning4j.optimize.api.TrainingListener; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.api.ndarray.INDArray; @@ -71,7 +69,7 @@ public class MaskZeroLayerTest extends BaseDL4JTest { .nIn(2) .nOut(1).dataFormat(rnnDataFormat) .build(); - NeuralNetConfiguration conf = new NeuralNetConfiguration(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().build(); conf.setLayer(underlying); INDArray params = Nd4j.zeros(1, 16); @@ -108,7 +106,7 @@ public class MaskZeroLayerTest extends BaseDL4JTest { @Test public void testSerialization(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new org.deeplearning4j.nn.conf.layers.util.MaskZeroLayer.Builder() .setMaskValue(0.0).setUnderlying(new LSTM.Builder().nIn(4).nOut(5).dataFormat(rnnDataFormat).build()).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/RnnDataFormatTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/RnnDataFormatTests.java index 2b5280339..93a60f38c 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/RnnDataFormatTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/RnnDataFormatTests.java @@ -238,14 +238,14 @@ public class RnnDataFormatTests extends BaseDL4JTest { return getNetWithLayer(new SimpleRnn.Builder().nOut(3).build(), format, lastTimeStep, maskZeros); } } - private MultiLayerNetwork getNetWithLayer(Layer layer, RNNFormat format, boolean lastTimeStep, boolean maskZeros) { + private MultiLayerNetwork getNetWithLayer(LayerConfiguration layer, RNNFormat format, boolean lastTimeStep, boolean maskZeros) { if (maskZeros){ layer = new MaskZeroLayer.Builder().setMaskValue(0.).setUnderlying(layer).build(); } if(lastTimeStep){ layer = new LastTimeStep(layer); } - NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder() .seed(12345) .list() .layer(new LSTM.Builder() @@ -260,7 +260,7 @@ public class RnnDataFormatTests extends BaseDL4JTest { (lastTimeStep)?new OutputLayer.Builder().activation(Activation.SOFTMAX).nOut(10).build(): new RnnOutputLayer.Builder().activation(Activation.SOFTMAX).nOut(10).dataFormat(format).build() ) - .setInputType(InputType.recurrent(3, 12, format)); + .inputType(InputType.recurrent(3, 12, format)); MultiLayerNetwork net = new MultiLayerNetwork(builder.build()); net.init(); @@ -285,9 +285,9 @@ public class RnnDataFormatTests extends BaseDL4JTest { public static void testHelper(TestCase tc) { - tc.net2.params().assign(tc.net1.params()); - tc.net3.params().assign(tc.net1.params()); - tc.net4.params().assign(tc.net1.params()); + tc.net2.getModelParams().assign(tc.net1.getModelParams()); + tc.net3.getModelParams().assign(tc.net1.getModelParams()); + 
tc.net4.getModelParams().assign(tc.net1.getModelParams()); INDArray inNCW = tc.inNCW; INDArray inNWC = tc.inNCW.permute(0, 2, 1).dup(); @@ -352,9 +352,9 @@ public class RnnDataFormatTests extends BaseDL4JTest { tc.net3.fit(inNWC, tc.labelsNWC); tc.net4.fit(inNWC, tc.labelsNWC); - assertEquals(tc.net1.params(), tc.net2.params(), tc.msg); - assertEquals(tc.net1.params(), tc.net3.params(), tc.msg); - assertEquals(tc.net1.params(), tc.net4.params(), tc.msg); + assertEquals(tc.net1.getModelParams(), tc.net2.getModelParams(), tc.msg); + assertEquals(tc.net1.getModelParams(), tc.net3.getModelParams(), tc.msg); + assertEquals(tc.net1.getModelParams(), tc.net4.getModelParams(), tc.msg); //Test serialization MultiLayerNetwork net1a = TestUtils.testModelSerialization(tc.net1); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestLastTimeStepLayer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestLastTimeStepLayer.java index 4abcfa768..7755790e4 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestLastTimeStepLayer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestLastTimeStepLayer.java @@ -62,7 +62,7 @@ public class TestLastTimeStepLayer extends BaseDL4JTest { @Test public void testLastTimeStepVertex() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("lastTS", new LastTimeStep(new SimpleRnn.Builder() .nIn(5).nOut(6).dataFormat(rnnDataFormat).build()), "in") .setOutputs("lastTS") @@ -124,7 +124,7 @@ public class TestLastTimeStepLayer extends BaseDL4JTest { @Test public void testMaskingAndAllMasked(){ - ComputationGraphConfiguration.GraphBuilder builder = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder builder = NeuralNetConfiguration.builder() .optimizationAlgo(STOCHASTIC_GRADIENT_DESCENT) .weightInit(XAVIER_UNIFORM) .activation(TANH) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRecurrentWeightInit.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRecurrentWeightInit.java index 951680ca7..3ea9cdbdb 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRecurrentWeightInit.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRecurrentWeightInit.java @@ -40,7 +40,7 @@ public class TestRecurrentWeightInit extends BaseDL4JTest { for (boolean rwInit : new boolean[]{false, true}) { for (int i = 0; i < 3; i++) { - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder b = NeuralNetConfiguration.builder() .weightInit(new UniformDistribution(0, 1)) .list(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRnnLayers.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRnnLayers.java index b5fd0ac57..d6e0369d4 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRnnLayers.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestRnnLayers.java @@ -22,13 +22,12 @@ package org.deeplearning4j.nn.layers.recurrent; import org.deeplearning4j.BaseDL4JTest; 
import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.dropout.TestDropout; import org.deeplearning4j.nn.conf.layers.GravesLSTM; import org.deeplearning4j.nn.conf.layers.LSTM; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.RnnLossLayer; import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; @@ -67,7 +66,7 @@ public class TestRnnLayers extends BaseDL4JTest { int nIn = 12; int nOut = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .weightInit(WeightInit.XAVIER) .list() @@ -119,9 +118,9 @@ public class TestRnnLayers extends BaseDL4JTest { for(String s : layerTypes){ - Layer layer; - Layer layerD; - Layer layerD2; + LayerConfiguration layer; + LayerConfiguration layerD; + LayerConfiguration layerD2; TestDropout.CustomDropout cd = new TestDropout.CustomDropout(); switch (s){ case "graves": @@ -143,21 +142,21 @@ public class TestRnnLayers extends BaseDL4JTest { throw new RuntimeException(s); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(layer) .layer(new RnnOutputLayer.Builder().activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(10).dataFormat(rnnDataFormat).build()) .build(); - MultiLayerConfiguration confD = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confD = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(layerD) .layer(new RnnOutputLayer.Builder().activation(Activation.TANH).lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(10).dataFormat(rnnDataFormat).build()) .build(); - MultiLayerConfiguration confD2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confD2 = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(layerD2) @@ -173,8 +172,8 @@ public class TestRnnLayers extends BaseDL4JTest { MultiLayerNetwork netD2 = new MultiLayerNetwork(confD2); netD2.init(); - assertEquals(net.params(), netD.params(), s); - assertEquals(net.params(), netD2.params(), s); + assertEquals(net.getModelParams(), netD.getModelParams(), s); + assertEquals(net.getModelParams(), netD2.getModelParams(), s); INDArray f = Nd4j.rand(DataType.FLOAT, 3, 10, 10); @@ -193,7 +192,7 @@ public class TestRnnLayers extends BaseDL4JTest { INDArray l = TestUtils.randomOneHotTimeSeries(3, 10, 10, 12345); net.fit(f.dup(), l); netD.fit(f.dup(), l); - assertNotEquals(net.params(), netD.params(), s); + assertNotEquals(net.getModelParams(), netD.getModelParams(), s); netD2.fit(f.dup(), l); netD2.fit(f.dup(), l); @@ -214,9 +213,9 @@ public class TestRnnLayers extends BaseDL4JTest { for( int i=0; i<2; i++ ){ - NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder lb = NeuralNetConfiguration.builder() + - .list() .layer(new SimpleRnn.Builder().nIn(5).nOut(5).dataFormat(rnnDataFormat).build()); switch (i){ @@ -230,7 +229,7 @@ public class TestRnnLayers extends BaseDL4JTest { throw new RuntimeException(); } - MultiLayerConfiguration conf = lb.build(); + NeuralNetConfiguration conf = lb.build(); 
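The hunks above also rename the layer-configuration type: variables previously declared as org.deeplearning4j.nn.conf.layers.Layer become LayerConfiguration, and the intermediate builder type is NeuralNetConfiguration.NeuralNetConfigurationBuilder. Below is a minimal sketch of that pattern, assuming the type and method names exactly as they appear in this diff (not verified against the rest of the codebase); the wrapper class is illustrative only.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

// Hypothetical example class, not part of this patch.
public class LayerConfigurationSketch {
    public static void main(String[] args) {
        // Layer configs are now typed as LayerConfiguration (formerly Layer):
        LayerConfiguration recurrent = new LSTM.Builder().nIn(10).nOut(10).build();

        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .seed(12345)
                .list()
                .layer(recurrent)
                .layer(new RnnOutputLayer.Builder()
                        .activation(Activation.TANH)
                        .lossFunction(LossFunctions.LossFunction.MSE)
                        .nIn(10).nOut(10).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        System.out.println("params: " + net.getModelParams().length());
    }
}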
MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestSimpleRnn.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestSimpleRnn.java index 9d77537c8..2abd86487 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestSimpleRnn.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestSimpleRnn.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.layers.recurrent; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; @@ -68,7 +67,7 @@ public class TestSimpleRnn extends BaseDL4JTest { in = Nd4j.rand(DataType.FLOAT, m, tsLength, nIn); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) @@ -126,7 +125,7 @@ public class TestSimpleRnn extends BaseDL4JTest { int nIn = 5; int layerSize = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new NoOp()) .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestTimeDistributed.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestTimeDistributed.java index 90a05de95..ec8008379 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestTimeDistributed.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/recurrent/TestTimeDistributed.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.layers.recurrent; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.WorkspaceMode; @@ -62,7 +61,7 @@ public class TestTimeDistributed extends BaseDL4JTest { public void testTimeDistributed(){ for(WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.ENABLED, WorkspaceMode.NONE}) { - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) .seed(12345) @@ -72,10 +71,10 @@ public class TestTimeDistributed extends BaseDL4JTest { .layer(new DenseLayer.Builder().nIn(3).nOut(3).activation(Activation.TANH).build()) .layer(new RnnOutputLayer.Builder().nIn(3).nOut(3).activation(Activation.SOFTMAX).dataFormat(rnnDataFormat) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(3, rnnDataFormat)) + .inputType(InputType.recurrent(3, rnnDataFormat)) .build(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) .seed(12345) @@ -85,7 +84,7 @@ public class TestTimeDistributed extends BaseDL4JTest { .layer(new TimeDistributed(new 
DenseLayer.Builder().nIn(3).nOut(3).activation(Activation.TANH).build(), rnnDataFormat)) .layer(new RnnOutputLayer.Builder().nIn(3).nOut(3).activation(Activation.SOFTMAX).dataFormat(rnnDataFormat) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(3, rnnDataFormat)) + .inputType(InputType.recurrent(3, rnnDataFormat)) .build(); MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); @@ -116,7 +115,7 @@ public class TestTimeDistributed extends BaseDL4JTest { net1.fit(ds); net2.fit(ds); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); MultiLayerNetwork net3 = TestUtils.testModelSerialization(net2); out2 = net2.output(in); @@ -135,7 +134,7 @@ public class TestTimeDistributed extends BaseDL4JTest { for( int rnnType=0; rnnType<3; rnnType++ ) { for( int ffType=0; ffType<3; ffType++ ) { - Layer l0, l2; + LayerConfiguration l0, l2; switch (rnnType) { case 0: l0 = new LSTM.Builder().nOut(5).build(); @@ -153,7 +152,7 @@ public class TestTimeDistributed extends BaseDL4JTest { throw new RuntimeException("Not implemented: " + rnnType); } - Layer l1; + LayerConfiguration l1; switch (ffType){ case 0: l1 = new DenseLayer.Builder().nOut(5).build(); @@ -168,13 +167,13 @@ public class TestTimeDistributed extends BaseDL4JTest { throw new RuntimeException("Not implemented: " + ffType); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .list() .layer(l0) .layer(l1) .layer(l2) - .setInputType(InputType.recurrent(5, 9, rnnDataFormat)) + .inputType(InputType.recurrent(5, 9, rnnDataFormat)) .build(); BaseRecurrentLayer l0a; diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/SameDiffCustomLayerTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/SameDiffCustomLayerTests.java index 7b0f6c2cf..534af7bc2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/SameDiffCustomLayerTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/SameDiffCustomLayerTests.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.layers.samediff; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -75,10 +74,10 @@ public class SameDiffCustomLayerTests extends BaseDL4JTest { @Test public void testInputValidationSameDiffLayer() { - final MultiLayerConfiguration config = new NeuralNetConfiguration.Builder().list() + final NeuralNetConfiguration config = NeuralNetConfiguration.builder().list() .layer(new ValidatingSameDiffLayer()) .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nOut(2).build()) - .setInputType(InputType.feedForward(2)) + .inputType(InputType.feedForward(2)) .build(); final MultiLayerNetwork net = new MultiLayerNetwork(config); @@ -95,7 +94,7 @@ public class SameDiffCustomLayerTests extends BaseDL4JTest { @Test public void testInputValidationSameDiffVertex(){ - final ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder().graphBuilder() + final ComputationGraphConfiguration config = 
NeuralNetConfiguration.builder().graphBuilder() .addVertex("a", new ValidatingSameDiffVertex(), "input") .addLayer("output", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nOut(2).build(), "a") .addInputs("input") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffConv.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffConv.java index f0d5d16ce..f0b23e335 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffConv.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffConv.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.gradientcheck.GradientCheckUtil; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -66,7 +65,7 @@ public class TestSameDiffConv extends BaseDL4JTest { int kH = 2; int kW = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new SameDiffConv.Builder().nIn(nIn).nOut(nOut).kernelSize(kH, kW).build()) .build(); @@ -74,7 +73,7 @@ public class TestSameDiffConv extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - Map pt1 = net.getLayer(0).paramTable(); + Map pt1 = net.getLayer(0).getParamTable(); assertNotNull(pt1); assertEquals(2, pt1.size()); assertNotNull(pt1.get(ConvolutionParamInitializer.WEIGHT_KEY)); @@ -128,7 +127,7 @@ public class TestSameDiffConv extends BaseDL4JTest { + ", ConvolutionMode=" + cm + ", ActFn=" + a + ", hasBias=" + hasBias; log.info("Starting test: " + msg); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(12345) .list() @@ -159,9 +158,9 @@ public class TestSameDiffConv extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertNotNull(net.paramTable()); + assertNotNull(net.getParamTable()); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .weightInit(WeightInit.XAVIER) .seed(12345) @@ -193,8 +192,8 @@ public class TestSameDiffConv extends BaseDL4JTest { //Check params: note that samediff/libnd4j conv params are [kH, kW, iC, oC] //DL4J are [nOut, nIn, kH, kW] - Map params1 = net.paramTable(); - Map params2 = net2.paramTable(); + Map params1 = net.getParamTable(); + Map params2 = net2.getParamTable(); for(Map.Entry e : params1.entrySet()){ if(e.getKey().endsWith("_W")){ INDArray p1 = e.getValue(); @@ -267,7 +266,7 @@ public class TestSameDiffConv extends BaseDL4JTest { int outH = cm == ConvolutionMode.Same ? imgH : (imgH-2); int outW = cm == ConvolutionMode.Same ? 
imgW : (imgW-2); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(12345) .updater(new NoOp()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDense.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDense.java index 5e1949f8a..93d9421c3 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDense.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDense.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.gradientcheck.GradientCheckUtil; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -64,7 +63,7 @@ public class TestSameDiffDense extends BaseDL4JTest { int nIn = 3; int nOut = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new SameDiffDense.Builder().nIn(nIn).nOut(nOut).build()) .build(); @@ -72,7 +71,7 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - Map pt1 = net.getLayer(0).paramTable(); + Map pt1 = net.getLayer(0).getParamTable(); assertNotNull(pt1); assertEquals(2, pt1.size()); assertNotNull(pt1.get(DefaultParamInitializer.WEIGHT_KEY)); @@ -103,7 +102,7 @@ public class TestSameDiffDense extends BaseDL4JTest { for (Activation a : afns) { log.info("Starting test - " + a + ", workspace = " + wsm); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .inferenceWorkspaceMode(wsm) .trainingWorkspaceMode(wsm) .list() @@ -115,9 +114,9 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertNotNull(net.paramTable()); + assertNotNull(net.getParamTable()); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .list() .layer(new DenseLayer.Builder().activation(a).nIn(nIn).nOut(nOut).build()) .build(); @@ -125,12 +124,12 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - net.params().assign(net2.params()); + net.getModelParams().assign(net2.getModelParams()); //Check params: - assertEquals(net2.params(), net.params()); - Map params1 = net.paramTable(); - Map params2 = net2.paramTable(); + assertEquals(net2.getModelParams(), net.getModelParams()); + Map params1 = net.getParamTable(); + Map params2 = net2.getParamTable(); assertEquals(params2, params1); INDArray in = Nd4j.rand(minibatch, nIn); @@ -176,7 +175,7 @@ public class TestSameDiffDense extends BaseDL4JTest { for (Activation a : afns) { log.info("Starting test - " + a + " - workspace=" + wsm); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .list() .layer(new SameDiffDense.Builder().nIn(nIn).nOut(nOut) @@ -194,9 +193,9 @@ public class 
TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - assertNotNull(net.paramTable()); + assertNotNull(net.getParamTable()); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .list() @@ -210,12 +209,12 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); net2.init(); - assertEquals(net2.params(), net.params()); + assertEquals(net2.getModelParams(), net.getModelParams()); //Check params: - assertEquals(net2.params(), net.params()); - Map params1 = net.paramTable(); - Map params2 = net2.paramTable(); + assertEquals(net2.getModelParams(), net.getModelParams()); + Map params1 = net.getParamTable(); + Map params2 = net2.getParamTable(); assertEquals(params2, params1); INDArray in = Nd4j.rand(minibatch, nIn); @@ -264,7 +263,7 @@ public class TestSameDiffDense extends BaseDL4JTest { for (Activation a : afns) { log.info("Starting test - " + a + " - minibatch " + minibatch + ", workspaces: " + workspaces); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(workspaces ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) .inferenceWorkspaceMode(workspaces ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) .list() @@ -278,7 +277,7 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork netSD = new MultiLayerNetwork(conf); netSD.init(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .list() .layer(new DenseLayer.Builder().activation(a).nIn(nIn).nOut(nOut).build()) .layer(new OutputLayer.Builder().nIn(nOut).nOut(nOut).activation(Activation.SOFTMAX) @@ -288,11 +287,11 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork netStandard = new MultiLayerNetwork(conf2); netStandard.init(); - netSD.params().assign(netStandard.params()); + netSD.getModelParams().assign(netStandard.getModelParams()); //Check params: - assertEquals(netStandard.params(), netSD.params()); - assertEquals(netStandard.paramTable(), netSD.paramTable()); + assertEquals(netStandard.getModelParams(), netSD.getModelParams()); + assertEquals(netStandard.getParamTable(), netSD.getParamTable()); INDArray in = Nd4j.rand(minibatch, nIn); INDArray l = TestUtils.randomOneHot(minibatch, nOut, 12345); @@ -352,7 +351,7 @@ public class TestSameDiffDense extends BaseDL4JTest { for(WorkspaceMode wsm : new WorkspaceMode[]{WorkspaceMode.ENABLED, WorkspaceMode.NONE}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) @@ -367,7 +366,7 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork netSD = new MultiLayerNetwork(conf); netSD.init(); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(0.1)) .list() @@ -380,11 +379,11 @@ public class TestSameDiffDense extends BaseDL4JTest { MultiLayerNetwork netStandard = new MultiLayerNetwork(conf2); netStandard.init(); - netSD.params().assign(netStandard.params()); + netSD.getModelParams().assign(netStandard.getModelParams()); //Check params: - 
assertEquals(netStandard.params(), netSD.params()); - assertEquals(netStandard.paramTable(), netSD.paramTable()); + assertEquals(netStandard.getModelParams(), netSD.getModelParams()); + assertEquals(netStandard.getParamTable(), netSD.getParamTable()); DataSetIterator iter = new IrisDataSetIterator(150, 150); DataSet ds = iter.next(); @@ -399,7 +398,7 @@ public class TestSameDiffDense extends BaseDL4JTest { netStandard.fit(ds); String s = String.valueOf(i); assertEquals( netStandard.getFlattenedGradients(), netSD.getFlattenedGradients(), s); - assertEquals( netStandard.params(), netSD.params(), s); + assertEquals( netStandard.getModelParams(), netSD.getModelParams(), s); assertEquals( netStandard.getUpdater().getStateViewArray(), netSD.getUpdater().getStateViewArray(), s); } @@ -422,7 +421,7 @@ public class TestSameDiffDense extends BaseDL4JTest { String msg = "workspaces: " + workspaces + ", " + a; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .seed(12345) .updater(new NoOp()) @@ -433,7 +432,7 @@ public class TestSameDiffDense extends BaseDL4JTest { .layer(new SameDiffDense.Builder().nIn(nOut).nOut(nOut).activation(a).build()) .layer(new OutputLayer.Builder().nIn(nOut).nOut(nOut).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - //.setInputType(InputType.feedForward(nIn)) //TODO + //.inputType(InputType.feedForward(nIn)) //TODO .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDenseVertex.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDenseVertex.java index 630ec1231..5fd371d13 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDenseVertex.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffDenseVertex.java @@ -65,7 +65,7 @@ public class TestSameDiffDenseVertex extends BaseDL4JTest { for (Activation a : afns) { log.info("Starting test - " + a + " - minibatch " + minibatch + ", workspaces: " + workspaces); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .trainingWorkspaceMode(workspaces ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) .inferenceWorkspaceMode(workspaces ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) @@ -82,7 +82,7 @@ public class TestSameDiffDenseVertex extends BaseDL4JTest { ComputationGraph netSD = new ComputationGraph(conf); netSD.init(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .trainingWorkspaceMode(workspaces ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) .inferenceWorkspaceMode(workspaces ? 
WorkspaceMode.ENABLED : WorkspaceMode.NONE) @@ -100,11 +100,11 @@ public class TestSameDiffDenseVertex extends BaseDL4JTest { ComputationGraph netStandard = new ComputationGraph(conf2); netStandard.init(); - netSD.params().assign(netStandard.params()); + netSD.getModelParams().assign(netStandard.getModelParams()); //Check params: - assertEquals(netStandard.params(), netSD.params()); - assertEquals(netStandard.paramTable(), netSD.paramTable()); + assertEquals(netStandard.getModelParams(), netSD.getModelParams()); + assertEquals(netStandard.getParamTable(), netSD.getParamTable()); INDArray in = Nd4j.rand(minibatch, nIn); INDArray l = TestUtils.randomOneHot(minibatch, nOut, 12345); @@ -159,8 +159,8 @@ public class TestSameDiffDenseVertex extends BaseDL4JTest { netSD.fit(ds); netStandard.fit(ds); - assertEquals(netStandard.paramTable(), netSD.paramTable()); - assertEquals(netStandard.params(), netSD.params()); + assertEquals(netStandard.getParamTable(), netSD.getParamTable()); + assertEquals(netStandard.getModelParams(), netSD.getModelParams()); assertEquals(netStandard.getFlattenedGradients(), netSD.getFlattenedGradients()); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffLambda.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffLambda.java index 4afbc7e37..1514a6709 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffLambda.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffLambda.java @@ -62,7 +62,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) .seed(12345) @@ -77,7 +77,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { .build(); //Equavalent, not using SameDiff Lambda: - ComputationGraphConfiguration confStd = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration confStd = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) .seed(12345) @@ -98,7 +98,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { ComputationGraph std = new ComputationGraph(confStd); std.init(); - lambda.setParams(std.params()); + lambda.setParams(std.getModelParams()); INDArray in = Nd4j.rand(3, 5); INDArray labels = TestUtils.randomOneHot(3, 5); @@ -119,7 +119,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { std.fit(ds); String s = String.valueOf(i); - assertEquals(std.params(), lambda.params(), s); + assertEquals(std.getModelParams(), lambda.getModelParams(), s); assertEquals(std.getFlattenedGradients(), lambda.getFlattenedGradients(), s); } @@ -143,7 +143,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { log.info("--- Workspace Mode: {} ---", wsm); Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) .dataType(DataType.DOUBLE) @@ -160,7 +160,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { .build(); //Equavalent, not using SameDiff Lambda: - ComputationGraphConfiguration confStd = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration confStd = NeuralNetConfiguration.builder() 
.trainingWorkspaceMode(wsm) .inferenceWorkspaceMode(wsm) .dataType(DataType.DOUBLE) @@ -182,7 +182,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { ComputationGraph std = new ComputationGraph(confStd); std.init(); - lambda.setParams(std.params()); + lambda.setParams(std.getModelParams()); INDArray in1 = Nd4j.rand(3, 5); INDArray in2 = Nd4j.rand(3, 5); @@ -204,7 +204,7 @@ public class TestSameDiffLambda extends BaseDL4JTest { std.fit(mds); String s = String.valueOf(i); - assertEquals(std.params(), lambda.params(), s); + assertEquals(std.getModelParams(), lambda.getModelParams(), s); assertEquals(std.getFlattenedGradients(), lambda.getFlattenedGradients(), s); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffOutput.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffOutput.java index 2f0479b67..0a3d2f915 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffOutput.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/TestSameDiffOutput.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.layers.samediff; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.LossLayer; @@ -48,7 +47,7 @@ public class TestSameDiffOutput extends BaseDL4JTest { public void testOutputMSELossLayer(){ Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration confSD = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confSD = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(0.01)) .list() @@ -56,7 +55,7 @@ public class TestSameDiffOutput extends BaseDL4JTest { .layer(new SameDiffMSELossLayer()) .build(); - MultiLayerConfiguration confStd = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confStd = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(0.01)) .list() @@ -86,7 +85,7 @@ public class TestSameDiffOutput extends BaseDL4JTest { netSD.fit(ds); netStd.fit(ds); - assertEquals(netStd.params(), netSD.params()); + assertEquals(netStd.getModelParams(), netSD.getModelParams()); assertEquals(netStd.getFlattenedGradients(), netSD.getFlattenedGradients()); } @@ -110,7 +109,7 @@ public class TestSameDiffOutput extends BaseDL4JTest { for(Activation a : new Activation[]{Activation.IDENTITY, Activation.TANH, Activation.SOFTMAX}) { log.info("Starting test: " + a); - MultiLayerConfiguration confSD = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confSD = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(0.01)) .list() @@ -118,7 +117,7 @@ public class TestSameDiffOutput extends BaseDL4JTest { .layer(new SameDiffMSEOutputLayer(5, 5, a, WeightInit.XAVIER)) .build(); - MultiLayerConfiguration confStd = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration confStd = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(0.01)) .list() @@ -132,9 +131,9 @@ public class TestSameDiffOutput extends BaseDL4JTest { MultiLayerNetwork netStd = new MultiLayerNetwork(confStd); netStd.init(); - netSD.params().assign(netStd.params()); + netSD.getModelParams().assign(netStd.getModelParams()); - assertEquals(netStd.paramTable(), netSD.paramTable()); + assertEquals(netStd.getParamTable(), netSD.getParamTable()); int minibatch = 2; 
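// Illustrative sketch, assuming the accessors introduced by this patch; the "sketchParams"
// local is hypothetical. getModelParams() exposes the flattened parameter vector formerly
// returned by params(), and dup() detaches an independent copy of it.
INDArray sketchParams = netStd.getModelParams().dup();   // every weight and bias in a single vector
assertEquals(sketchParams, netSD.getModelParams());      // still equal: netSD was synced via assign(...) above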
INDArray in = Nd4j.rand(minibatch, 5); @@ -166,7 +165,7 @@ public class TestSameDiffOutput extends BaseDL4JTest { netSD.fit(ds); netStd.fit(ds); String s = String.valueOf(i); - assertEquals( netStd.params(), netSD.params(), s); + assertEquals( netStd.getModelParams(), netSD.getModelParams(), s); assertEquals( netStd.getFlattenedGradients(), netSD.getFlattenedGradients(),s ); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/MinimalSameDiffDense.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/MinimalSameDiffDense.java index 8864448b0..bc0677adb 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/MinimalSameDiffDense.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/MinimalSameDiffDense.java @@ -94,5 +94,5 @@ public class MinimalSameDiffDense extends SameDiffLayer { //OPTIONAL methods: // public void setNIn(InputType inputType, boolean override) // public InputPreProcessor getPreProcessorForInputType(InputType inputType) -// public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) +// public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffConv.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffConv.java index 0049696de..f8a2f173b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffConv.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffConv.java @@ -154,12 +154,13 @@ public class SameDiffConv extends SameDiffLayer { } @Override - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) { + NeuralNetConfiguration clone = globalConfig.clone().build(); if (activation == null) { - activation = SameDiffLayerUtils.fromIActivation(globalConfig.getActivationFn()); + activation = SameDiffLayerUtils.fromIActivation(clone.getActivation()); } if (cm == null) { - cm = globalConfig.getConvolutionMode(); + cm = clone.getConvolutionMode(); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDense.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDense.java index e84390916..e1799443d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDense.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDense.java @@ -98,7 +98,7 @@ public class SameDiffDense extends SameDiffLayer { if(DefaultParamInitializer.BIAS_KEY.equals(e.getKey())){ e.getValue().assign(0.0); } else { - //Normally use 'c' order, but use 'f' for direct comparison to DL4J DenseLayer + //Normally use 'c' order, but use 'f' for direct comparison to DL4J DenseLayerConfiguration WeightInitUtil.initWeights(nIn, nOut, new long[]{nIn, nOut}, weightInit, null, 'f', e.getValue()); } } @@ -116,9 +116,10 @@ public class SameDiffDense extends SameDiffLayer { } @Override - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void 
applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) { + NeuralNetConfiguration clone = globalConfig.clone().build(); if(activation == null){ - activation = SameDiffLayerUtils.fromIActivation(globalConfig.getActivationFn()); + activation = SameDiffLayerUtils.fromIActivation(clone.getActivation()); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDenseVertex.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDenseVertex.java index da674ea7c..baa4cee7e 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDenseVertex.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffDenseVertex.java @@ -72,14 +72,14 @@ public class SameDiffDenseVertex extends SameDiffVertex { @Override public void initializeParameters(Map params) { - //Normally use 'c' order, but use 'f' for direct comparison to DL4J DenseLayer + //Normally use 'c' order, but use 'f' for direct comparison to DL4J DenseLayerConfiguration WeightInitUtil.initWeights(nIn, nOut, new long[]{nIn, nOut}, weightInit, null, 'f', params.get("W")); params.get("b").assign(0.0); } @Override public char paramReshapeOrder(String paramName){ - return 'f'; //To match DL4J DenseLayer - for easy comparison + return 'f'; //To match DL4J DenseLayerConfiguration - for easy comparison } @Override diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffMSEOutputLayer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffMSEOutputLayer.java index 41d149b3b..a93db0e56 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffMSEOutputLayer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/samediff/testlayers/SameDiffMSEOutputLayer.java @@ -85,7 +85,7 @@ public class SameDiffMSEOutputLayer extends SameDiffOutputLayer { } @Override - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig){ + public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig){ } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/variational/TestVAE.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/variational/TestVAE.java index f535c81fa..b7c89e007 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/variational/TestVAE.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/layers/variational/TestVAE.java @@ -21,10 +21,10 @@ package org.deeplearning4j.nn.layers.variational; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.conf.layers.variational.*; @@ -56,16 +56,16 @@ public class TestVAE extends BaseDL4JTest { @Test public void testInitialization() { - MultiLayerConfiguration mlc = - new NeuralNetConfiguration.Builder().list() + 
NeuralNetConfiguration mlc = + NeuralNetConfiguration.builder() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .nIn(10).nOut(5).encoderLayerSizes(12).decoderLayerSizes(13) .build()) .build(); - NeuralNetConfiguration c = mlc.getConf(0); + LayerConfiguration c = mlc.getFlattenedLayerConfigurations().get(0); org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae = - (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer(); + (VariationalAutoencoder) c; long allParams = vae.initializer().numParams(c); @@ -77,8 +77,8 @@ public class TestVAE extends BaseDL4JTest { net.init(); System.out.println("Exp num params: " + expNumParams); - assertEquals(expNumParams, net.getLayer(0).params().length()); - Map paramTable = net.getLayer(0).paramTable(); + assertEquals(expNumParams, net.getLayer(0).getParams().length()); + Map paramTable = net.getLayer(0).getParamTable(); int count = 0; for (INDArray arr : paramTable.values()) { count += arr.length(); @@ -94,14 +94,14 @@ public class TestVAE extends BaseDL4JTest { int[][] encLayerSizes = new int[][] {{12}, {12, 13}, {12, 13, 14}}; for (int i = 0; i < encLayerSizes.length; i++) { - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list().layer(0, + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().list().layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder().nIn(10) .nOut(5).encoderLayerSizes(encLayerSizes[i]).decoderLayerSizes(13).build()) .build(); - NeuralNetConfiguration c = mlc.getConf(0); + LayerConfiguration c = mlc.getConf(0); org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae = - (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer(); + (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c; MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -120,14 +120,14 @@ public class TestVAE extends BaseDL4JTest { int inputSize = 3; - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().list() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .nIn(inputSize).nOut(4).encoderLayerSizes(5).decoderLayerSizes(6).build()) .build(); - NeuralNetConfiguration c = mlc.getConf(0); + LayerConfiguration c = mlc.getConf(0); org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae = - (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer(); + (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c; long allParams = vae.initializer().numParams(c); @@ -135,7 +135,7 @@ public class TestVAE extends BaseDL4JTest { net.init(); net.initGradientsView(); //TODO this should happen automatically - Map paramTable = net.getLayer(0).paramTable(); + Map paramTable = net.getLayer(0).getParamTable(); Map gradTable = ((org.deeplearning4j.nn.layers.variational.VariationalAutoencoder) net.getLayer(0)) .getGradientViews(); @@ -158,14 +158,14 @@ public class TestVAE extends BaseDL4JTest { @Test public void testParamGradientOrderAndViews() { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().list() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .nIn(10).nOut(5).encoderLayerSizes(12, 13).decoderLayerSizes(14, 
15).build()) .build(); - NeuralNetConfiguration c = mlc.getConf(0); + LayerConfiguration c = mlc.getConf(0); org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae = - (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer(); + (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c; MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -175,7 +175,7 @@ public class TestVAE extends BaseDL4JTest { org.deeplearning4j.nn.layers.variational.VariationalAutoencoder layer = (org.deeplearning4j.nn.layers.variational.VariationalAutoencoder) net.getLayer(0); - Map layerParams = layer.paramTable(); + Map layerParams = layer.getParamTable(); Map layerGradViews = layer.getGradientViews(); layer.setInput(Nd4j.rand(3, 10), LayerWorkspaceMgr.noWorkspaces()); @@ -216,16 +216,16 @@ public class TestVAE extends BaseDL4JTest { //Idea: pretrain-specific parameters shouldn't change during backprop Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .nIn(10).nOut(5).encoderLayerSizes(12, 13).decoderLayerSizes(14, 15).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(5).nOut(6) .activation(new ActivationTanH()).build()) .build(); - NeuralNetConfiguration c = mlc.getConf(0); + LayerConfiguration c = mlc.getConf(0); org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder vae = - (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c.getLayer(); + (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) c; MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); @@ -239,7 +239,7 @@ public class TestVAE extends BaseDL4JTest { net.pretrainLayer(0, input); //Get a snapshot of the pretrain params after fitting: - Map layerParams = layer.paramTable(); + Map layerParams = layer.getParamTable(); Map pretrainParamsBefore = new HashMap<>(); for (String s : layerParams.keySet()) { if (layer.isPretrainParam(s)) { @@ -255,7 +255,7 @@ public class TestVAE extends BaseDL4JTest { net.fit(features, labels); } - Map layerParamsAfter = layer.paramTable(); + Map layerParamsAfter = layer.getParamTable(); for (String s : pretrainParamsBefore.keySet()) { INDArray before = pretrainParamsBefore.get(s); @@ -268,7 +268,7 @@ public class TestVAE extends BaseDL4JTest { @Test public void testJsonYaml() { - MultiLayerConfiguration config = new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration config = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .reconstructionDistribution(new GaussianReconstructionDistribution(Activation.IDENTITY)) .nIn(3).nOut(4).encoderLayerSizes(5).decoderLayerSizes(6).build()) @@ -299,8 +299,8 @@ public class TestVAE extends BaseDL4JTest { String asJson = config.toJson(); String asYaml = config.toYaml(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson); - MultiLayerConfiguration fromYaml = MultiLayerConfiguration.fromYaml(asYaml); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(asJson); + NeuralNetConfiguration fromYaml = NeuralNetConfiguration.fromYaml(asYaml); assertEquals(config, fromJson); assertEquals(config, fromYaml); @@ -350,7 +350,7 
@@ public class TestVAE extends BaseDL4JTest { throw new RuntimeException(); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.3) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(0.2).l1(0.3) .updater(new Sgd(1.0)) .seed(12345L).dist(new NormalDistribution(0, 1)) .list().layer(0, @@ -416,7 +416,7 @@ public class TestVAE extends BaseDL4JTest { for (int i = 0; i < reconstructionDistributions.length; i++) { INDArray data = Nd4j.rand(minibatch, inOutSize).muli(2).subi(1); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l2(0.2).l1(0.3) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l2(0.2).l1(0.3) .updater(new Sgd(1.0)) .seed(12345L).dist(new NormalDistribution(0, 1)) .list().layer(0, @@ -456,7 +456,7 @@ public class TestVAE extends BaseDL4JTest { for(boolean ws : new boolean[]{false, true}) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345L) .trainingWorkspaceMode(ws ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) .inferenceWorkspaceMode(ws ? WorkspaceMode.ENABLED : WorkspaceMode.NONE) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/CloseNetworkTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/CloseNetworkTests.java index 175292211..5ed2a9c2b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/CloseNetworkTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/CloseNetworkTests.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.misc; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.api.Updater; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -40,15 +39,15 @@ import static org.junit.jupiter.api.Assertions.assertTrue; public class CloseNetworkTests extends BaseDL4JTest { public static MultiLayerNetwork getTestNet() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(1e-3)) - .list() + .layer(new ConvolutionLayer.Builder().nOut(5).kernelSize(3, 3).activation(Activation.TANH).build()) .layer(new BatchNormalization.Builder().nOut(5).build()) .layer(new SubsamplingLayer.Builder().build()) .layer(new DenseLayer.Builder().nOut(10).activation(Activation.RELU).build()) .layer(new OutputLayer.Builder().nOut(10).build()) - .setInputType(InputType.convolutional(28, 28, 1)) + .inputType(InputType.convolutional(28, 28, 1)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -80,7 +79,7 @@ public class CloseNetworkTests extends BaseDL4JTest { net.close(); - assertTrue(net.params().wasClosed()); + assertTrue(net.getModelParams().wasClosed()); if(train) { assertTrue(net.getGradientsViewArray().wasClosed()); Updater u = net.getUpdater(false); @@ -128,7 +127,7 @@ public class CloseNetworkTests extends BaseDL4JTest { net.close(); - assertTrue(net.params().wasClosed()); + assertTrue(net.getModelParams().wasClosed()); if(train) { assertTrue(net.getGradientsViewArray().wasClosed()); Updater u = net.getUpdater(false); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/LargeNetTest.java 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/LargeNetTest.java index 44d1a2098..052e1fa07 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/LargeNetTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/LargeNetTest.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.misc; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -31,7 +30,6 @@ import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.api.buffer.DataBuffer; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; @@ -50,8 +48,8 @@ public class LargeNetTest extends BaseDL4JTest { //More than 2.1 billion parameters //10M classes plus 300 vector size -> 3 billion elements - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .layer(new EmbeddingLayer.Builder().nIn(10_000_000).nOut(300).build()) .layer(new OutputLayer.Builder().nIn(300).nOut(10).activation(Activation.SOFTMAX).build()) .build(); @@ -59,7 +57,7 @@ public class LargeNetTest extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - INDArray params = net.params(); + INDArray params = net.getModelParams(); long paramsLength = params.length(); long expParamsLength = 10_000_000L * 300 + 300 * 10 + 10; assertEquals(expParamsLength, paramsLength); @@ -82,7 +80,7 @@ public class LargeNetTest extends BaseDL4JTest { //More than 2.1 billion parameters //10M classes plus 300 vector size -> 3 billion elements - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("0", new EmbeddingLayer.Builder().nIn(10_000_000).nOut(300).build(), "in") @@ -93,7 +91,7 @@ public class LargeNetTest extends BaseDL4JTest { ComputationGraph net = new ComputationGraph(conf); net.init(); - INDArray params = net.params(); + INDArray params = net.getModelParams(); long paramsLength = params.length(); long expParamsLength = 10_000_000L * 300 + 300 * 10 + 10; assertEquals(expParamsLength, paramsLength); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestLrChanges.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestLrChanges.java index 77f3a2342..69099f0a0 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestLrChanges.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestLrChanges.java @@ -22,11 +22,9 @@ package org.deeplearning4j.nn.misc; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; -import org.deeplearning4j.nn.conf.weightnoise.DropConnect; import org.deeplearning4j.nn.graph.ComputationGraph; import 
org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.junit.jupiter.api.Test; @@ -49,10 +47,10 @@ public class TestLrChanges extends BaseDL4JTest { @Test public void testChangeLrMLN(){ //First: Set LR for a *single* layer and compare vs. equivalent net config - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).updater(new Adam(0.1)).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).updater(new RmsProp(0.01)).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).updater(new NoOp()).lossFunction(LossFunctions.LossFunction.MSE).build()) @@ -66,10 +64,10 @@ public class TestLrChanges extends BaseDL4JTest { } - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).updater(new Adam(0.5)).build()) //0.5 LR .layer(new DenseLayer.Builder().nIn(10).nOut(10).updater(new RmsProp(0.01)).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).updater(new NoOp()).lossFunction(LossFunctions.LossFunction.MSE).build()) @@ -78,7 +76,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.init(); net2.getUpdater().getStateViewArray().assign(net.getUpdater().getStateViewArray()); conf2.setIterationCount(conf.getIterationCount()); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); assertEquals(0.1, net.getLearningRate(0).doubleValue(), 0.0); net.setLearningRate(0, 0.5); //Set LR for layer 0 to 0.5 @@ -98,7 +96,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.fit(in, l); } - assertEquals(net.params(), net2.params()); + assertEquals(net.getModelParams(), net2.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), net2.getUpdater().getStateViewArray()); INDArray in1 = Nd4j.rand(10, 10); @@ -112,14 +110,14 @@ public class TestLrChanges extends BaseDL4JTest { net2.setLabels(l1); net2.computeGradientAndScore(); - assertEquals(net.score(), net2.score(), 1e-8); + assertEquals(net.getScore(), net2.getScore(), 1e-8); //Now: Set *all* LRs to say 0.3... 
- MultiLayerConfiguration conf3 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf3 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).updater(new Adam(0.3)).build()) //0.5 LR .layer(new DenseLayer.Builder().nIn(10).nOut(10).updater(new RmsProp(0.3)).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).updater(new NoOp()).lossFunction(LossFunctions.LossFunction.MSE).build()) @@ -128,7 +126,7 @@ public class TestLrChanges extends BaseDL4JTest { net3.init(); net3.getUpdater().getStateViewArray().assign(net.getUpdater().getStateViewArray()); conf3.setIterationCount(conf.getIterationCount()); - net3.setParams(net.params().dup()); + net3.setParams(net.getModelParams().dup()); net.setLearningRate(0.3); @@ -141,18 +139,18 @@ public class TestLrChanges extends BaseDL4JTest { net3.fit(in, l); } - assertEquals(net.params(), net3.params()); + assertEquals(net.getModelParams(), net3.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), net3.getUpdater().getStateViewArray()); } @Test public void testChangeLSGD() { //Simple test for no updater nets - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .updater(new Sgd(0.1)) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build()) @@ -177,11 +175,11 @@ public class TestLrChanges extends BaseDL4JTest { @Test public void testChangeLrMLNSchedule(){ //First: Set LR for a *single* layer and compare vs. 
equivalent net config - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .updater(new Adam(0.1)) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()) @@ -195,11 +193,11 @@ public class TestLrChanges extends BaseDL4JTest { } - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .updater(new Adam(new ExponentialSchedule(ScheduleType.ITERATION, 0.5, 0.8 ))) - .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()) @@ -208,7 +206,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.init(); net2.getUpdater().getStateViewArray().assign(net.getUpdater().getStateViewArray()); conf2.setIterationCount(conf.getIterationCount()); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); net.setLearningRate(new ExponentialSchedule(ScheduleType.ITERATION, 0.5, 0.8 )); //Set LR for layer 0 to 0.5 @@ -226,7 +224,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.fit(in, l); } - assertEquals(net.params(), net2.params()); + assertEquals(net.getModelParams(), net2.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), net2.getUpdater().getStateViewArray()); } @@ -239,7 +237,7 @@ public class TestLrChanges extends BaseDL4JTest { @Test public void testChangeLrCompGraph(){ //First: Set LR for a *single* layer and compare vs. equivalent net config - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .graphBuilder() @@ -258,7 +256,7 @@ public class TestLrChanges extends BaseDL4JTest { } - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .graphBuilder() @@ -272,7 +270,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.init(); net2.getUpdater().getStateViewArray().assign(net.getUpdater().getStateViewArray()); conf2.setIterationCount(conf.getIterationCount()); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); assertEquals(0.1, net.getLearningRate("0").doubleValue(), 0.0); net.setLearningRate("0", 0.5); //Set LR for layer 0 to 0.5 @@ -292,7 +290,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.fit(new DataSet(in, l)); } - assertEquals(net.params(), net2.params()); + assertEquals(net.getModelParams(), net2.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), net2.getUpdater().getStateViewArray()); INDArray in1 = Nd4j.rand(10, 10); @@ -306,11 +304,11 @@ public class TestLrChanges extends BaseDL4JTest { net2.setLabels(l1); net2.computeGradientAndScore(); - assertEquals(net.score(), net2.score(), 1e-8); + assertEquals(net.getScore(), net2.getScore(), 1e-8); //Now: Set *all* LRs to say 0.3... 
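// Illustrative sketch of the builder migration applied throughout this patch, assuming the
// fluent API shown in these hunks (NeuralNetConfiguration.builder() with direct layer(...)
// calls replacing new NeuralNetConfiguration.Builder().list()); the "sketchConf" local is
// hypothetical and mirrors the configuration built just below.
NeuralNetConfiguration sketchConf = NeuralNetConfiguration.builder()
        .activation(Activation.TANH)
        .seed(12345)
        .layer(new DenseLayer.Builder().nIn(10).nOut(10).build())
        .layer(new OutputLayer.Builder().nIn(10).nOut(10)
                .lossFunction(LossFunctions.LossFunction.MSE).build())
        .build();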
- MultiLayerConfiguration conf3 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf3 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .list() @@ -322,7 +320,7 @@ public class TestLrChanges extends BaseDL4JTest { net3.init(); net3.getUpdater().getStateViewArray().assign(net.getUpdater().getStateViewArray()); conf3.setIterationCount(conf.getIterationCount()); - net3.setParams(net.params().dup()); + net3.setParams(net.getModelParams().dup()); net.setLearningRate(0.3); @@ -335,14 +333,14 @@ public class TestLrChanges extends BaseDL4JTest { net3.fit(new DataSet(in, l)); } - assertEquals(net.params(), net3.params()); + assertEquals(net.getModelParams(), net3.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), net3.getUpdater().getStateViewArray()); } @Test public void testChangeLrCompGraphSchedule(){ //First: Set LR for a *single* layer and compare vs. equivalent net config - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .updater(new Adam(0.1)) @@ -362,7 +360,7 @@ public class TestLrChanges extends BaseDL4JTest { } - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .updater(new Adam(new ExponentialSchedule(ScheduleType.ITERATION, 0.5, 0.8 ))) @@ -377,7 +375,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.init(); net2.getUpdater().getStateViewArray().assign(net.getUpdater().getStateViewArray()); conf2.setIterationCount(conf.getIterationCount()); - net2.setParams(net.params().dup()); + net2.setParams(net.getModelParams().dup()); net.setLearningRate(new ExponentialSchedule(ScheduleType.ITERATION, 0.5, 0.8 )); //Set LR for layer 0 to 0.5 @@ -395,7 +393,7 @@ public class TestLrChanges extends BaseDL4JTest { net2.fit(new DataSet(in, l)); } - assertEquals(net.params(), net2.params()); + assertEquals(net.getModelParams(), net2.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), net2.getUpdater().getStateViewArray()); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestMemoryReports.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestMemoryReports.java index a7fcee172..b22bfec2f 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestMemoryReports.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestMemoryReports.java @@ -24,7 +24,6 @@ import org.apache.commons.io.FileUtils; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.*; import org.deeplearning4j.nn.conf.graph.rnn.DuplicateToTimeSeriesVertex; @@ -53,8 +52,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; public class TestMemoryReports extends BaseDL4JTest { - public static List> getTestLayers() { - List> l = new ArrayList<>(); + public static List> getTestLayers() { + List> l = new ArrayList<>(); l.add(new Pair<>(new ActivationLayer.Builder().activation(Activation.TANH).build(), InputType.feedForward(20))); l.add(new Pair<>(new DenseLayer.Builder().nIn(20).nOut(20).build(), 
InputType.feedForward(20))); l.add(new Pair<>(new DropoutLayer.Builder().nIn(20).nOut(20).build(), InputType.feedForward(20))); @@ -100,12 +99,12 @@ public class TestMemoryReports extends BaseDL4JTest { @Test public void testMemoryReportSimple() { - List> l = getTestLayers(); + List> l = getTestLayers(); - for (Pair p : l) { + for (Pair p : l) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list().layer(0, p.getFirst().clone()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer(0, p.getFirst().clone()) .layer(1, p.getFirst().clone()).validateOutputLayerConfig(false).build(); MemoryReport mr = conf.getMemoryReport(p.getSecond()); @@ -128,12 +127,12 @@ public class TestMemoryReports extends BaseDL4JTest { @Test public void testMemoryReportSimpleCG() { - List> l = getTestLayers(); + List> l = getTestLayers(); - for (Pair p : l) { + for (Pair p : l) { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("0", p.getFirst().clone(), "in").addLayer("1", p.getFirst().clone(), "0") .setOutputs("1").validateOutputLayerConfig(false).build(); @@ -168,7 +167,7 @@ public class TestMemoryReports extends BaseDL4JTest { layerInputs = new String[] {"1"}; } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs(inputs) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs(inputs) .allowDisconnected(true) .addVertex("gv", p.getFirst(), layerInputs).setOutputs("gv").build(); @@ -216,7 +215,7 @@ public class TestMemoryReports extends BaseDL4JTest { @Test public void validateSimple() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(0, new DenseLayer.Builder().nIn(10).nOut(20).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(27).build()).build(); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestNetConversion.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestNetConversion.java index fc8312630..01278db4e 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestNetConversion.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/TestNetConversion.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.misc; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -78,30 +77,30 @@ public class TestNetConversion extends BaseDL4JTest { n.computeGradientAndScore(); cg.computeGradientAndScore(); - assertEquals(n.score(), cg.score(), 1e-6); + assertEquals(n.getScore(), cg.getScore(), 1e-6); assertEquals(n.gradient().gradient(), cg.gradient().gradient()); n.fit(in, labels); cg.fit(new INDArray[]{in}, new INDArray[]{labels}); - assertEquals(n.params(), cg.params()); + assertEquals(n.getModelParams(), cg.getModelParams()); } } private MultiLayerNetwork getNet1(boolean train) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .convolutionMode(ConvolutionMode.Same) 
.activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .updater(new Sgd(0.1)) - .list() + .layer(new ConvolutionLayer.Builder().nIn(3).nOut(5).kernelSize(2, 2).stride(1, 1).build()) .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build()) .layer(new DenseLayer.Builder().nOut(32).build()) .layer(new OutputLayer.Builder().nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build()) - .setInputType(InputType.convolutional(10, 10, 3)) + .inputType(InputType.convolutional(10, 10, 3)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -121,16 +120,16 @@ public class TestNetConversion extends BaseDL4JTest { private MultiLayerNetwork getNet2() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .convolutionMode(ConvolutionMode.Same) .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .updater(new Sgd(0.1)) - .list() + .layer(new GravesLSTM.Builder().nOut(8).build()) .layer(new LSTM.Builder().nOut(8).build()) .layer(new RnnOutputLayer.Builder().nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build()) - .setInputType(InputType.recurrent(5)) + .inputType(InputType.recurrent(5)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/WorkspaceTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/WorkspaceTests.java index cf7d31bd5..904dd845b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/WorkspaceTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/misc/WorkspaceTests.java @@ -73,8 +73,8 @@ public class WorkspaceTests extends BaseDL4JTest { ComputationGraph c = createNet(); for (WorkspaceMode wm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) { log.info("Starting test: {}", wm); - c.getConfiguration().setTrainingWorkspaceMode(wm); - c.getConfiguration().setInferenceWorkspaceMode(wm); + c.getComputationGraphConfiguration().setTrainingWorkspaceMode(wm); + c.getComputationGraphConfiguration().setInferenceWorkspaceMode(wm); INDArray f = Nd4j.rand(8, 1, 28, 28); INDArray l = Nd4j.rand(8, 10); @@ -93,24 +93,24 @@ public class WorkspaceTests extends BaseDL4JTest { int depthOut = 2; int nOut = 2; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) - .convolutionMode(ConvolutionMode.Same).seed(12345L).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) + .convolutionMode(ConvolutionMode.Same).seed(12345L) .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, 2) .stride(1, 1).activation(Activation.TANH).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(nOut).build()) - .setInputType(InputType.convolutional(5, 5, 2)) + .inputType(InputType.convolutional(5, 5, 2)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf.clone()); net.init(); - net.getLayerWiseConfigurations().setInferenceWorkspaceMode(WorkspaceMode.ENABLED); - net.getLayerWiseConfigurations().setTrainingWorkspaceMode(WorkspaceMode.ENABLED); + net.getNetConfiguration().setInferenceWorkspaceMode(WorkspaceMode.ENABLED); + net.getNetConfiguration().setTrainingWorkspaceMode(WorkspaceMode.ENABLED); MultiLayerNetwork net2 = new MultiLayerNetwork(conf.clone()); net2.init(); - 
net2.getLayerWiseConfigurations().setInferenceWorkspaceMode(WorkspaceMode.NONE); - net2.getLayerWiseConfigurations().setTrainingWorkspaceMode(WorkspaceMode.NONE); + net2.getNetConfiguration().setInferenceWorkspaceMode(WorkspaceMode.NONE); + net2.getNetConfiguration().setTrainingWorkspaceMode(WorkspaceMode.NONE); INDArray in = Nd4j.rand(1, 2, 5, 5); @@ -120,7 +120,7 @@ public class WorkspaceTests extends BaseDL4JTest { public static ComputationGraph createNet() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("0", new ConvolutionLayer.Builder().nOut(3) @@ -149,7 +149,7 @@ public class WorkspaceTests extends BaseDL4JTest { for (WorkspaceMode wm : WorkspaceMode.values()) { System.out.println(wm); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wm) .inferenceWorkspaceMode(wm) .graphBuilder() @@ -184,15 +184,15 @@ public class WorkspaceTests extends BaseDL4JTest { public void testWithPreprocessorsMLN() { for (WorkspaceMode wm : WorkspaceMode.values()) { System.out.println(wm); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .trainingWorkspaceMode(wm) .inferenceWorkspaceMode(wm) - .list() + .layer(new GravesLSTM.Builder().nIn(10).nOut(5).build()) .layer(new GravesLSTM.Builder().nIn(5).nOut(8).build()) .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nOut(3).build()) .inputPreProcessor(0, new DupPreProcessor()) - .setInputType(InputType.recurrent(10)) + .inputType(InputType.recurrent(10)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -247,14 +247,14 @@ public class WorkspaceTests extends BaseDL4JTest { System.out.println("Starting test: " + ws + " - " + i); - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder b = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) .inferenceWorkspaceMode(ws) .trainingWorkspaceMode(ws) .list(); - ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder gb = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) .inferenceWorkspaceMode(ws) @@ -292,7 +292,7 @@ public class WorkspaceTests extends BaseDL4JTest { gb.addLayer("out", new RnnOutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build(), "1"); gb.setOutputs("out"); - MultiLayerConfiguration conf = b.build(); + NeuralNetConfiguration conf = b.build(); ComputationGraphConfiguration conf2 = gb.build(); @@ -320,14 +320,14 @@ public class WorkspaceTests extends BaseDL4JTest { System.out.println("Starting test: " + ws + " - " + i); - NeuralNetConfiguration.ListBuilder b = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.NeuralNetConfigurationBuilder b = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) .inferenceWorkspaceMode(ws) .trainingWorkspaceMode(ws) .list(); - ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration.GraphBuilder gb = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) 
.activation(Activation.TANH) .inferenceWorkspaceMode(ws) @@ -366,14 +366,14 @@ public class WorkspaceTests extends BaseDL4JTest { .nIn(10).nOut(10).build(), "1"); gb.setOutputs("out"); - MultiLayerConfiguration conf = b + NeuralNetConfiguration conf = b .backpropType(BackpropType.TruncatedBPTT) - .tBPTTLength(5) + .tbpttBackLength(5).tbpttFwdLength(5) .build(); ComputationGraphConfiguration conf2 = gb .backpropType(BackpropType.TruncatedBPTT) - .tBPTTForwardLength(5).tBPTTBackwardLength(5) + .tbpttFwdLength(5).tbpttBackLength(5) .build(); @@ -400,7 +400,7 @@ public class WorkspaceTests extends BaseDL4JTest { log.info("WorkspaceMode = " + ws); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .seed(12345) .trainingWorkspaceMode(ws).inferenceWorkspaceMode(ws) @@ -429,7 +429,7 @@ public class WorkspaceTests extends BaseDL4JTest { public void testWorkspaceSetting() { for (WorkspaceMode wsm : WorkspaceMode.values()) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .seed(12345) .trainingWorkspaceMode(wsm).inferenceWorkspaceMode(wsm) @@ -441,7 +441,7 @@ public class WorkspaceTests extends BaseDL4JTest { assertEquals(wsm, conf.getInferenceWorkspaceMode()); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() .weightInit(WeightInit.XAVIER) .seed(12345) .trainingWorkspaceMode(wsm).inferenceWorkspaceMode(wsm) @@ -458,7 +458,7 @@ public class WorkspaceTests extends BaseDL4JTest { @Test public void testClearing() { for(WorkspaceMode wsm : WorkspaceMode.values()) { - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .updater(new Adam()) .inferenceWorkspaceMode(wsm) .trainingWorkspaceMode(wsm) @@ -476,7 +476,7 @@ public class WorkspaceTests extends BaseDL4JTest { final ComputationGraph computationGraph = new ComputationGraph(config); computationGraph.init(); - computationGraph.setListeners(new ScoreIterationListener(3)); + computationGraph.addTrainingListeners(new ScoreIterationListener(3)); WSTestDataSetIterator iterator = new WSTestDataSetIterator(); computationGraph.fit(iterator); @@ -501,7 +501,7 @@ public class WorkspaceTests extends BaseDL4JTest { MemoryWorkspace workspace = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(conf, wsName); - MultiLayerConfiguration netConf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration netConf = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .list() @@ -556,7 +556,7 @@ public class WorkspaceTests extends BaseDL4JTest { final INDArray input = Nd4j.rand(1, 30); - final ComputationGraphConfiguration computationGraphConfiguration = new NeuralNetConfiguration.Builder() + final ComputationGraphConfiguration computationGraphConfiguration = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("state") .addLayer("value_output", new OutputLayer.Builder().nIn(30).nOut(1).activation(Activation.IDENTITY) @@ -578,7 +578,7 @@ public class WorkspaceTests extends BaseDL4JTest { INDArray input = Nd4j.rand(1, 30); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new 
OutputLayer.Builder().nIn(30).nOut(1).activation(Activation.IDENTITY).lossFunction(LossFunctions.LossFunction.MSE).build()) .build(); @@ -607,13 +607,13 @@ public class WorkspaceTests extends BaseDL4JTest { - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same).seed(12345L).list() .layer(0, new ConvolutionLayer.Builder().nIn(1).nOut(2).kernelSize(2, 2) .stride(1, 1).activation(Activation.TANH).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nOut(10).build()) - .setInputType(InputType.convolutional(5, 5, 1)) + .inputType(InputType.convolutional(5, 5, 1)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(mlc); @@ -666,8 +666,8 @@ public class WorkspaceTests extends BaseDL4JTest { ComputationGraph c = createNet(); for (WorkspaceMode wm : new WorkspaceMode[]{WorkspaceMode.NONE, WorkspaceMode.ENABLED}) { log.info("Starting test: {}", wm); - c.getConfiguration().setTrainingWorkspaceMode(wm); - c.getConfiguration().setInferenceWorkspaceMode(wm); + c.getComputationGraphConfiguration().setTrainingWorkspaceMode(wm); + c.getComputationGraphConfiguration().setInferenceWorkspaceMode(wm); INDArray f = Nd4j.rand(8, 1, 28, 28); INDArray l = Nd4j.rand(8, 10); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java index 695fdb70d..ca9c0f67c 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/mkldnn/ValidateMKLDNN.java @@ -25,7 +25,6 @@ import org.deeplearning4j.LayerHelperValidationUtil; import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.impl.SingletonDataSetIterator; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; @@ -74,7 +73,7 @@ public class ValidateMKLDNN extends BaseDL4JTest { INDArray f = Nd4j.rand(DataType.FLOAT, inputSize); INDArray l = TestUtils.randomOneHot(minibatch, 10).castTo(DataType.FLOAT); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(0.01)) .convolutionMode(cm) .seed(12345) @@ -98,7 +97,7 @@ public class ValidateMKLDNN extends BaseDL4JTest { .nOut(3) .build()) .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1])) + .inputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1])) .build(); MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone()); @@ -149,7 +148,7 @@ public class ValidateMKLDNN extends BaseDL4JTest { INDArray f = Nd4j.rand(Nd4j.defaultFloatingPointType(), inputSize); INDArray l = TestUtils.randomOneHot(minibatch, 10); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .updater(new Adam(0.01)) .convolutionMode(cm) @@ -169,7 +168,7 @@ 
public class ValidateMKLDNN extends BaseDL4JTest { .nOut(3) .build()) .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1])) + .inputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1])) .build(); MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone()); @@ -223,7 +222,7 @@ public class ValidateMKLDNN extends BaseDL4JTest { INDArray f = Nd4j.rand(Nd4j.defaultFloatingPointType(), inputSize); INDArray l = TestUtils.randomOneHot(minibatch, 10).castTo(DataType.FLOAT); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(0.01)) .convolutionMode(cm) .weightInit(new NormalDistribution(0,1)) @@ -242,7 +241,7 @@ public class ValidateMKLDNN extends BaseDL4JTest { .k(k[i]) .cudnnAllowFallback(false).build()) .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1])) + .inputType(InputType.convolutional(inputSize[2], inputSize[3], inputSize[1])) .build(); MultiLayerNetwork netWith = new MultiLayerNetwork(conf.clone()); @@ -292,7 +291,7 @@ public class ValidateMKLDNN extends BaseDL4JTest { INDArray dLdb = beta.ulike(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .inferenceWorkspaceMode(WorkspaceMode.NONE) .trainingWorkspaceMode(WorkspaceMode.NONE) .list() diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/BackPropMLPTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/BackPropMLPTest.java index 94f26b712..c818f2281 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/BackPropMLPTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/BackPropMLPTest.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.multilayer; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -33,11 +32,9 @@ import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.optimize.listeners.ScoreIterationListener; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.iter.NdIndexIterator; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.api.ops.impl.transforms.strict.SigmoidDerivative; -import org.nd4j.linalg.api.ops.impl.transforms.strict.TanhDerivative; import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.exception.ND4JArraySizeException; @@ -57,7 +54,7 @@ public class BackPropMLPTest extends BaseDL4JTest { public void testMLPTrivial() { //Simplest possible case: 1 hidden layer, 1 hidden neuron, batch size of 1. 
MultiLayerNetwork network = new MultiLayerNetwork(getIrisMLPSimpleConfig(new int[] {1}, Activation.SIGMOID)); - network.setListeners(new ScoreIterationListener(1)); + network.addTrainingListeners(new ScoreIterationListener(1)); network.init(); DataSetIterator iter = new IrisDataSetIterator(1, 10); @@ -69,7 +66,7 @@ public class BackPropMLPTest extends BaseDL4JTest { @Test public void testMLP() { //Simple mini-batch test with multiple hidden layers - MultiLayerConfiguration conf = getIrisMLPSimpleConfig(new int[] {5, 4, 3}, Activation.SIGMOID); + NeuralNetConfiguration conf = getIrisMLPSimpleConfig(new int[] {5, 4, 3}, Activation.SIGMOID); // System.out.println(conf); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -83,7 +80,7 @@ public class BackPropMLPTest extends BaseDL4JTest { @Test public void testMLP2() { //Simple mini-batch test with multiple hidden layers - MultiLayerConfiguration conf = getIrisMLPSimpleConfig(new int[] {5, 15, 3}, Activation.TANH); + NeuralNetConfiguration conf = getIrisMLPSimpleConfig(new int[] {5, 15, 3}, Activation.TANH); // System.out.println(conf); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); @@ -322,9 +319,9 @@ public class BackPropMLPTest extends BaseDL4JTest { * Learning Rate = 0.1 * No regularization, no Adagrad, no momentum etc. One iteration. */ - private static MultiLayerConfiguration getIrisMLPSimpleConfig(int[] hiddenLayerSizes, + private static NeuralNetConfiguration getIrisMLPSimpleConfig(int[] hiddenLayerSizes, Activation activationFunction) { - NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder lb = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .seed(12345L).list(); for (int i = 0; i < hiddenLayerSizes.length; i++) { diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTest.java index 056f4a43e..ac1626eda 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTest.java @@ -20,9 +20,31 @@ package org.deeplearning4j.nn.multilayer; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; import lombok.Data; import lombok.EqualsAndHashCode; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.datasets.iterator.ExistingDataSetIterator; @@ -31,12 +53,29 @@ import 
org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.SingletonMultiDataSetIterator; import org.deeplearning4j.eval.Evaluation; import org.deeplearning4j.exception.DL4JException; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.*; +import org.deeplearning4j.nn.conf.BackpropType; +import org.deeplearning4j.nn.conf.ConvolutionMode; +import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.Updater; +import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.*; +import org.deeplearning4j.nn.conf.layers.ActivationLayer; +import org.deeplearning4j.nn.conf.layers.AutoEncoder; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.BatchNormalization; +import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer; +import org.deeplearning4j.nn.conf.layers.GravesLSTM; +import org.deeplearning4j.nn.conf.layers.LSTM; +import org.deeplearning4j.nn.conf.layers.LossLayer; +import org.deeplearning4j.nn.conf.layers.OutputLayer; +import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; +import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; import org.deeplearning4j.nn.conf.layers.objdetect.Yolo2OutputLayer; import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.conf.preprocessor.CnnToFeedForwardPreProcessor; @@ -58,6 +97,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.nd4j.common.primitives.Pair; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; @@ -78,356 +118,349 @@ import org.nd4j.linalg.learning.config.Adam; import org.nd4j.linalg.learning.config.NoOp; import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; -import org.nd4j.common.primitives.Pair; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.*; - -import static org.junit.jupiter.api.Assertions.*; @Slf4j public class MultiLayerTest extends BaseDL4JTest { - private static OpExecutioner.ProfilingMode origMode; + private static OpExecutioner.ProfilingMode origMode; - @BeforeAll - public static void beforeClass(){ - origMode = Nd4j.getExecutioner().getProfilingMode(); + @BeforeAll + public static void beforeClass() { + origMode = Nd4j.getExecutioner().getProfilingMode(); + } + + @AfterAll + public static void afterClass() { + Nd4j.getExecutioner().setProfilingMode(origMode); + } + + private static NeuralNetConfiguration getConf() { + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L) + .list().layer(0, + new DenseLayer.Builder().nIn(4).nOut(3) + + .dist(new NormalDistribution(0, 1)) + .build()) + .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(3).nOut(3) + + 
.dist(new NormalDistribution(0, 1)).build()) + .build(); + return conf; + } + + public static float[] asFloat(INDArray arr) { + long len = arr.length(); + + float[] f = new float[(int) len]; + for (int i = 0; i < len; i++) { + f[i] = arr.getFloat(i); + } + return f; + } + + @BeforeEach + public void before() { + Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC); + } + + @Override + public DataType getDataType() { + return DataType.FLOAT; + } + + @Test + public void testSetParams() { + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() + .list().layer(0, + new DenseLayer.Builder().nIn(4).nOut(3) + .activation(Activation.TANH).build()) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) + .build(); + + MultiLayerNetwork network3 = new MultiLayerNetwork(conf); + network3.init(); + + INDArray params = network3.getModelParams(); + INDArray weights = network3.getLayer(0).getParam(DefaultParamInitializer.WEIGHT_KEY).dup(); + INDArray bias = network3.getLayer(0).getParam(DefaultParamInitializer.BIAS_KEY).dup(); + network3.setParameters(params); + assertEquals(weights, network3.getLayer(0).getParam(DefaultParamInitializer.WEIGHT_KEY)); + assertEquals(bias, network3.getLayer(0).getParam(DefaultParamInitializer.BIAS_KEY)); + INDArray params4 = network3.getModelParams(); + assertEquals(params, params4); + } + + @Test + public void testBatchNorm() { + Nd4j.getRandom().setSeed(123); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(2, new BatchNormalization.Builder().nOut(2).build()) + .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).nIn(2).nOut(3).build()) + .build(); + + MultiLayerNetwork network = new MultiLayerNetwork(conf); + network.init(); + network.addTrainingListeners(new ScoreIterationListener(1)); + + DataSetIterator iter = new IrisDataSetIterator(150, 150); + + DataSet next = iter.next(); + next.normalizeZeroMeanZeroUnitVariance(); + SplitTestAndTrain trainTest = next.splitTestAndTrain(110); + network.setLabels(trainTest.getTrain().getLabels()); + network.init(); + for (int i = 0; i < 5; i++) { + network.fit(trainTest.getTrain()); } - @BeforeEach - public void before(){ - Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC); + } + + @Test + public void testBackProp() { + Nd4j.getRandom().setSeed(123); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) + .activation(Activation.SOFTMAX).nIn(2).nOut(3).build()) + .build(); + + MultiLayerNetwork network = new MultiLayerNetwork(conf); + network.init(); + network.addTrainingListeners(new ScoreIterationListener(1)); + + DataSetIterator 
iter = new IrisDataSetIterator(150, 150); + + DataSet next = iter.next(); + next.normalizeZeroMeanZeroUnitVariance(); + SplitTestAndTrain trainTest = next.splitTestAndTrain(110); + network.setInput(trainTest.getTrain().getFeatures()); + network.setLabels(trainTest.getTrain().getLabels()); + network.init(); + for (int i = 0; i < 5; i++) { + network.fit(trainTest.getTrain()); } - @AfterAll - public static void afterClass(){ - Nd4j.getExecutioner().setProfilingMode(origMode); + DataSet test = trainTest.getTest(); + Evaluation eval = new Evaluation(); + INDArray output = network.output(test.getFeatures()); + eval.eval(test.getLabels(), output); + log.info("Score " + eval.stats()); + } + + @Test + public void testGradientWithAsList() { + MultiLayerNetwork net1 = new MultiLayerNetwork(getConf()); + MultiLayerNetwork net2 = new MultiLayerNetwork(getConf()); + net1.init(); + net2.init(); + + DataSet x1 = new IrisDataSetIterator(1, 150).next(); + DataSet all = new IrisDataSetIterator(150, 150).next(); + DataSet x2 = all.asList().get(0); + + //x1 and x2 contain identical data + assertArrayEquals(asFloat(x1.getFeatures()), asFloat(x2.getFeatures()), 0.0f); + assertArrayEquals(asFloat(x1.getLabels()), asFloat(x2.getLabels()), 0.0f); + assertEquals(x1, x2); + + //Set inputs/outputs so gradient can be calculated: + net1.feedForward(x1.getFeatures()); + net2.feedForward(x2.getFeatures()); + ((BaseOutputLayer) net1.getLayer(1)).setLabels(x1.getLabels()); + ((BaseOutputLayer) net2.getLayer(1)).setLabels(x2.getLabels()); + + net1.gradient(); + net2.gradient(); + } + + /** + * This test intended only to test activateSelectedLayers method, it does not involves + * fully-working AutoEncoder. + */ + @Test + public void testSelectedActivations() { + // Train DeepAutoEncoder on very limited trainset + final int numRows = 28; + final int numColumns = 28; + int seed = 123; + int numSamples = 3; + int iterations = 1; + int listenerFreq = iterations / 5; + + log.info("Load data...."); + + float[][] trainingData = new float[numSamples][numColumns * numRows]; + Arrays.fill(trainingData[0], 0.95f); + Arrays.fill(trainingData[1], 0.5f); + Arrays.fill(trainingData[2], 0.05f); + + log.info("Build model...."); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(seed) + .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list() + .layer(0, new DenseLayer.Builder().nIn(numRows * numColumns).nOut(1000).build()) + .layer(1, new DenseLayer.Builder().nIn(1000).nOut(500).build()) + .layer(2, new DenseLayer.Builder().nIn(500).nOut(250).build()) + .layer(3, new DenseLayer.Builder().nIn(250).nOut(100).build()) + .layer(4, new DenseLayer.Builder().nIn(100).nOut(30).build()) //encoding stops + .layer(5, new DenseLayer.Builder().nIn(30).nOut(100).build()) //decoding starts + .layer(6, new DenseLayer.Builder().nIn(100).nOut(250).build()) + .layer(7, new DenseLayer.Builder().nIn(250).nOut(500).build()) + .layer(8, new DenseLayer.Builder().nIn(500).nOut(1000).build()) + .layer(9, + new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nIn(1000) + .nOut(numRows * numColumns).activation(Activation.SOFTMAX).build()) + .build(); + + MultiLayerNetwork model = new MultiLayerNetwork(conf); + model.init(); + + model.addTrainingListeners(new ScoreIterationListener(listenerFreq)); + + log.info("Train model...."); + int cnt = 0; + while (cnt < numSamples) { + INDArray input = Nd4j.create(trainingData[cnt]).reshape(1, -1); + model.fit(new DataSet(input, input)); + cnt++; } + // Make two separate 
selective calls - @Override - public DataType getDataType(){ - return DataType.FLOAT; + log.info("Testing full cycle..."); + + List comparableResult = model.feedForward( + Nd4j.create(trainingData[0], 1, trainingData[0].length)); + + INDArray encodeResult = model.activateSelectedLayers(0, 4, + Nd4j.create(trainingData[0], 1, trainingData[0].length)); + + log.info("Compare feedForward results with selectedActivation"); + + assertEquals(comparableResult.get(5), encodeResult); + + INDArray decodeResults = model.activateSelectedLayers(5, 9, encodeResult); + + log.info("Decode results: " + decodeResults.columns() + " " + decodeResults); + log.info( + "Comparable results: " + comparableResult.get(10).columns() + " " + comparableResult.get( + 10)); + + assertEquals(comparableResult.get(10), decodeResults); + } + + @Test + public void testFeedForwardToLayer() { + + int nIn = 30; + int nOut = 25; + + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) + .updater(new Sgd(1e-3)) + .list().layer( + 0, new DenseLayer.Builder().nIn(nIn).nOut(600) + + .dist(new NormalDistribution(0, 1e-5)) + .build()) + .layer(1, new DenseLayer.Builder() + .nIn(600).nOut(250) + .dist(new NormalDistribution(0, 1e-5)) + .build()) + .layer(2, new DenseLayer.Builder() + .nIn(250).nOut(100) + .dist(new NormalDistribution(0, 1e-5)) + .build()) + .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MCXENT).nIn(100).nOut(25) + .activation(Activation.SOFTMAX) + .weightInit(new NormalDistribution(0, 1e-5)).build()) + .build(); + + MultiLayerNetwork network = new MultiLayerNetwork(conf); + network.init(); + + INDArray input = Nd4j.rand(5, nIn); + + List activations = network.feedForward(input); + assertEquals(5, activations.size()); //4 layers + input + + List activationsAll = network.feedForwardToLayer(3, input); + assertEquals(activations, activationsAll); + + for (int i = 3; i >= 0; i--) { + List activationsPartial = network.feedForwardToLayer(i, input); + assertEquals(i + 2, + activationsPartial.size()); //i+2: for layer 3: input + activations of {0,1,2,3} -> 5 total = 3+2 + for (int j = 0; j <= i; j++) { + INDArray exp = activationsAll.get(j); + INDArray act = activationsPartial.get(j); + assertEquals(exp, act); + } } + } - @Test - public void testSetParams() { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() - .list().layer(0, - new DenseLayer.Builder().nIn(4).nOut(3) - .activation(Activation.TANH).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) - .build(); - MultiLayerNetwork network3 = new MultiLayerNetwork(conf); - network3.init(); + @Test + public void testBackpropGradient() { + //Testing: MultiLayerNetwork.backpropGradient() + //i.e., specifically without an output layer - INDArray params = network3.params(); - INDArray weights = network3.getLayer(0).getParam(DefaultParamInitializer.WEIGHT_KEY).dup(); - INDArray bias = network3.getLayer(0).getParam(DefaultParamInitializer.BIAS_KEY).dup(); - network3.setParameters(params); - assertEquals(weights, network3.getLayer(0).getParam(DefaultParamInitializer.WEIGHT_KEY)); - assertEquals(bias, network3.getLayer(0).getParam(DefaultParamInitializer.BIAS_KEY)); - INDArray params4 = network3.params(); - assertEquals(params, params4); + int nIn = 10; + int nOut = 40; + int miniBatch = 5; + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .updater(new Sgd(0.1)).list() + .layer(0, new 
DenseLayer.Builder().nIn(nIn).nOut(20).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(2, new DenseLayer.Builder().nIn(30).nOut(nOut).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + Nd4j.getRandom().setSeed(12345); + INDArray eps = Nd4j.rand(miniBatch, nOut); + INDArray input = Nd4j.rand(miniBatch, nIn); + + net.setInput(input); + net.feedForward(true, false); //Need to feed forward before backprop + + Pair pair = net.backpropGradient(eps, LayerWorkspaceMgr.noWorkspaces()); + INDArray epsOut = pair.getSecond(); + assertNotNull(epsOut); + assertArrayEquals(new long[]{miniBatch, nIn}, epsOut.shape()); + + Gradient g = pair.getFirst(); + Map gradMap = g.gradientForVariable(); + assertEquals(6, gradMap.size()); //3 layers, weight + bias gradients for each + + String[] expKeys = {"0_" + DefaultParamInitializer.WEIGHT_KEY, + "0_" + DefaultParamInitializer.BIAS_KEY, + "1_" + DefaultParamInitializer.WEIGHT_KEY, "2_" + DefaultParamInitializer.BIAS_KEY, + "2_" + DefaultParamInitializer.WEIGHT_KEY, "2_" + DefaultParamInitializer.BIAS_KEY}; + Set keys = gradMap.keySet(); + for (String s : expKeys) { + assertTrue(keys.contains(s)); } - @Test - public void testBatchNorm() { - Nd4j.getRandom().setSeed(123); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(2, new BatchNormalization.Builder().nOut(2).build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).nIn(2).nOut(3).build()) - .build(); - - - MultiLayerNetwork network = new MultiLayerNetwork(conf); - network.init(); - network.setListeners(new ScoreIterationListener(1)); - - DataSetIterator iter = new IrisDataSetIterator(150, 150); - - DataSet next = iter.next(); - next.normalizeZeroMeanZeroUnitVariance(); - SplitTestAndTrain trainTest = next.splitTestAndTrain(110); - network.setLabels(trainTest.getTrain().getLabels()); - network.init(); - for( int i=0; i<5; i++ ) { - network.fit(trainTest.getTrain()); - } - - } - - @Test - public void testBackProp() { - Nd4j.getRandom().setSeed(123); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).seed(123).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(2, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).weightInit(WeightInit.XAVIER) - .activation(Activation.SOFTMAX).nIn(2).nOut(3).build()) - .build(); - - - MultiLayerNetwork network = new MultiLayerNetwork(conf); - network.init(); - network.setListeners(new ScoreIterationListener(1)); - - DataSetIterator iter = new IrisDataSetIterator(150, 150); - - DataSet next = iter.next(); - 
next.normalizeZeroMeanZeroUnitVariance(); - SplitTestAndTrain trainTest = next.splitTestAndTrain(110); - network.setInput(trainTest.getTrain().getFeatures()); - network.setLabels(trainTest.getTrain().getLabels()); - network.init(); - for( int i=0; i<5; i++ ) { - network.fit(trainTest.getTrain()); - } - - DataSet test = trainTest.getTest(); - Evaluation eval = new Evaluation(); - INDArray output = network.output(test.getFeatures()); - eval.eval(test.getLabels(), output); - log.info("Score " + eval.stats()); - } - - - - @Test - public void testGradientWithAsList() { - MultiLayerNetwork net1 = new MultiLayerNetwork(getConf()); - MultiLayerNetwork net2 = new MultiLayerNetwork(getConf()); - net1.init(); - net2.init(); - - DataSet x1 = new IrisDataSetIterator(1, 150).next(); - DataSet all = new IrisDataSetIterator(150, 150).next(); - DataSet x2 = all.asList().get(0); - - //x1 and x2 contain identical data - assertArrayEquals(asFloat(x1.getFeatures()), asFloat(x2.getFeatures()), 0.0f); - assertArrayEquals(asFloat(x1.getLabels()), asFloat(x2.getLabels()), 0.0f); - assertEquals(x1, x2); - - //Set inputs/outputs so gradient can be calculated: - net1.feedForward(x1.getFeatures()); - net2.feedForward(x2.getFeatures()); - ((BaseOutputLayer) net1.getLayer(1)).setLabels(x1.getLabels()); - ((BaseOutputLayer) net2.getLayer(1)).setLabels(x2.getLabels()); - - net1.gradient(); - net2.gradient(); - } - - /** - * This test intended only to test activateSelectedLayers method, it does not involves fully-working AutoEncoder. - */ - @Test - public void testSelectedActivations() { - // Train DeepAutoEncoder on very limited trainset - final int numRows = 28; - final int numColumns = 28; - int seed = 123; - int numSamples = 3; - int iterations = 1; - int listenerFreq = iterations / 5; - - log.info("Load data...."); - - float[][] trainingData = new float[numSamples][numColumns * numRows]; - Arrays.fill(trainingData[0], 0.95f); - Arrays.fill(trainingData[1], 0.5f); - Arrays.fill(trainingData[2], 0.05f); - - - - log.info("Build model...."); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed) - .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT).list() - .layer(0, new DenseLayer.Builder().nIn(numRows * numColumns).nOut(1000).build()) - .layer(1, new DenseLayer.Builder().nIn(1000).nOut(500).build()) - .layer(2, new DenseLayer.Builder().nIn(500).nOut(250).build()) - .layer(3, new DenseLayer.Builder().nIn(250).nOut(100).build()) - .layer(4, new DenseLayer.Builder().nIn(100).nOut(30).build()) //encoding stops - .layer(5, new DenseLayer.Builder().nIn(30).nOut(100).build()) //decoding starts - .layer(6, new DenseLayer.Builder().nIn(100).nOut(250).build()) - .layer(7, new DenseLayer.Builder().nIn(250).nOut(500).build()) - .layer(8, new DenseLayer.Builder().nIn(500).nOut(1000).build()) - .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nIn(1000) - .nOut(numRows * numColumns).activation(Activation.SOFTMAX).build()) - .build(); - - MultiLayerNetwork model = new MultiLayerNetwork(conf); - model.init(); - - model.addListeners(new ScoreIterationListener(listenerFreq)); - - log.info("Train model...."); - int cnt = 0; - while (cnt < numSamples) { - INDArray input = Nd4j.create(trainingData[cnt]).reshape(1, -1); - model.fit(new DataSet(input, input)); - cnt++; - } - // Make two separate selective calls - - log.info("Testing full cycle..."); - - List comparableResult = model.feedForward(Nd4j.create(trainingData[0], 1, trainingData[0].length)); - - INDArray 
encodeResult = model.activateSelectedLayers(0, 4, Nd4j.create(trainingData[0], 1, trainingData[0].length)); - - log.info("Compare feedForward results with selectedActivation"); - - assertEquals(comparableResult.get(5), encodeResult); - - INDArray decodeResults = model.activateSelectedLayers(5, 9, encodeResult); - - - log.info("Decode results: " + decodeResults.columns() + " " + decodeResults); - log.info("Comparable results: " + comparableResult.get(10).columns() + " " + comparableResult.get(10)); - - assertEquals(comparableResult.get(10), decodeResults); - } - - private static MultiLayerConfiguration getConf() { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L) - .list().layer(0, - new DenseLayer.Builder().nIn(4).nOut(3) - - .dist(new NormalDistribution(0,1)) - .build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(3).nOut(3) - - .dist(new NormalDistribution(0, 1)).build()) - .build(); - return conf; - } - - public static float[] asFloat(INDArray arr) { - long len = arr.length(); - - float[] f = new float[(int) len]; - for (int i = 0; i < len; i++) - f[i] = arr.getFloat(i); - return f; - } - - @Test - public void testFeedForwardToLayer() { - - int nIn = 30; - int nOut = 25; - - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT) - .updater(new Sgd(1e-3)) - .list().layer( - 0, new DenseLayer.Builder().nIn(nIn).nOut(600) - - .dist(new NormalDistribution(0,1e-5)) - .build()) - .layer(1, new DenseLayer.Builder() - .nIn(600).nOut(250) - .dist(new NormalDistribution(0, 1e-5)) - .build()) - .layer(2, new DenseLayer.Builder() - .nIn(250).nOut(100) - .dist(new NormalDistribution(0, 1e-5)) - .build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).nIn(100).nOut(25) - .activation(Activation.SOFTMAX) - .weightInit(new NormalDistribution(0, 1e-5)).build()) - .build(); - - MultiLayerNetwork network = new MultiLayerNetwork(conf); - network.init(); - - - INDArray input = Nd4j.rand(5, nIn); - - List activations = network.feedForward(input); - assertEquals(5, activations.size()); //4 layers + input - - List activationsAll = network.feedForwardToLayer(3, input); - assertEquals(activations, activationsAll); - - for (int i = 3; i >= 0; i--) { - List activationsPartial = network.feedForwardToLayer(i, input); - assertEquals(i + 2, activationsPartial.size()); //i+2: for layer 3: input + activations of {0,1,2,3} -> 5 total = 3+2 - for (int j = 0; j <= i; j++) { - INDArray exp = activationsAll.get(j); - INDArray act = activationsPartial.get(j); - assertEquals(exp, act); - } - } - } - - - @Test - public void testBackpropGradient() { - //Testing: MultiLayerNetwork.backpropGradient() - //i.e., specifically without an output layer - - int nIn = 10; - int nOut = 40; - int miniBatch = 5; - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .updater(new Sgd(0.1)).list() - .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(2, new DenseLayer.Builder().nIn(30).nOut(nOut).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - 
Nd4j.getRandom().setSeed(12345); - INDArray eps = Nd4j.rand(miniBatch, nOut); - INDArray input = Nd4j.rand(miniBatch, nIn); - - net.setInput(input); - net.feedForward(true, false); //Need to feed forward before backprop - - Pair pair = net.backpropGradient(eps, LayerWorkspaceMgr.noWorkspaces()); - INDArray epsOut = pair.getSecond(); - assertNotNull(epsOut); - assertArrayEquals(new long[] {miniBatch, nIn}, epsOut.shape()); - - Gradient g = pair.getFirst(); - Map gradMap = g.gradientForVariable(); - assertEquals(6, gradMap.size()); //3 layers, weight + bias gradients for each - - String[] expKeys = {"0_" + DefaultParamInitializer.WEIGHT_KEY, "0_" + DefaultParamInitializer.BIAS_KEY, - "1_" + DefaultParamInitializer.WEIGHT_KEY, "2_" + DefaultParamInitializer.BIAS_KEY, - "2_" + DefaultParamInitializer.WEIGHT_KEY, "2_" + DefaultParamInitializer.BIAS_KEY}; - Set keys = gradMap.keySet(); - for (String s : expKeys) { - assertTrue(keys.contains(s)); - } - /* System.out.println(pair); @@ -443,1092 +476,1114 @@ public class MultiLayerTest extends BaseDL4JTest { net.setParams(params); //params() may not be in-place System.out.println(Arrays.toString(params.get(NDArrayIndex.all(), NDArrayIndex.interval(0, 10)).dup().data().asFloat())); */ + } + + @Test + public void testLayerNames() { + int nIn = 10; + int nOut = 40; + + List layerNameList = new ArrayList<>(); + layerNameList.add("dnn1"); + layerNameList.add("dnn2"); + layerNameList.add("dnn3"); + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .updater(new Sgd(0.1)).list() + .layer(0, + new DenseLayer.Builder().name("dnn1").nIn(nIn).nOut(20).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(1, new DenseLayer.Builder().name("dnn2").nIn(20).nOut(30).activation(Activation.RELU) + .weightInit(WeightInit.XAVIER).build()) + .layer(2, new DenseLayer.Builder().name("dnn3").nIn(30).nOut(nOut) + .activation(Activation.SOFTMAX).weightInit(WeightInit.XAVIER).build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + assertEquals(layerNameList.get(0), net.getLayer(0).getLayerConfiguration().getLayerName()); + assertEquals(layerNameList, net.getLayerNames()); + BaseLayerConfiguration b = (BaseLayerConfiguration) net.getLayer(layerNameList.get(2)).getLayerConfiguration(); + assertEquals("softmax", b.getActivationFn().toString()); + } + + + @Test + public void testScoreExamples() { + Nd4j.getRandom().setSeed(12345); + int nIn = 5; + int nOut = 6; + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) + .l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER) + .list() + .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) + .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()) + .layer(2, new OutputLayer.Builder() + .lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build()) + .build(); + + NeuralNetConfiguration confNoReg = NeuralNetConfiguration.builder().seed(12345) + .updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() + .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) + .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()) + .layer(2, new OutputLayer.Builder() + .lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build()) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + MultiLayerNetwork netNoReg = new MultiLayerNetwork(confNoReg); + netNoReg.init(); + 
netNoReg.setParameters(net.getModelParams().dup()); + + //Score single example, and compare to scoreExamples: + INDArray input = Nd4j.rand(3, nIn); + INDArray output = Nd4j.rand(3, nOut); + DataSet ds = new DataSet(input, output); + + INDArray scoresWithRegularization = net.scoreExamples(ds, true); + INDArray scoresNoRegularization = net.scoreExamples(ds, false); + + assertArrayEquals(new long[]{3, 1}, scoresWithRegularization.shape()); + assertArrayEquals(new long[]{3, 1}, scoresNoRegularization.shape()); + + for (int i = 0; i < 3; i++) { + DataSet singleEx = new DataSet(input.getRow(i, true), output.getRow(i, true)); + double score = net.score(singleEx); + double scoreNoReg = netNoReg.score(singleEx); + + double scoreUsingScoreExamples = scoresWithRegularization.getDouble(i); + double scoreUsingScoreExamplesNoReg = scoresNoRegularization.getDouble(i); + assertEquals(score, scoreUsingScoreExamples, 1e-4); + assertEquals(scoreNoReg, scoreUsingScoreExamplesNoReg, 1e-4); + assertTrue(scoreUsingScoreExamples + > scoreUsingScoreExamplesNoReg); //Regularization term increases score + + // System.out.println(score + "\t" + scoreUsingScoreExamples + "\t|\t" + scoreNoReg + "\t" + scoreUsingScoreExamplesNoReg); + } + } + + @Test + public void testDataSetScore() { + + Nd4j.getRandom().setSeed(12345); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .weightInit(WeightInit.XAVIER).seed(12345L).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.SIGMOID).build()) + .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(3).nOut(3).build()) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + INDArray in = Nd4j.create(new double[]{1.0, 2.0, 3.0, 4.0}, 1, 4); + INDArray out = Nd4j.create(new double[]{1, 0, 0}, 1, 3); + + double score = net.score(new DataSet(in, out)); + } + + @Test + public void testDataSetScoreCNN() { + + int miniBatch = 3; + int depth = 2; + int width = 3; + int height = 3; + int nOut = 2; + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .seed(12345L).list().layer(0, new ConvolutionLayer.Builder(2, 2).nOut(1).build()) + .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nOut(2).build()) + .inputType(InputType.convolutionalFlat(height, width, depth)) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + Nd4j.getRandom().setSeed(12345); + Random r = new Random(12345); + INDArray input = Nd4j.rand(miniBatch, depth * width * height); + INDArray labels = Nd4j.create(miniBatch, nOut); + for (int i = 0; i < miniBatch; i++) { + labels.putScalar(new int[]{i, r.nextInt(nOut)}, 1.0); } - @Test - public void testLayerNames() { - int nIn = 10; - int nOut = 40; + double score = net.score(new DataSet(input, labels)); + } - List layerNameList = new ArrayList<>(); - layerNameList.add("dnn1"); - layerNameList.add("dnn2"); - layerNameList.add("dnn3"); + @Test + public void testPredict() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .updater(new Sgd(0.1)).list() - .layer(0, new DenseLayer.Builder().name("dnn1").nIn(nIn).nOut(20).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(1, new DenseLayer.Builder().name("dnn2").nIn(20).nOut(30).activation(Activation.RELU) - .weightInit(WeightInit.XAVIER).build()) - .layer(2, new DenseLayer.Builder().name("dnn3").nIn(30).nOut(nOut) - 
.activation(Activation.SOFTMAX).weightInit(WeightInit.XAVIER).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); + Nd4j.getRandom().setSeed(12345); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .weightInit(WeightInit.XAVIER).seed(12345L).list() + .layer(0, new DenseLayer.Builder().nIn(784).nOut(50).activation(Activation.RELU).build()) + .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(50).nOut(10).build()) + .inputType(InputType.convolutional(28, 28, 1)).build(); - assertEquals(layerNameList.get(0), net.getLayer(0).conf().getLayer().getLayerName()); - assertEquals(layerNameList, net.getLayerNames()); - BaseLayer b = (BaseLayer) net.getLayer(layerNameList.get(2)).conf().getLayer(); - assertEquals("softmax", b.getActivationFn().toString()); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + DataSetIterator ds = new MnistDataSetIterator(10, 10); + net.fit(ds); + + DataSetIterator testDs = new MnistDataSetIterator(1, 1); + DataSet testData = testDs.next(); + testData.setLabelNames(Arrays.asList("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")); + String actualLables = testData.getLabelName(0); + List prediction = net.predict(testData); + assertNotNull(actualLables); + assertNotNull(prediction.get(0)); + } + + @Test + //@Ignore + public void testCid() throws Exception { + System.out.println(EnvironmentUtils.buildCId()); + + Environment environment = EnvironmentUtils.buildEnvironment(); + environment.setSerialVersionID(EnvironmentUtils.buildCId()); + + Task task = TaskUtils.buildTask(Nd4j.create(new double[]{1, 2, 3, 4, 5, 6}, 1, 6)); + + Heartbeat.getInstance().reportEvent(Event.STANDALONE, environment, task); + + Thread.sleep(25000); + } + + @Test + public void testOutput() throws Exception { + Nd4j.getRandom().setSeed(12345); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .weightInit(WeightInit.XAVIER).seed(12345L).list() + .layer(0, new DenseLayer.Builder().nIn(784).nOut(50).activation(Activation.RELU).build()) + .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(50).nOut(10).build()) + .inputType(InputType.convolutional(28, 28, 1)).build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + DataSetIterator fullData = new MnistDataSetIterator(1, 2); + net.fit(fullData); + + fullData.reset(); + DataSet expectedSet = fullData.next(2); + INDArray expectedOut = net.output(expectedSet.getFeatures(), false); + + fullData.reset(); + + INDArray actualOut = net.output(fullData); + + assertEquals(expectedOut, actualOut); + } + + @Test + public void testGradientUpdate() throws Exception { + DataSetIterator iter = new IrisDataSetIterator(1, 1); + + Gradient expectedGradient = new DefaultGradient(); + expectedGradient.setGradientFor("0_W", Nd4j.ones(4, 5)); + expectedGradient.setGradientFor("0_b", Nd4j.ones(1, 5)); + expectedGradient.setGradientFor("1_W", Nd4j.ones(5, 3)); + expectedGradient.setGradientFor("1_b", Nd4j.ones(1, 3)); + + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new Sgd(1.0)) + .activation(Activation.RELU).weightInit(WeightInit.XAVIER) + .list().layer(0, new DenseLayer.Builder().name("dnn1").nIn(4).nOut(5).build()) + .layer(1, new OutputLayer.Builder().name("output").nIn(5).nOut(3) + .activation(Activation.SOFTMAX).weightInit(WeightInit.XAVIER) + .build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + 
net.init(); + net.fit(iter.next()); + // TODO validate actual layer gradientView - issue getting var out of BaseLayerConfiguration w/o adding MLN getter that gets confused with local gradient vars + Gradient actualGradient = net.gradient; + assertNotEquals(expectedGradient.getGradientFor("0_W"), actualGradient.getGradientFor("0_W")); + + net.update(expectedGradient); + actualGradient = net.gradient; + assertEquals(expectedGradient.getGradientFor("0_W"), actualGradient.getGradientFor("0_W")); + + // Update params with set + net.setParam("0_W", Nd4j.ones(4, 5)); + net.setParam("0_b", Nd4j.ones(1, 5)); + net.setParam("1_W", Nd4j.ones(5, 3)); + net.setParam("1_b", Nd4j.ones(1, 3)); + INDArray actualParams = net.getModelParams(); + + // Confirm params + assertEquals(expectedGradient.gradient(), actualParams); + + net.update(expectedGradient); + actualParams = net.getModelParams(); + assertEquals(Nd4j.ones(1, 43).addi(1), actualParams); + } + + + @Test + public void testCnnInvalidData() { + assertThrows(DL4JException.class, () -> { + int miniBatch = 3; + int depth = 2; + int width = 5; + int height = 5; + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() + .layer(0, + new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0).nIn(2) + .nOut(2).build()) + .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nOut(2).build()) + .inputType(InputType.convolutional(height, width, depth)) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + INDArray inputWrongDepth = Nd4j.rand(miniBatch, 5, height, + width); //Order: examples, channels, height, width + net.feedForward(inputWrongDepth); + }); + } + + @Test + public void testApplyingPreTrainConfigAndParams() { + int nIn = 10; + int nOut = 10; + + // Test pretrain true + MultiLayerNetwork aePre = getAeModel(true, nIn, nOut); + int actualNP = (int) aePre.numParams(); + assertEquals(2 * (nIn * nOut + nOut) + nIn, actualNP); + INDArray params = aePre.getModelParams(); + assertEquals(params.length(), actualNP); // check num params + Map paramTable = aePre.getParamTable(); + assertTrue(paramTable.containsKey("0_vb")); // check vb exists for pretrain layer + aePre.setParam("0_vb", Nd4j.ones(10)); + params = aePre.getParam("0_vb"); + assertEquals(Nd4j.ones(1, 10), params); // check set params for vb + + // Test pretrain false, expect same for true because its not changed when applying update + MultiLayerNetwork aeNoPre = getAeModel(false, nIn, nOut); + actualNP = (int) aeNoPre.numParams(); + assertEquals(2 * (nIn * nOut + nOut) + nIn, actualNP); + params = aeNoPre.getModelParams(); + assertEquals(params.length(), actualNP); + paramTable = aePre.getParamTable(); + assertTrue(paramTable.containsKey("0_vb")); + } + + public MultiLayerNetwork getAeModel(boolean preTrain, int nIn, int nOut) { + NeuralNetConfiguration vae = NeuralNetConfiguration.builder() + .seed(42).updater(new NoOp()) + .weightInit(WeightInit.UNIFORM) + .layer(new AutoEncoder.Builder() + .activation(Activation.IDENTITY).nOut(nIn).build()) + .layer( + new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.COSINE_PROXIMITY) + .activation(Activation.IDENTITY).nOut(nOut) + .build()) + + .inputType(InputType.feedForward(nOut)).build(); + MultiLayerNetwork network = new MultiLayerNetwork(vae); + network.init(); + return network; + } + + + @Test + public void testIterationCountAndPersistence() throws IOException { + Nd4j.getRandom().setSeed(123); + 
NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) + .list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build(); + + MultiLayerNetwork network = new MultiLayerNetwork(conf); + network.init(); + + DataSetIterator iter = new IrisDataSetIterator(50, 150); + + assertEquals(0, network.getNetConfiguration().getIterationCount()); + network.fit(iter); + assertEquals(3, network.getNetConfiguration().getIterationCount()); + iter.reset(); + network.fit(iter); + assertEquals(6, network.getNetConfiguration().getIterationCount()); + iter.reset(); + network.fit(iter.next()); + assertEquals(7, network.getNetConfiguration().getIterationCount()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ModelSerializer.writeModel(network, baos, true); + byte[] asBytes = baos.toByteArray(); + + ByteArrayInputStream bais = new ByteArrayInputStream(asBytes); + MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(bais, true); + assertEquals(7, net.getNetConfiguration().getIterationCount()); + } + + + @Test + public void testBiasL1L2() { + + Nd4j.getRandom().setSeed(123); + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) + .weightInit(WeightInit.XAVIER).activation(Activation.TANH).seed(123).list() + .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) + .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).nIn(10).nOut(10) + .build()) + .build(); + + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) + .l1Bias(0.1).l2Bias(0.2).weightInit(WeightInit.XAVIER).activation(Activation.TANH) + .seed(123).list().layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) + .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( + LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).nIn(10).nOut(10) + .build()) + .build(); + + MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); + net1.init(); + + MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); + net2.init(); + + BaseLayerConfiguration bl0 = (BaseLayerConfiguration) net2.getLayer(0).getLayerConfiguration(); + assertEquals(0.1, TestUtils.getL1(bl0.getRegularizationBias()), 1e-6); + assertEquals(0.2, TestUtils.getL2(bl0.getRegularizationBias()), 1e-6); + + INDArray features = Nd4j.rand(10, 10); + INDArray labels = Nd4j.rand(10, 10); + + net2.setParams(net1.getModelParams().dup()); + + net1.setInput(features); + net1.setLabels(labels); + net2.setInput(features); + net2.setLabels(labels); + + net1.computeGradientAndScore(); + net2.computeGradientAndScore(); + + double r = net1.calcRegularizationScore(true); + assertEquals(0.0, r, 0.0); + + r = net2.calcRegularizationScore(true); + assertEquals(0.0, r, 0.0); + + double s1 = net1.getScore(); + double s2 = net2.getScore(); + assertEquals(s1, s2, 1e-6); //Biases initialized to 0 -> should initially have same score + + for (int i = 0; i < 10; i++) { + net1.fit(features, labels); } + net2.setParams(net1.getModelParams().dup()); + net1.computeGradientAndScore(); + 
net2.computeGradientAndScore(); - @Test - public void testScoreExamples() { - Nd4j.getRandom().setSeed(12345); - int nIn = 5; - int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) - .l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() - .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) - .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder() - .lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build()) - .build(); + r = net1.calcRegularizationScore(true); + assertEquals(0.0, r, 0.0); - MultiLayerConfiguration confNoReg = new NeuralNetConfiguration.Builder().seed(12345) - .updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() - .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) - .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder() - .lossFunction(LossFunctions.LossFunction.MSE).nIn(30).nOut(nOut).build()) - .build(); + r = net2.calcRegularizationScore(true); + assertTrue(r > 0.0); + s1 = net1.getScore(); + s2 = net2.getScore(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); + assertNotEquals(s1, s2, 1e-6); //Scores should differ due to bias l1/l2 - MultiLayerNetwork netNoReg = new MultiLayerNetwork(confNoReg); - netNoReg.init(); - netNoReg.setParameters(net.params().dup()); - - //Score single example, and compare to scoreExamples: - INDArray input = Nd4j.rand(3, nIn); - INDArray output = Nd4j.rand(3, nOut); - DataSet ds = new DataSet(input, output); - - INDArray scoresWithRegularization = net.scoreExamples(ds, true); - INDArray scoresNoRegularization = net.scoreExamples(ds, false); - - assertArrayEquals(new long[] {3, 1}, scoresWithRegularization.shape()); - assertArrayEquals(new long[] {3, 1}, scoresNoRegularization.shape()); - - for (int i = 0; i < 3; i++) { - DataSet singleEx = new DataSet(input.getRow(i,true), output.getRow(i,true)); - double score = net.score(singleEx); - double scoreNoReg = netNoReg.score(singleEx); - - double scoreUsingScoreExamples = scoresWithRegularization.getDouble(i); - double scoreUsingScoreExamplesNoReg = scoresNoRegularization.getDouble(i); - assertEquals(score, scoreUsingScoreExamples, 1e-4); - assertEquals(scoreNoReg, scoreUsingScoreExamplesNoReg, 1e-4); - assertTrue(scoreUsingScoreExamples > scoreUsingScoreExamplesNoReg); //Regularization term increases score - - // System.out.println(score + "\t" + scoreUsingScoreExamples + "\t|\t" + scoreNoReg + "\t" + scoreUsingScoreExamplesNoReg); - } + for (int i = 0; i < 2; i++) { + assertEquals(0.0, net1.getLayer(i).calcRegularizationScore(true), 0.0); + assertTrue(net2.getLayer(i).calcRegularizationScore(true) > 0.0); } - - @Test - public void testDataSetScore() { - - Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .weightInit(WeightInit.XAVIER).seed(12345L).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.SIGMOID).build()) - .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(3).nOut(3).build()) - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - INDArray in = Nd4j.create(new double[] {1.0, 2.0, 3.0, 4.0}, 1, 4); - INDArray out = Nd4j.create(new double[] {1, 0, 0}, 1,3); - - double score = net.score(new DataSet(in, out)); - } - - @Test - public void 
testDataSetScoreCNN() { - - int miniBatch = 3; - int depth = 2; - int width = 3; - int height = 3; - int nOut = 2; - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .seed(12345L).list().layer(0, new ConvolutionLayer.Builder(2, 2).nOut(1).build()) - .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nOut(2).build()) - .setInputType(InputType.convolutionalFlat(height, width, depth)) - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - Nd4j.getRandom().setSeed(12345); - Random r = new Random(12345); - INDArray input = Nd4j.rand(miniBatch, depth * width * height); - INDArray labels = Nd4j.create(miniBatch, nOut); - for (int i = 0; i < miniBatch; i++) { - labels.putScalar(new int[] {i, r.nextInt(nOut)}, 1.0); - } - - double score = net.score(new DataSet(input, labels)); - } - - @Test - public void testPredict() throws Exception { - - Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .weightInit(WeightInit.XAVIER).seed(12345L).list() - .layer(0, new DenseLayer.Builder().nIn(784).nOut(50).activation(Activation.RELU).build()) - .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(50).nOut(10).build()) - .setInputType(InputType.convolutional(28, 28, 1)).build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - DataSetIterator ds = new MnistDataSetIterator(10, 10); - net.fit(ds); - - DataSetIterator testDs = new MnistDataSetIterator(1, 1); - DataSet testData = testDs.next(); - testData.setLabelNames(Arrays.asList("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")); - String actualLables = testData.getLabelName(0); - List prediction = net.predict(testData); - assertNotNull(actualLables); - assertNotNull(prediction.get(0)); - } - - @Test - //@Ignore - public void testCid() throws Exception { - System.out.println(EnvironmentUtils.buildCId()); - - Environment environment = EnvironmentUtils.buildEnvironment(); - environment.setSerialVersionID(EnvironmentUtils.buildCId()); - - Task task = TaskUtils.buildTask(Nd4j.create(new double[] {1, 2, 3, 4, 5, 6}, 1,6)); - - Heartbeat.getInstance().reportEvent(Event.STANDALONE, environment, task); - - Thread.sleep(25000); - } - - @Test - public void testOutput() throws Exception { - Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .weightInit(WeightInit.XAVIER).seed(12345L).list() - .layer(0, new DenseLayer.Builder().nIn(784).nOut(50).activation(Activation.RELU).build()) - .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(50).nOut(10).build()) - .setInputType(InputType.convolutional(28, 28, 1)).build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - DataSetIterator fullData = new MnistDataSetIterator(1, 2); - net.fit(fullData); - - - fullData.reset(); - DataSet expectedSet = fullData.next(2); - INDArray expectedOut = net.output(expectedSet.getFeatures(), false); - - fullData.reset(); - - INDArray actualOut = net.output(fullData); - - assertEquals(expectedOut, actualOut); - } - - @Test - public void testGradientUpdate() throws Exception { - DataSetIterator iter = new IrisDataSetIterator(1, 1); - - Gradient expectedGradient = new DefaultGradient(); - expectedGradient.setGradientFor("0_W", Nd4j.ones(4, 5)); - expectedGradient.setGradientFor("0_b", Nd4j.ones(1, 5)); - 
expectedGradient.setGradientFor("1_W", Nd4j.ones(5, 3)); - expectedGradient.setGradientFor("1_b", Nd4j.ones(1, 3)); - - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Sgd(1.0)) - .activation(Activation.RELU).weightInit(WeightInit.XAVIER) - .list().layer(0, new DenseLayer.Builder().name("dnn1").nIn(4).nOut(5).build()) - .layer(1, new OutputLayer.Builder().name("output").nIn(5).nOut(3) - .activation(Activation.SOFTMAX).weightInit(WeightInit.XAVIER) - .build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - net.fit(iter.next()); - // TODO validate actual layer gradientView - issue getting var out of BaseLayer w/o adding MLN getter that gets confused with local gradient vars - Gradient actualGradient = net.gradient; - assertNotEquals(expectedGradient.getGradientFor("0_W"), actualGradient.getGradientFor("0_W")); - - net.update(expectedGradient); - actualGradient = net.gradient; - assertEquals(expectedGradient.getGradientFor("0_W"), actualGradient.getGradientFor("0_W")); - - // Update params with set - net.setParam("0_W", Nd4j.ones(4, 5)); - net.setParam("0_b", Nd4j.ones(1, 5)); - net.setParam("1_W", Nd4j.ones(5, 3)); - net.setParam("1_b", Nd4j.ones(1, 3)); - INDArray actualParams = net.params(); - - // Confirm params - assertEquals(expectedGradient.gradient(), actualParams); - - net.update(expectedGradient); - actualParams = net.params(); - assertEquals(Nd4j.ones(1, 43).addi(1), actualParams); - } - - - @Test - public void testCnnInvalidData() { - assertThrows(DL4JException.class, () -> { - int miniBatch = 3; - int depth = 2; - int width = 5; - int height = 5; - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() - .layer(0, new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).padding(0, 0).nIn(2) - .nOut(2).build()) - .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nOut(2).build()) - .setInputType(InputType.convolutional(height, width, depth)) - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - INDArray inputWrongDepth = Nd4j.rand(miniBatch, 5, height, width); //Order: examples, channels, height, width - net.feedForward(inputWrongDepth); - }); - } - - @Test - public void testApplyingPreTrainConfigAndParams() { - int nIn = 10; - int nOut = 10; - - // Test pretrain true - MultiLayerNetwork aePre = getAeModel(true, nIn, nOut); - int actualNP = (int)aePre.numParams(); - assertEquals(2 * (nIn * nOut + nOut) + nIn, actualNP); - INDArray params = aePre.params(); - assertEquals(params.length(), actualNP); // check num params - Map paramTable = aePre.paramTable(); - assertTrue(paramTable.containsKey("0_vb")); // check vb exists for pretrain layer - aePre.setParam("0_vb", Nd4j.ones(10)); - params = aePre.getParam("0_vb"); - assertEquals(Nd4j.ones(1,10), params); // check set params for vb - - - // Test pretrain false, expect same for true because its not changed when applying update - MultiLayerNetwork aeNoPre = getAeModel(false, nIn, nOut); - actualNP = (int)aeNoPre.numParams(); - assertEquals(2 * (nIn * nOut + nOut) + nIn, actualNP); - params = aeNoPre.params(); - assertEquals(params.length(), actualNP); - paramTable = aePre.paramTable(); - assertTrue(paramTable.containsKey("0_vb")); - } - - public MultiLayerNetwork getAeModel(boolean preTrain, int nIn, int nOut) { - MultiLayerConfiguration vae = new NeuralNetConfiguration.Builder() - .seed(42).updater(new NoOp()) - .weightInit(WeightInit.UNIFORM) - .list(new 
AutoEncoder.Builder() - .activation(Activation.IDENTITY).nOut(nIn).build(), - new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.COSINE_PROXIMITY) - .activation(Activation.IDENTITY).nOut(nOut) - .build()) - .setInputType(InputType.feedForward(nOut)).build(); - MultiLayerNetwork network = new MultiLayerNetwork(vae); - network.init(); - return network; - } - - - @Test - public void testIterationCountAndPersistence() throws IOException { - Nd4j.getRandom().setSeed(123); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).seed(123) - .list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build(); - - - MultiLayerNetwork network = new MultiLayerNetwork(conf); - network.init(); - - DataSetIterator iter = new IrisDataSetIterator(50, 150); - - assertEquals(0, network.getLayerWiseConfigurations().getIterationCount()); - network.fit(iter); - assertEquals(3, network.getLayerWiseConfigurations().getIterationCount()); - iter.reset(); - network.fit(iter); - assertEquals(6, network.getLayerWiseConfigurations().getIterationCount()); - iter.reset(); - network.fit(iter.next()); - assertEquals(7, network.getLayerWiseConfigurations().getIterationCount()); - - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ModelSerializer.writeModel(network, baos, true); - byte[] asBytes = baos.toByteArray(); - - ByteArrayInputStream bais = new ByteArrayInputStream(asBytes); - MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(bais, true); - assertEquals(7, net.getLayerWiseConfigurations().getIterationCount()); - } - - - @Test - public void testBiasL1L2() { - - - Nd4j.getRandom().setSeed(123); - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .weightInit(WeightInit.XAVIER).activation(Activation.TANH).seed(123).list() - .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).nIn(10).nOut(10) - .build()) - .build(); - - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .l1Bias(0.1).l2Bias(0.2).weightInit(WeightInit.XAVIER).activation(Activation.TANH) - .seed(123).list().layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).nIn(10).nOut(10) - .build()) - .build(); - - MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); - net1.init(); - - MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); - net2.init(); - - BaseLayer bl0 = (BaseLayer) net2.getLayer(0).conf().getLayer(); - assertEquals(0.1, TestUtils.getL1(bl0.getRegularizationBias()), 1e-6); - assertEquals(0.2, TestUtils.getL2(bl0.getRegularizationBias()), 1e-6); - - INDArray features = Nd4j.rand(10, 10); - INDArray labels = Nd4j.rand(10, 10); - - net2.setParams(net1.params().dup()); - - net1.setInput(features); - net1.setLabels(labels); - net2.setInput(features); - net2.setLabels(labels); - - 
net1.computeGradientAndScore(); - net2.computeGradientAndScore(); - - double r = net1.calcRegularizationScore(true); - assertEquals(0.0, r, 0.0); - - r = net2.calcRegularizationScore(true); - assertEquals(0.0, r, 0.0); - - - double s1 = net1.score(); - double s2 = net2.score(); - assertEquals(s1, s2, 1e-6); //Biases initialized to 0 -> should initially have same score - - for (int i = 0; i < 10; i++) { - net1.fit(features, labels); - } - - net2.setParams(net1.params().dup()); - net1.computeGradientAndScore(); - net2.computeGradientAndScore(); - - r = net1.calcRegularizationScore(true); - assertEquals(0.0, r, 0.0); - - r = net2.calcRegularizationScore(true); - assertTrue(r > 0.0); - - s1 = net1.score(); - s2 = net2.score(); - - assertNotEquals(s1, s2, 1e-6); //Scores should differ due to bias l1/l2 - - for (int i = 0; i < 2; i++) { - assertEquals(0.0, net1.getLayer(i).calcRegularizationScore(true), 0.0); - assertTrue(net2.getLayer(i).calcRegularizationScore(true) > 0.0); - } - } - - /* - Summary should pick up preprocessors set manually on inputs as well - */ - @Test - public void testSummary() { - int V_WIDTH = 130; - int V_HEIGHT = 130; - int V_NFRAMES = 150; - MultiLayerConfiguration confForArchitecture = - new NeuralNetConfiguration.Builder().seed(12345).l2(0.001) //l2 regularization on all layers - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .list() - .layer(0, new ConvolutionLayer.Builder(10, 10).nIn(3) //3 channels: RGB - .nOut(30).stride(4, 4).activation(Activation.RELU).weightInit( - WeightInit.RELU) - .updater(Updater.ADAGRAD).build()) //Output: (130-10+0)/4+1 = 31 -> 31*31*30 - .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX) - .kernelSize(3, 3).stride(2, 2).build()) //(31-3+0)/2+1 = 15 - .layer(2, new ConvolutionLayer.Builder(3, 3).nIn(30).nOut(10).stride(2, 2) - .activation(Activation.RELU).weightInit(WeightInit.RELU) - .updater(Updater.ADAGRAD).build()) //Output: (15-3+0)/2+1 = 7 -> 7*7*10 = 490 - .layer(3, new DenseLayer.Builder().activation(Activation.RELU).nIn(490).nOut(50) - .weightInit(WeightInit.RELU).updater(Updater.ADAGRAD) - .gradientNormalization( - GradientNormalization.ClipElementWiseAbsoluteValue) - .gradientNormalizationThreshold(10).build()) - .layer(4, new GravesLSTM.Builder().activation(Activation.SOFTSIGN).nIn(50) - .nOut(50).weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD) - .gradientNormalization( - GradientNormalization.ClipElementWiseAbsoluteValue) - .gradientNormalizationThreshold(10) - .build()) - .layer(5, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .activation(Activation.SOFTMAX).nIn(50).nOut(4) //4 possible shapes: circle, square, arc, line - .updater(Updater.ADAGRAD).weightInit(WeightInit.XAVIER) - .gradientNormalization( - GradientNormalization.ClipElementWiseAbsoluteValue) - .gradientNormalizationThreshold(10).build()) - .inputPreProcessor(0, new RnnToCnnPreProcessor(V_HEIGHT, V_WIDTH, 3)) - .inputPreProcessor(3, new CnnToFeedForwardPreProcessor(7, 7, 10)) - .inputPreProcessor(4, new FeedForwardToRnnPreProcessor()) - .backpropType(BackpropType.TruncatedBPTT) - .tBPTTForwardLength(V_NFRAMES / 5).tBPTTBackwardLength(V_NFRAMES / 5).build(); - MultiLayerNetwork modelExpectedArch = new MultiLayerNetwork(confForArchitecture); - modelExpectedArch.init(); - MultiLayerNetwork modelMow = new TransferLearning.Builder(modelExpectedArch).setFeatureExtractor(2).build(); + } + + /* + Summary should pick up preprocessors set manually on inputs as well + */ + @Test + public void 
testSummary() { + int V_WIDTH = 130; + int V_HEIGHT = 130; + int V_NFRAMES = 150; + NeuralNetConfiguration confForArchitecture = + NeuralNetConfiguration.builder().seed(12345).l2(0.001) //l2 regularization on all layers + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) + .list() + .layer(0, new ConvolutionLayer.Builder(10, 10).nIn(3) //3 channels: RGB + .nOut(30).stride(4, 4).activation(Activation.RELU).weightInit( + WeightInit.RELU) + .updater(Updater.ADAGRAD).build()) //Output: (130-10+0)/4+1 = 31 -> 31*31*30 + .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX) + .kernelSize(3, 3).stride(2, 2).build()) //(31-3+0)/2+1 = 15 + .layer(2, new ConvolutionLayer.Builder(3, 3).nIn(30).nOut(10).stride(2, 2) + .activation(Activation.RELU).weightInit(WeightInit.RELU) + .updater(Updater.ADAGRAD).build()) //Output: (15-3+0)/2+1 = 7 -> 7*7*10 = 490 + .layer(3, new DenseLayer.Builder().activation(Activation.RELU).nIn(490).nOut(50) + .weightInit(WeightInit.RELU).updater(Updater.ADAGRAD) + .gradientNormalization( + GradientNormalization.ClipElementWiseAbsoluteValue) + .gradientNormalizationThreshold(10).build()) + .layer(4, new GravesLSTM.Builder().activation(Activation.SOFTSIGN).nIn(50) + .nOut(50).weightInit(WeightInit.XAVIER).updater(Updater.ADAGRAD) + .gradientNormalization( + GradientNormalization.ClipElementWiseAbsoluteValue) + .gradientNormalizationThreshold(10) + .build()) + .layer(5, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .activation(Activation.SOFTMAX).nIn(50) + .nOut(4) //4 possible shapes: circle, square, arc, line + .updater(Updater.ADAGRAD).weightInit(WeightInit.XAVIER) + .gradientNormalization( + GradientNormalization.ClipElementWiseAbsoluteValue) + .gradientNormalizationThreshold(10).build()) + .inputPreProcessor(0, new RnnToCnnPreProcessor(V_HEIGHT, V_WIDTH, 3)) + .inputPreProcessor(3, new CnnToFeedForwardPreProcessor(7, 7, 10)) + .inputPreProcessor(4, new FeedForwardToRnnPreProcessor()) + .backpropType(BackpropType.TruncatedBPTT) + .tbpttFwdLength(V_NFRAMES / 5).tbpttBackLength(V_NFRAMES / 5).build(); + MultiLayerNetwork modelExpectedArch = new MultiLayerNetwork(confForArchitecture); + modelExpectedArch.init(); + MultiLayerNetwork modelMow = new TransferLearning.Builder( + modelExpectedArch).setFeatureExtractor(2).build(); // System.out.println(modelExpectedArch.summary()); // System.out.println(modelMow.summary()); // System.out.println(modelMow.summary(InputType.recurrent(V_HEIGHT*V_WIDTH*3))); + } + + @Test + public void testErrorNoOutputLayer() { + assertThrows(DL4JException.class, () -> { + NeuralNetConfiguration c = NeuralNetConfiguration.builder().list() + .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).build(); + + MultiLayerNetwork net = new MultiLayerNetwork(c); + net.init(); + + INDArray f = Nd4j.create(1, 10); + INDArray l = Nd4j.create(1, 10); + + net.setInput(f); + net.setLabels(l); + + net.computeGradientAndScore(); + }); + } + + + @Test + public void testSetParamTable() { + + Nd4j.getRandom().setSeed(123); + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder().seed(123).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(2, new LSTM.Builder().nIn(2).nOut(2).build()) + .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + 
.weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) + .build()) + .build(); + + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().seed(987).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(2, new LSTM.Builder().nIn(2).nOut(2).build()) + .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) + .build()) + .build(); + + MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); + net1.init(); + + MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); + net2.init(); + + assertNotEquals(net1.getModelParams(), net2.getModelParams()); + assertNotEquals(net1.getParamTable(), net2.getParamTable()); + + net1.setParamTable(net2.getParamTable()); + assertEquals(net1.getModelParams(), net2.getModelParams()); + assertEquals(net1.getParamTable(), net2.getParamTable()); + } + + + @Test + public void testCompareLayerMethods() { + //Simple test: compare .layer(int, ILayer) and .layer(ILayer) are identical + + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder().seed(123).list() + .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(2, new LSTM.Builder().nIn(2).nOut(2).build()) + .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) + .build()) + .build(); + + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().seed(123).list() + .layer(new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) + .activation(Activation.TANH).build()) + .layer(new LSTM.Builder().nIn(2).nOut(2).build()) + .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) + .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) + .build()) + .build(); + + assertEquals(conf1, conf2); + } + + + @Test + public void testEpochCounter() throws Exception { + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .list() + .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).build()) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + assertEquals(0, net.getNetConfiguration().getEpochCount()); + + DataSetIterator iter = new IrisDataSetIterator(150, 150); + + for (int i = 0; i < 4; i++) { + assertEquals(i, net.getNetConfiguration().getEpochCount()); + net.fit(iter); + assertEquals(i + 1, net.getNetConfiguration().getEpochCount()); } - @Test - public void testErrorNoOutputLayer() { - assertThrows(DL4JException.class, () -> { - MultiLayerConfiguration c = new NeuralNetConfiguration.Builder().list() - .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).build(); + assertEquals(4, net.getNetConfiguration().getEpochCount()); - MultiLayerNetwork net = new MultiLayerNetwork(c); - net.init(); + MultiLayerNetwork restored = TestUtils.testModelSerialization(net); + assertEquals(4, restored.getNetConfiguration().getEpochCount()); + } - INDArray f = Nd4j.create(1, 10); 
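Note on the parameter accessors used by testSetParamTable() and testEpochCounter(): this patch renames params() to getModelParams(), paramTable() to getParamTable(), and getLayerWiseConfigurations() to getNetConfiguration(). A minimal, self-contained sketch of the renamed accessors (method names taken from the + lines in this file; package locations assumed to match stock DL4J, so treat the imports as approximate):

import java.util.Map;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;

public class ParamAccessorSketch {
    public static void main(String[] args) {
        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .list()
                .layer(new OutputLayer.Builder().nIn(4).nOut(3)
                        .activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray flat = net.getModelParams();               // previously net.params()
        Map<String, INDArray> table = net.getParamTable();  // previously net.paramTable()
        int epochs = net.getNetConfiguration().getEpochCount(); // previously getLayerWiseConfigurations().getEpochCount()
        System.out.println(flat.length() + " parameters in " + table.size() + " arrays, epoch " + epochs);
    }
}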
- INDArray l = Nd4j.create(1, 10); + @Test + public void testInputClearance() throws Exception { + //Activations should be cleared - if not, it's possible for out of (workspace) scope arrays to be around + // which can cause a crash + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .convolutionMode(ConvolutionMode.Same) + .list() + .layer(new ConvolutionLayer.Builder().kernelSize(2, 2).stride(1, 1).nIn(1).nOut(1).build()) + .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).stride(1, 1).build()) + .layer(new DenseLayer.Builder().nOut(10).build()) + .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) + .inputType(InputType.convolutional(28, 28, 1)) + .build(); - net.setInput(f); - net.setLabels(l); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); - net.computeGradientAndScore(); - }); + INDArray content = Nd4j.create(1, 1, 28, 28); + + //Check output: + net.output(content); + for (org.deeplearning4j.nn.api.Layer l : net.getLayers()) { + assertNull(l.input()); } - - @Test - public void testSetParamTable() { - - Nd4j.getRandom().setSeed(123); - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(123).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(2, new LSTM.Builder().nIn(2).nOut(2).build()) - .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) - .build()) - .build(); - - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(987).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(2, new LSTM.Builder().nIn(2).nOut(2).build()) - .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) - .build()) - .build(); - - MultiLayerNetwork net1 = new MultiLayerNetwork(conf1); - net1.init(); - - MultiLayerNetwork net2 = new MultiLayerNetwork(conf2); - net2.init(); - - assertNotEquals(net1.params(), net2.params()); - assertNotEquals(net1.paramTable(), net2.paramTable()); - - net1.setParamTable(net2.paramTable()); - assertEquals(net1.params(), net2.params()); - assertEquals(net1.paramTable(), net2.paramTable()); + //Check feedForward: + net.feedForward(content, false); + for (org.deeplearning4j.nn.api.Layer l : net.getLayers()) { + assertNull(l.input()); } + } - @Test - public void testCompareLayerMethods(){ - //Simple test: compare .layer(int, Layer) and .layer(Layer) are identical + @Test + public void testExternalErrors() { + //Simple test: same network, but in one case: one less layer (the OutputLayer), where the epsilons are passed in externally + // instead. 
Should get identical results - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(123).list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(2, new LSTM.Builder().nIn(2).nOut(2).build()) - .layer(3, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) - .build()) - .build(); + for (WorkspaceMode ws : WorkspaceMode.values()) { + log.info("Workspace mode: " + ws); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(123).list() - .layer(new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(new DenseLayer.Builder().nIn(3).nOut(2).weightInit(WeightInit.XAVIER) - .activation(Activation.TANH).build()) - .layer(new LSTM.Builder().nIn(2).nOut(2).build()) - .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) - .weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX).nIn(2).nOut(3) - .build()) - .build(); + Nd4j.getRandom().setSeed(12345); + INDArray inData = Nd4j.rand(3, 10); + INDArray outData = Nd4j.rand(3, 10); - assertEquals(conf1, conf2); + Nd4j.getRandom().setSeed(12345); + NeuralNetConfiguration standard = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) + .trainingWorkspaceMode(ws) + .inferenceWorkspaceMode(ws) + .seed(12345).list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) + .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10) + .nOut(10).build()) + .build(); + MultiLayerNetwork s = new MultiLayerNetwork(standard); + s.init(); + + Nd4j.getRandom().setSeed(12345); + NeuralNetConfiguration external = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) + .trainingWorkspaceMode(ws) + .inferenceWorkspaceMode(ws) + .seed(12345).list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) + .build(); + + MultiLayerNetwork e = new MultiLayerNetwork(external); + e.init(); + + s.setInput(inData); + s.setLabels(outData); + s.computeGradientAndScore(); + Gradient sGrad = s.gradient(); + + s.setInput(inData); + s.feedForward(true, false); //FF without clearing inputs as we need them later + + e.setInput(inData); + e.feedForward(true, false); //FF without clearing inputs as we need them later + + org.deeplearning4j.nn.layers.OutputLayer ol = (org.deeplearning4j.nn.layers.OutputLayer) s.getLayer( + 1); + Pair olPairStd = ol.backpropGradient(null, + LayerWorkspaceMgr.noWorkspaces()); + + INDArray olEpsilon = olPairStd.getSecond().detach(); + + e.setInput(inData); + e.feedForward(true, false); + Pair extErrorGrad = e.backpropGradient(olEpsilon, + LayerWorkspaceMgr.noWorkspaces()); + + int nParamsDense = 10 * 10 + 10; + assertEquals(sGrad.gradient() + .get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.interval(0, nParamsDense)), + extErrorGrad.getFirst().gradient()); + + Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); } + } + @Test + public void testExternalErrors2() { + Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC); + int nIn = 4; + int nOut = 3; - @Test - public void testEpochCounter() throws Exception { - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() - .layer(new 
OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).build()) - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - assertEquals(0, net.getLayerWiseConfigurations().getEpochCount()); - - - DataSetIterator iter = new IrisDataSetIterator(150, 150); - - for( int i=0; i<4; i++ ){ - assertEquals(i, net.getLayerWiseConfigurations().getEpochCount()); - net.fit(iter); - assertEquals(i+1, net.getLayerWiseConfigurations().getEpochCount()); - } - - assertEquals(4, net.getLayerWiseConfigurations().getEpochCount()); - - MultiLayerNetwork restored = TestUtils.testModelSerialization(net); - assertEquals(4, restored.getLayerWiseConfigurations().getEpochCount()); - } - - @Test - public void testInputClearance() throws Exception { - //Activations should be cleared - if not, it's possible for out of (workspace) scope arrays to be around - // which can cause a crash - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .convolutionMode(ConvolutionMode.Same) - .list() - .layer(new ConvolutionLayer.Builder().kernelSize(2,2).stride(1,1).nIn(1).nOut(1).build()) - .layer(new SubsamplingLayer.Builder().kernelSize(2,2).stride(1,1).build()) - .layer(new DenseLayer.Builder().nOut(10).build()) - .layer(new OutputLayer.Builder().nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28,28,1)) - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - INDArray content = Nd4j.create(1,1,28,28); - - //Check output: - net.output(content); - for(org.deeplearning4j.nn.api.Layer l : net.getLayers()){ - assertNull(l.input()); - } - - //Check feedForward: - net.feedForward(content, false); - for(org.deeplearning4j.nn.api.Layer l : net.getLayers()){ - assertNull(l.input()); - } - } - - - @Test - public void testExternalErrors() { - //Simple test: same network, but in one case: one less layer (the OutputLayer), where the epsilons are passed in externally - // instead. 
Should get identical results - - for(WorkspaceMode ws : WorkspaceMode.values()) { - log.info("Workspace mode: " + ws); - - Nd4j.getRandom().setSeed(12345); - INDArray inData = Nd4j.rand(3, 10); - INDArray outData = Nd4j.rand(3, 10); - - Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration standard = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) - .trainingWorkspaceMode(ws) - .inferenceWorkspaceMode(ws) - .seed(12345).list() - .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) - .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(10) - .nOut(10).build()) - .build(); - MultiLayerNetwork s = new MultiLayerNetwork(standard); - s.init(); - - - Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration external = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) - .trainingWorkspaceMode(ws) - .inferenceWorkspaceMode(ws) - .seed(12345).list() - .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) - .build(); - - MultiLayerNetwork e = new MultiLayerNetwork(external); - e.init(); - - s.setInput(inData); - s.setLabels(outData); - s.computeGradientAndScore(); - Gradient sGrad = s.gradient(); - - s.setInput(inData); - s.feedForward(true, false); //FF without clearing inputs as we need them later - - e.setInput(inData); - e.feedForward(true, false); //FF without clearing inputs as we need them later - - org.deeplearning4j.nn.layers.OutputLayer ol = (org.deeplearning4j.nn.layers.OutputLayer) s.getLayer(1); - Pair olPairStd = ol.backpropGradient(null, LayerWorkspaceMgr.noWorkspaces()); - - INDArray olEpsilon = olPairStd.getSecond().detach(); - - e.setInput(inData); - e.feedForward(true, false); - Pair extErrorGrad = e.backpropGradient(olEpsilon, LayerWorkspaceMgr.noWorkspaces()); - - int nParamsDense = 10 * 10 + 10; - assertEquals(sGrad.gradient().get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(0, nParamsDense)), - extErrorGrad.getFirst().gradient()); - - Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); - } - } - - @Test - public void testExternalErrors2(){ - Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC); - int nIn = 4; - int nOut = 3; - - for(WorkspaceMode ws : WorkspaceMode.values()) { + for (WorkspaceMode ws : WorkspaceMode.values()) { // System.out.println("***** WORKSPACE: " + ws); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .updater(new Adam(0.01)) - .trainingWorkspaceMode(ws) - .inferenceWorkspaceMode(ws) - .list() - .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.RELU).build()) - .layer(new ActivationLayer.Builder().activation(Activation.IDENTITY).build()) - .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) - .inputPreProcessor(1, new FeedForwardToRnnPreProcessor()) - .build(); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .updater(new Adam(0.01)) + .trainingWorkspaceMode(ws) + .inferenceWorkspaceMode(ws) + .list() + .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.RELU).build()) + .layer(new ActivationLayer.Builder().activation(Activation.IDENTITY).build()) + .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) + .inputPreProcessor(1, new FeedForwardToRnnPreProcessor()) + .build(); - MultiLayerNetwork graph = new MultiLayerNetwork(conf); - graph.init(); + MultiLayerNetwork graph = new MultiLayerNetwork(conf); + graph.init(); - final int minibatch = 5; - final int seqLen = 6; + final int minibatch = 5; + final int seqLen = 6; - INDArray param = 
Nd4j.create(new double[]{0.54, 0.31, 0.98, -0.30, -0.66, -0.19, -0.29, -0.62, 0.13, -0.32, 0.01, -0.03, 0.00, 0.00, 0.00}).reshape(1, -1); - graph.setParams(param); + INDArray param = Nd4j.create( + new double[]{0.54, 0.31, 0.98, -0.30, -0.66, -0.19, -0.29, -0.62, 0.13, -0.32, 0.01, + -0.03, 0.00, 0.00, 0.00}).reshape(1, -1); + graph.setParams(param); - INDArray input = Nd4j.rand(new int[]{minibatch, nIn, seqLen}, 12); - INDArray expected = Nd4j.ones(minibatch, nOut, seqLen); + INDArray input = Nd4j.rand(new int[]{minibatch, nIn, seqLen}, 12); + INDArray expected = Nd4j.ones(minibatch, nOut, seqLen); - graph.setInput(input); - INDArray output = graph.feedForward(false, false).get(2); - INDArray error = output.sub(expected); + graph.setInput(input); + INDArray output = graph.feedForward(false, false).get(2); + INDArray error = output.sub(expected); - for (org.deeplearning4j.nn.api.Layer l : graph.getLayers()) { - assertNotNull(l.input()); - assertFalse(l.input().isAttached()); - } + for (org.deeplearning4j.nn.api.Layer l : graph.getLayers()) { + assertNotNull(l.input()); + assertFalse(l.input().isAttached()); + } - // Compute Gradient - Pair gradient = graph.backpropGradient(error, LayerWorkspaceMgr.noWorkspaces()); - graph.getUpdater().update(graph, gradient.getFirst(), 0, 0, minibatch, LayerWorkspaceMgr.noWorkspaces()); + // Compute Gradient + Pair gradient = graph.backpropGradient(error, + LayerWorkspaceMgr.noWorkspaces()); + graph.getUpdater() + .update(graph, gradient.getFirst(), 0, 0, minibatch, LayerWorkspaceMgr.noWorkspaces()); - Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); - } - - Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.DISABLED); + Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); } - @Test - public void testLayerSize(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.DISABLED); + } - .list() - .layer(new ConvolutionLayer.Builder().kernelSize(2,2).nOut(6).build()) - .layer(new SubsamplingLayer.Builder().kernelSize(2,2).build()) - .layer(new DenseLayer.Builder().nOut(30).build()) - .layer(new OutputLayer.Builder().nOut(13).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28,28,3)) - .build(); + @Test + public void testLayerSize() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); + .list() + .layer(new ConvolutionLayer.Builder().kernelSize(2, 2).nOut(6).build()) + .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).build()) + .layer(new DenseLayer.Builder().nOut(30).build()) + .layer(new OutputLayer.Builder().nOut(13).activation(Activation.SOFTMAX).build()) + .inputType(InputType.convolutional(28, 28, 3)) + .build(); - assertEquals(6, net.layerSize(0)); - assertEquals(0, net.layerSize(1)); - assertEquals(30, net.layerSize(2)); - assertEquals(13, net.layerSize(3)); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); - assertEquals(3, net.layerInputSize(0)); - assertEquals(0, net.layerInputSize(1)); - assertEquals(((FeedForwardLayer)net.getLayer(2).conf().getLayer()).getNIn(), net.layerInputSize(2)); - assertEquals(30, net.layerInputSize(3)); + assertEquals(6, net.layerSize(0)); + assertEquals(0, net.layerSize(1)); + assertEquals(30, net.layerSize(2)); + assertEquals(13, net.layerSize(3)); + + assertEquals(3, net.layerInputSize(0)); + assertEquals(0, net.layerInputSize(1)); + 
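The external-error tests above rely on a pattern that survives this refactoring unchanged: run the forward pass with feedForward(true, false) so layer inputs are kept, then hand an externally computed epsilon (dL/dOutput) to backpropGradient() and apply the resulting gradient through the updater. A condensed sketch of that single step (calls mirror the test code; the Pair and LayerWorkspaceMgr import locations are assumptions and may differ between versions):

import org.deeplearning4j.nn.gradient.Gradient;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr;
import org.nd4j.common.primitives.Pair;
import org.nd4j.linalg.api.ndarray.INDArray;

public final class ExternalErrorStep {

    /** One update using an externally supplied error signal (dL/dOutput) instead of an OutputLayer. */
    public static void fitWithExternalEpsilon(MultiLayerNetwork net, INDArray features,
                                              INDArray epsilon, int minibatchSize) {
        net.setInput(features);
        net.feedForward(true, false); // forward pass; keep layer inputs, backprop needs them
        Pair<Gradient, INDArray> grads =
                net.backpropGradient(epsilon, LayerWorkspaceMgr.noWorkspaces());
        net.getUpdater().update(net, grads.getFirst(), 0, 0, minibatchSize,
                LayerWorkspaceMgr.noWorkspaces());
    }
}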
assertEquals(((FeedForwardLayer) net.getLayer(2).getLayerConfiguration()).getNIn(), + net.layerInputSize(2)); + assertEquals(30, net.layerInputSize(3)); + } + + + @Test + public void testZeroParamNet() throws Exception { + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .list() + .layer(new SubsamplingLayer.Builder().kernelSize(2, 2).stride(2, 2).build()) + .layer(new LossLayer.Builder().activation(Activation.SIGMOID) + .lossFunction(LossFunctions.LossFunction.MSE).build()) + .inputType(InputType.convolutionalFlat(28, 28, 1)) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + DataSet ds = new MnistDataSetIterator(16, true, 12345).next(); + + INDArray out = net.output(ds.getFeatures()); + + INDArray labelTemp = Nd4j.create(out.shape()); + ds.setLabels(labelTemp); + + net.fit(ds); + + MultiLayerNetwork net2 = TestUtils.testModelSerialization(net); + INDArray out2 = net2.output(ds.getFeatures()); + assertEquals(out, out2); + } + + + @Test + public void testInputActivationGradient() { + Nd4j.setDataType(DataType.DOUBLE); + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .dataType(DataType.DOUBLE) + .seed(12345) + .activation(Activation.TANH) + .list() + .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) + .layer( + new OutputLayer.Builder().nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE) + .build()) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + INDArray in = Nd4j.rand(1, 10); + INDArray label = Nd4j.rand(1, 10); + + Pair p = net.calculateGradients(in, label, null, null); + + //Quick gradient check: + double eps = 1e-6; + double maxRelError = 1e-5; + for (int i = 0; i < 10; i++) { + double orig = in.getDouble(i); + in.putScalar(i, orig + eps); + double scorePlus = net.score(new DataSet(in, label)); + in.putScalar(i, orig - eps); + double scoreMinus = net.score(new DataSet(in, label)); + in.putScalar(i, orig); + + double expGrad = (scorePlus - scoreMinus) / (2.0 * eps); + double actGrad = p.getSecond().getDouble(i); + + double relError = (Math.abs(expGrad - actGrad)) / (Math.abs(expGrad) + Math.abs(actGrad)); + + String str = i + " - " + relError + " - exp=" + expGrad + ", act=" + actGrad; + assertTrue(relError < maxRelError, str); } + } - @Test - public void testZeroParamNet() throws Exception { + @Test + public void testNeuralNetConfigurationActivationTypes() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() - .layer(new SubsamplingLayer.Builder().kernelSize(2,2).stride(2,2).build()) - .layer(new LossLayer.Builder().activation(Activation.SIGMOID).lossFunction(LossFunctions.LossFunction.MSE).build()) - .setInputType(InputType.convolutionalFlat(28,28,1)) - .build(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder() + .list() + .layer(new LSTM.Builder().nOut(6).build()) + .layer(new LSTM.Builder().nOut(7).build()) + .layer(new GlobalPoolingLayer()) + .layer(new OutputLayer.Builder().nOut(8).activation(Activation.SOFTMAX).build()) + .inputType(InputType.recurrent(10)); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); + NeuralNetConfiguration conf = builder.build(); - DataSet ds = new MnistDataSetIterator(16, true, 12345).next(); + List outBuilder = builder.getLayerActivationTypes(); + List outConf = conf.getLayerActivationTypes(InputType.recurrent(10)); - INDArray out = net.output(ds.getFeatures()); + List exp = Arrays.asList( + InputType.recurrent(6), + 
InputType.recurrent(7), + InputType.feedForward(7), + InputType.feedForward(8) + ); - INDArray labelTemp = Nd4j.create(out.shape()); - ds.setLabels(labelTemp); + assertEquals(exp, outBuilder); + assertEquals(exp, outConf); + } - net.fit(ds); + @Test + public void testMultipleEpochsSimple() { + //Mainly a simple sanity check on the preconditions in the method... + DataSetIterator iter = new IrisDataSetIterator(10, 150); - MultiLayerNetwork net2 = TestUtils.testModelSerialization(net); - INDArray out2 = net2.output(ds.getFeatures()); - assertEquals(out, out2); - } + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .list() + .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).build()) + .build(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + net.fit(iter, 3); - @Test - public void testInputActivationGradient(){ - Nd4j.setDataType(DataType.DOUBLE); + ComputationGraph g = net.toComputationGraph(); + g.fit(iter, 3); + } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .dataType(DataType.DOUBLE) - .seed(12345) - .activation(Activation.TANH) - .list() - .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) - .layer(new OutputLayer.Builder().nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); + @Test + public void testPretrainFitMethods() { - INDArray in = Nd4j.rand(1, 10); - INDArray label = Nd4j.rand(1, 10); + //The fit methods should *not* do layerwise pretraining: - Pair p = net.calculateGradients(in, label, null, null); + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() - //Quick gradient check: - double eps = 1e-6; - double maxRelError = 1e-5; - for( int i=0; i<10; i++ ){ - double orig = in.getDouble(i); - in.putScalar(i, orig + eps); - double scorePlus = net.score(new DataSet(in, label)); - in.putScalar(i, orig - eps); - double scoreMinus = net.score(new DataSet(in, label)); - in.putScalar(i, orig); + .list() + .layer(new VariationalAutoencoder.Builder() + .nIn(10).nOut(10).encoderLayerSizes(10).decoderLayerSizes(10).build()) + .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()) - double expGrad = (scorePlus - scoreMinus) / (2.0 * eps); - double actGrad = p.getSecond().getDouble(i); + .build(); - double relError = (Math.abs(expGrad - actGrad)) / (Math.abs(expGrad) + Math.abs(actGrad)); - - String str = i + " - " + relError + " - exp=" + expGrad + ", act=" + actGrad; - assertTrue(relError < maxRelError, str); - } - } - - - @Test - public void testMultiLayerConfigurationActivationTypes(){ - - NeuralNetConfiguration.ListBuilder builder = new NeuralNetConfiguration.Builder() - .list() - .layer(new LSTM.Builder().nOut(6).build()) - .layer(new LSTM.Builder().nOut(7).build()) - .layer(new GlobalPoolingLayer()) - .layer(new OutputLayer.Builder().nOut(8).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.recurrent(10)); - - MultiLayerConfiguration conf = builder.build(); - - List outBuilder = builder.getLayerActivationTypes(); - List outConf = conf.getLayerActivationTypes(InputType.recurrent(10)); - - List exp = Arrays.asList( - InputType.recurrent(6), - InputType.recurrent(7), - InputType.feedForward(7), - InputType.feedForward(8) - ); - - - assertEquals(exp, outBuilder); - assertEquals(exp, outConf); - } - - @Test - public void testMultipleEpochsSimple(){ - //Mainly a simple sanity check on the preconditions in the method... 
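testMultipleEpochsSimple() exercises the multi-epoch fit(iterator, numEpochs) overload and MultiLayerNetwork.toComputationGraph(), which are untouched by this refactoring apart from the builder entry point. A small stand-alone sketch of that usage (iterator sizes copied from the test; illustrative only):

import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

public class MultiEpochFitSketch {
    public static void main(String[] args) {
        DataSetIterator iter = new IrisDataSetIterator(10, 150);

        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .list()
                .layer(new OutputLayer.Builder().nIn(4).nOut(3)
                        .activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        net.fit(iter, 3);                   // three epochs in one call

        ComputationGraph graph = net.toComputationGraph();
        graph.fit(iter, 3);                 // same data, graph form
    }
}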
- DataSetIterator iter = new IrisDataSetIterator(10, 150); - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .list() - .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).build()) - .build(); - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - net.fit(iter, 3); - - ComputationGraph g = net.toComputationGraph(); - g.fit(iter, 3); - } - - - @Test - public void testPretrainFitMethods(){ - - //The fit methods should *not* do layerwise pretraining: - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - - .list() - .layer(new VariationalAutoencoder.Builder() - .nIn(10).nOut(10).encoderLayerSizes(10).decoderLayerSizes(10).build()) - .layer(new OutputLayer.Builder().nIn(10).nOut(10).activation(Activation.SOFTMAX).build()) - - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - Set> exp = new HashSet<>(); - exp.add(MultiLayerNetwork.class); - - CheckModelsListener listener = new CheckModelsListener(); - net.setListeners(listener); - - INDArray f = Nd4j.create(1,10); - INDArray l = Nd4j.create(1,10); - DataSet ds = new DataSet(f,l); - MultiDataSet mds = new org.nd4j.linalg.dataset.MultiDataSet(f,l); - - DataSetIterator iter = new ExistingDataSetIterator(Collections.singletonList(ds)); - net.fit(iter); - assertEquals(exp, listener.getModelClasses()); - - net.fit(ds); - assertEquals(exp, listener.getModelClasses()); - - net.fit(f, l); - assertEquals(exp, listener.getModelClasses()); - - net.fit(f, l, null, null); - assertEquals(exp, listener.getModelClasses()); - - net.fit(mds); - assertEquals(exp, listener.getModelClasses()); - - net.fit(new SingletonMultiDataSetIterator(mds)); - assertEquals(exp, listener.getModelClasses()); - } - - @Test - public void testINDArrayConfigCloning(){ - //INDArrays in config should be cloned to avoid threading issues - - int mb = 3; - int b = 4; - int c = 3; - int depth = b * (5 + c); - int w = 6; - int h = 6; - - INDArray bbPrior = Nd4j.rand(b, 2).muliRowVector(Nd4j.create(new double[]{w, h}).castTo(Nd4j.defaultFloatingPointType())); - - - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .l2(0.01) - .list() - .layer(new ConvolutionLayer.Builder().nIn(depth).nOut(depth).kernelSize(1,1).build()) - .layer(new Yolo2OutputLayer.Builder() - .boundingBoxPriors(bbPrior) - .build()) - .build(); - - MultiLayerConfiguration conf2 = conf.clone(); - - INDArray bb1 = ((Yolo2OutputLayer)conf.getConf(1).getLayer()).getBoundingBoxes(); - INDArray bb2 = ((Yolo2OutputLayer)conf2.getConf(1).getLayer()).getBoundingBoxes(); - assertNotSame(bb1, bb2); - - assertEquals(bb1, bb2); - } - - @Data - @EqualsAndHashCode(callSuper = false) - public static class CheckModelsListener extends BaseTrainingListener { - - private Set> modelClasses = new HashSet<>(); - - @Override - public void iterationDone(Model model, int iteration, int epoch) { - modelClasses.add(model.getClass()); - } - } - - - @Test - public void testMLNUpdaterBlocks(){ - //Check that setting learning rate results in correct rearrangement of updater state within updater blocks - //https://github.com/deeplearning4j/deeplearning4j/issues/6809#issuecomment-463892644 - - double lr = 1e-3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .seed(12345) - .weightInit(WeightInit.XAVIER) - .updater(new Adam(lr)) - .list() - .layer(new DenseLayer.Builder().nIn(5).nOut(3).build()) - .layer(new DenseLayer.Builder().nIn(3).nOut(2).build()) - .layer(new 
OutputLayer.Builder(LossFunctions.LossFunction.XENT).nIn(2).nOut(1) - .activation(Activation.SIGMOID).build()) - .build(); - - MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.init(); - - INDArray in = Nd4j.rand(1, 5); - INDArray lbl = Nd4j.rand(1,1); - - net.fit(new DataSet(in, lbl)); - - INDArray viewArray = net.getUpdater().getStateViewArray(); - INDArray viewArrayCopy = viewArray.dup(); - //Initially updater view array is set out like: - //[m0w, m0b, m1w, m1b, m2w, m2b][v0w, v0b, v1w, v1b, v2w, v2b] - long soFar = 0; - INDArray m0w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+5*3)).assign(0); //m0w - soFar += 5*3; - INDArray m0b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3)).assign(1); //m0b - soFar += 3; - INDArray m1w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3*2)).assign(2); //m1w - soFar += 3*2; - INDArray m1b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+2)).assign(3); //m1b - soFar += 2; - INDArray m2w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+ 2)).assign(4); //m2w - soFar += 2; - INDArray m2b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+1)).assign(5); //m2b - soFar += 1; - - INDArray v0w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+5*3)).assign(6); //v0w - soFar += 5*3; - INDArray v0b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3)).assign(7); //v0b - soFar += 3; - INDArray v1w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+3*2)).assign(8); //v1w - soFar += 3*2; - INDArray v1b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+2)).assign(9); //v1b - soFar += 2; - INDArray v2w = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+ 2)).assign(10); //v2w - soFar += 2; - INDArray v2b = viewArray.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(soFar, soFar+1)).assign(11); //v2b - soFar += 1; - - - net.setLearningRate(0, 0.0); - - //Expect new updater state to look like: - //[m0w, m0b][v0w,v0b], [m1w, m1b, m2w, m2b][v1w, v1b, v2w, v2b] - INDArray exp = Nd4j.concat(1, m0w, m0b, v0w, v0b, - m1w, m1b, m2w, m2b, v1w, v1b, v2w, v2b); - - INDArray act = net.getUpdater().getStateViewArray(); + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + Set> exp = new HashSet<>(); + exp.add(MultiLayerNetwork.class); + + CheckModelsListener listener = new CheckModelsListener(); + net.addTrainingListeners(listener); + + INDArray f = Nd4j.create(1, 10); + INDArray l = Nd4j.create(1, 10); + DataSet ds = new DataSet(f, l); + MultiDataSet mds = new org.nd4j.linalg.dataset.MultiDataSet(f, l); + + DataSetIterator iter = new ExistingDataSetIterator(Collections.singletonList(ds)); + net.fit(iter); + assertEquals(exp, listener.getModelClasses()); + + net.fit(ds); + assertEquals(exp, listener.getModelClasses()); + + net.fit(f, l); + assertEquals(exp, listener.getModelClasses()); + + net.fit(f, l, null, null); + assertEquals(exp, listener.getModelClasses()); + + net.fit(mds); + assertEquals(exp, listener.getModelClasses()); + + net.fit(new SingletonMultiDataSetIterator(mds)); + assertEquals(exp, listener.getModelClasses()); + } + + @Test + public void testINDArrayConfigCloning() { + //INDArrays in config should be cloned to avoid threading issues + + int mb = 3; 
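The listener wiring above changes in two places: listeners are attached with addTrainingListeners(...) instead of setListeners(...), and BaseTrainingListener callbacks now receive an IModel rather than a Model. A minimal custom listener under those assumptions (the IModel import path is a guess for this fork and may need adjusting):

import org.deeplearning4j.optimize.api.BaseTrainingListener;
import net.brutex.ai.dnn.api.IModel; // assumed location of IModel in this codebase; adjust to the actual package

public class IterationCountingListener extends BaseTrainingListener {

    private int iterations = 0;

    @Override
    public void iterationDone(IModel model, int iteration, int epoch) {
        iterations++; // signature was iterationDone(Model model, ...) before this patch
    }

    public int getIterations() {
        return iterations;
    }
}

It would be attached exactly as in the test above: net.addTrainingListeners(new IterationCountingListener());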
+ int b = 4; + int c = 3; + int depth = b * (5 + c); + int w = 6; + int h = 6; + + INDArray bbPrior = Nd4j.rand(b, 2) + .muliRowVector(Nd4j.create(new double[]{w, h}).castTo(Nd4j.defaultFloatingPointType())); + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .l2(0.01) + .list() + .layer(new ConvolutionLayer.Builder().nIn(depth).nOut(depth).kernelSize(1, 1).build()) + .layer(new Yolo2OutputLayer.Builder() + .boundingBoxPriors(bbPrior) + .build()) + .build(); + + NeuralNetConfiguration conf2 = conf.clone(); + + INDArray bb1 = ((Yolo2OutputLayer) conf.getConf(1).getLayer()).getBoundingBoxes(); + INDArray bb2 = ((Yolo2OutputLayer) conf2.getConf(1).getLayer()).getBoundingBoxes(); + assertNotSame(bb1, bb2); + + assertEquals(bb1, bb2); + } + + @Test + public void testMLNUpdaterBlocks() { + //Check that setting learning rate results in correct rearrangement of updater state within updater blocks + //https://github.com/deeplearning4j/deeplearning4j/issues/6809#issuecomment-463892644 + + double lr = 1e-3; + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .seed(12345) + .weightInit(WeightInit.XAVIER) + .updater(new Adam(lr)) + .list() + .layer(new DenseLayer.Builder().nIn(5).nOut(3).build()) + .layer(new DenseLayer.Builder().nIn(3).nOut(2).build()) + .layer(new OutputLayer.Builder(LossFunctions.LossFunction.XENT).nIn(2).nOut(1) + .activation(Activation.SIGMOID).build()) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(conf); + net.init(); + + INDArray in = Nd4j.rand(1, 5); + INDArray lbl = Nd4j.rand(1, 1); + + net.fit(new DataSet(in, lbl)); + + INDArray viewArray = net.getUpdater().getStateViewArray(); + INDArray viewArrayCopy = viewArray.dup(); + //Initially updater view array is set out like: + //[m0w, m0b, m1w, m1b, m2w, m2b][v0w, v0b, v1w, v1b, v2w, v2b] + long soFar = 0; + INDArray m0w = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 5 * 3)).assign(0); //m0w + soFar += 5 * 3; + INDArray m0b = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 3)).assign(1); //m0b + soFar += 3; + INDArray m1w = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 3 * 2)).assign(2); //m1w + soFar += 3 * 2; + INDArray m1b = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 2)).assign(3); //m1b + soFar += 2; + INDArray m2w = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 2)).assign(4); //m2w + soFar += 2; + INDArray m2b = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 1)).assign(5); //m2b + soFar += 1; + + INDArray v0w = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 5 * 3)).assign(6); //v0w + soFar += 5 * 3; + INDArray v0b = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 3)).assign(7); //v0b + soFar += 3; + INDArray v1w = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 3 * 2)).assign(8); //v1w + soFar += 3 * 2; + INDArray v1b = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 2)).assign(9); //v1b + soFar += 2; + INDArray v2w = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 2)).assign(10); //v2w + soFar += 2; + INDArray v2b = viewArray.get(NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(soFar, soFar + 1)).assign(11); //v2b + soFar 
+= 1; + + net.setLearningRate(0, 0.0); + + //Expect new updater state to look like: + //[m0w, m0b][v0w,v0b], [m1w, m1b, m2w, m2b][v1w, v1b, v2w, v2b] + INDArray exp = Nd4j.concat(1, m0w, m0b, v0w, v0b, + m1w, m1b, m2w, m2b, v1w, v1b, v2w, v2b); + + INDArray act = net.getUpdater().getStateViewArray(); // System.out.println(exp); // System.out.println(act); - assertEquals(exp, act); + assertEquals(exp, act); - //And set layer 1 LR: - net.setLearningRate(1, 0.2); - exp = Nd4j.concat(1, m0w, m0b, v0w, v0b, - m1w, m1b, v1w, v1b, - m2w, m2b, v2w, v2b); - assertEquals(exp, net.getUpdater().getStateViewArray()); + //And set layer 1 LR: + net.setLearningRate(1, 0.2); + exp = Nd4j.concat(1, m0w, m0b, v0w, v0b, + m1w, m1b, v1w, v1b, + m2w, m2b, v2w, v2b); + assertEquals(exp, net.getUpdater().getStateViewArray()); + //Set all back to original LR and check again: + net.setLearningRate(1, lr); + net.setLearningRate(0, lr); - //Set all back to original LR and check again: - net.setLearningRate(1, lr); - net.setLearningRate(0, lr); + exp = Nd4j.concat(1, m0w, m0b, m1w, m1b, m2w, m2b, v0w, v0b, v1w, v1b, v2w, v2b); + assertEquals(exp, net.getUpdater().getStateViewArray()); - exp = Nd4j.concat(1, m0w, m0b, m1w, m1b, m2w, m2b, v0w, v0b, v1w, v1b, v2w, v2b); - assertEquals(exp, net.getUpdater().getStateViewArray()); + //Finally, training sanity check (if things are wrong, we get -ve values in adam V, which causes NaNs) + net.getUpdater().getStateViewArray().assign(viewArrayCopy); + net.setLearningRate(0, 0.0); + Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.NAN_PANIC); + net.fit(new DataSet(in, lbl)); + Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC); + } - //Finally, training sanity check (if things are wrong, we get -ve values in adam V, which causes NaNs) - net.getUpdater().getStateViewArray().assign(viewArrayCopy); - net.setLearningRate(0, 0.0); + @Data + @EqualsAndHashCode(callSuper = false) + public static class CheckModelsListener extends BaseTrainingListener { - Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.NAN_PANIC); - net.fit(new DataSet(in, lbl)); - Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC); + private Set> modelClasses = new HashSet<>(); + + @Override + public void iterationDone(IModel model, int iteration, int epoch) { + modelClasses.add(model.getClass()); } + } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTestRNN.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTestRNN.java index 5064e44ab..29d7e7a6a 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTestRNN.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/MultiLayerTestRNN.java @@ -29,6 +29,7 @@ import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; import org.deeplearning4j.nn.conf.layers.misc.FrozenLayer; @@ -67,8 +68,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nIn = 8; int nOut = 25; int nHiddenUnits = 17; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + 
NeuralNetConfiguration.builder() .list().layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder() .nIn(nIn).nOut(nHiddenUnits) @@ -85,7 +86,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { Layer layer = network.getLayer(0); assertTrue(layer instanceof GravesLSTM); - Map paramTable = layer.paramTable(); + Map paramTable = layer.getParamTable(); assertEquals(3, paramTable.size()); //2 sets of weights, 1 set of biases INDArray recurrentWeights = paramTable.get(GravesLSTMParamInitializer.RECURRENT_WEIGHT_KEY); @@ -112,7 +113,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nIn = 8; int nOut = 25; int[] nHiddenUnits = {17, 19, 23}; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(17) .activation(Activation.TANH).build()) .layer(1, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(17).nOut(19) @@ -130,7 +131,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { Layer layer = network.getLayer(i); assertTrue(layer instanceof GravesLSTM); - Map paramTable = layer.paramTable(); + Map paramTable = layer.getParamTable(); assertEquals(3, paramTable.size()); //2 sets of weights, 1 set of biases int layerNIn = (i == 0 ? nIn : nHiddenUnits[i - 1]); @@ -160,8 +161,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int timeSeriesLength = 6; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .list().layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder() .nIn(5).nOut(7).activation(Activation.TANH) @@ -225,8 +226,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { public void testRnnTimeStepLayers() { for( int layerType=0; layerType<3; layerType++ ) { - org.deeplearning4j.nn.conf.layers.Layer l0; - org.deeplearning4j.nn.conf.layers.Layer l1; + LayerConfiguration l0; + LayerConfiguration l1; String lastActKey; if(layerType == 0){ @@ -261,8 +262,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int timeSeriesLength = 12; - //4 layer network: 2 GravesLSTM + DenseLayer + RnnOutputLayer. Hence also tests preprocessors. - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() + //4 layer network: 2 GravesLSTM + DenseLayerConfiguration + RnnOutputLayer. Hence also tests preprocessors. 
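The truncated-BPTT setters in the hunks below are renamed from tBPTTForwardLength(...)/tBPTTBackwardLength(...) to tbpttFwdLength(...)/tbpttBackLength(...). A compact sketch of the new builder chain (layer and option names copied from the + lines in this file; a sketch only, assuming the renamed setters behave exactly like the old ones):

import org.deeplearning4j.nn.conf.BackpropType;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class TbpttBuilderSketch {
    public static void main(String[] args) {
        int tbpttLength = 12;

        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .seed(12345)
                .list()
                .layer(new LSTM.Builder().nIn(5).nOut(7).activation(Activation.TANH).build())
                .layer(new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .nIn(7).nOut(4).activation(Activation.IDENTITY).build())
                .backpropType(BackpropType.TruncatedBPTT)
                .tbpttFwdLength(tbpttLength)   // was .tBPTTForwardLength(tbpttLength)
                .tbpttBackLength(tbpttLength)  // was .tBPTTBackwardLength(tbpttLength)
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
    }
}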
+ NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, l0) .layer(1, l1) .layer(2, new DenseLayer.Builder().nIn(8).nOut(9).activation(Activation.TANH) @@ -349,8 +350,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); int timeSeriesLength = 6; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .list().layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder() .nIn(5).nOut(7).activation(Activation.TANH) @@ -408,7 +409,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nIn = 5; int nOut = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .trainingWorkspaceMode(WorkspaceMode.NONE).inferenceWorkspaceMode(WorkspaceMode.NONE) .list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) @@ -427,7 +428,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { .build(); assertEquals(BackpropType.Standard, conf.getBackpropType()); - MultiLayerConfiguration confTBPTT = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration confTBPTT = NeuralNetConfiguration.builder().seed(12345) .trainingWorkspaceMode(WorkspaceMode.NONE).inferenceWorkspaceMode(WorkspaceMode.NONE) .list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) @@ -443,8 +444,9 @@ public class MultiLayerTestRNN extends BaseDL4JTest { .nIn(8).nOut(nOut).activation(Activation.SOFTMAX) .dist(new NormalDistribution(0, 0.5)) .build()) - .backpropType(BackpropType.TruncatedBPTT).tBPTTBackwardLength(timeSeriesLength) - .tBPTTForwardLength(timeSeriesLength).build(); + .backpropType(BackpropType.TruncatedBPTT) + .tbpttBackLength(timeSeriesLength) + .tbpttFwdLength(timeSeriesLength).build(); Nd4j.getRandom().setSeed(12345); MultiLayerNetwork mln = new MultiLayerNetwork(conf); @@ -456,9 +458,9 @@ public class MultiLayerTestRNN extends BaseDL4JTest { mlnTBPTT.clearTbpttState = false; - assertEquals(BackpropType.TruncatedBPTT, mlnTBPTT.getLayerWiseConfigurations().getBackpropType()); - assertEquals(timeSeriesLength, mlnTBPTT.getLayerWiseConfigurations().getTbpttFwdLength()); - assertEquals(timeSeriesLength, mlnTBPTT.getLayerWiseConfigurations().getTbpttBackLength()); + assertEquals(BackpropType.TruncatedBPTT, mlnTBPTT.getNetConfiguration().getBackpropType()); + assertEquals(timeSeriesLength, mlnTBPTT.getNetConfiguration().getTbpttFwdLength()); + assertEquals(timeSeriesLength, mlnTBPTT.getNetConfiguration().getTbpttBackLength()); INDArray inputData = Nd4j.rand(miniBatchSize, nIn, timeSeriesLength); INDArray labels = Nd4j.rand(miniBatchSize, nOut, timeSeriesLength); @@ -520,8 +522,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nTimeSlices = 5; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345).list().layer(0, + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345).list().layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) .activation(Activation.TANH) .dist(new NormalDistribution(0, 0.5)).build()) @@ -602,7 +604,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nTimeSlices = 20; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) .activation(Activation.TANH) @@ -618,7 +620,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { .dist(new NormalDistribution(0, 0.5)) .build()) .backpropType(BackpropType.TruncatedBPTT) - .tBPTTBackwardLength(timeSeriesLength).tBPTTForwardLength(timeSeriesLength).build(); + .tbpttBackLength(timeSeriesLength).tbpttFwdLength(timeSeriesLength).build(); Nd4j.getRandom().setSeed(12345); MultiLayerNetwork mln = new MultiLayerNetwork(conf); @@ -639,7 +641,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nIn = 5; int nOut = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) .activation(Activation.TANH) @@ -655,7 +657,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { .dist(new NormalDistribution(0, 0.5)) .build()) .backpropType(BackpropType.TruncatedBPTT) - .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build(); + .tbpttBackLength(tbpttLength).tbpttFwdLength(tbpttLength).build(); Nd4j.getRandom().setSeed(12345); MultiLayerNetwork mln = new MultiLayerNetwork(conf); @@ -675,8 +677,8 @@ public class MultiLayerTestRNN extends BaseDL4JTest { @Test public void testRnnTimeStepWithPreprocessor() { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10) @@ -698,7 +700,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { @Test public void testRnnTimeStepWithPreprocessorGraph() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(10).nOut(10) @@ -727,7 +729,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nIn = 5; int nOut = 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).list() .layer(0, new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder().nIn(nIn).nOut(7) @@ -737,7 +739,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { .layer(2, new RnnOutputLayer.Builder(LossFunction.MSE).nIn(8).nOut(nOut) .activation(Activation.IDENTITY).build()) .backpropType(BackpropType.TruncatedBPTT) - .tBPTTBackwardLength(tbpttLength).tBPTTForwardLength(tbpttLength).build(); + .tbpttBackLength(tbpttLength).tbpttFwdLength(tbpttLength).build(); Nd4j.getRandom().setSeed(12345); MultiLayerNetwork mln = new MultiLayerNetwork(conf); @@ -751,9 +753,9 @@ public class MultiLayerTestRNN extends BaseDL4JTest { DataSet ds = new DataSet(features, labels, maskArrayInput, maskArrayOutput); - INDArray initialParams = mln.params().dup(); + INDArray initialParams = mln.getModelParams().dup(); mln.fit(ds); - INDArray afterParams = 
mln.params(); + INDArray afterParams = mln.getModelParams(); assertNotEquals(initialParams, afterParams); } @@ -764,7 +766,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { int nHiddenUnits = 17; try { - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .list() .layer(new org.deeplearning4j.nn.conf.layers.LSTM.Builder().nIn(nIn).nOut(nHiddenUnits).build()) .layer(new GlobalPoolingLayer()) @@ -783,7 +785,7 @@ public class MultiLayerTestRNN extends BaseDL4JTest { @Test public void testWrapperLayerGetPreviousState(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new FrozenLayer(new org.deeplearning4j.nn.conf.layers.LSTM.Builder() .nIn(5).nOut(5).build())) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestMasking.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestMasking.java index c4c3067a9..d98cd58b2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestMasking.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestMasking.java @@ -66,11 +66,12 @@ public class TestMasking extends BaseDL4JTest { public void checkMaskArrayClearance() { for (boolean tbptt : new boolean[] {true, false}) { //Simple "does it throw an exception" type test... - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE) .activation(Activation.IDENTITY).nIn(1).nOut(1).build()) .backpropType(tbptt ? BackpropType.TruncatedBPTT : BackpropType.Standard) - .tBPTTForwardLength(8).tBPTTBackwardLength(8).build(); + + .tbpttFwdLength(8).tbpttBackLength(8).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); @@ -149,7 +150,7 @@ public class TestMasking extends BaseDL4JTest { Activation a = act[i]; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new NoOp()) .dist(new NormalDistribution(0, 1)).seed(12345) .list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH) @@ -171,7 +172,7 @@ public class TestMasking extends BaseDL4JTest { net.setLabels(labels); net.computeGradientAndScore(); - double score1 = net.score(); + double score1 = net.getScore(); INDArray grad1 = net.gradient().gradient(); //Now: change the label values for the masked steps. 
The @@ -186,7 +187,7 @@ public class TestMasking extends BaseDL4JTest { assertNotEquals(labels, newLabels); - double score2 = net.score(); + double score2 = net.getScore(); INDArray grad2 = net.gradient().gradient(); assertEquals(score1, score2, 1e-6); @@ -195,7 +196,7 @@ public class TestMasking extends BaseDL4JTest { //Do the same for CompGraph - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().updater(new NoOp()) + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder().updater(new NoOp()) .dist(new NormalDistribution(0, 1)).seed(12345) .graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(layerSize) @@ -213,7 +214,7 @@ public class TestMasking extends BaseDL4JTest { graph.setLabels(labels); graph.computeGradientAndScore(); - double gScore1 = graph.score(); + double gScore1 = graph.getScore(); INDArray gGrad1 = graph.gradient().gradient(); graph.setLayerMaskArrays(null, new INDArray[] {labelMask}); @@ -221,7 +222,7 @@ public class TestMasking extends BaseDL4JTest { graph.setLabels(newLabels); graph.computeGradientAndScore(); - double gScore2 = graph.score(); + double gScore2 = graph.getScore(); INDArray gGrad2 = graph.gradient().gradient(); assertEquals(gScore1, gScore2, 1e-6); @@ -237,7 +238,7 @@ public class TestMasking extends BaseDL4JTest { int nIn = 5; int nOut = 4; - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().updater(new NoOp()) + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder().updater(new NoOp()) .dist(new NormalDistribution(0, 1)).seed(12345) .graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(layerSize).activation(Activation.TANH) @@ -269,7 +270,7 @@ public class TestMasking extends BaseDL4JTest { int cnnStride1 = 1; int channels = 1; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .weightInit(WeightInit.XAVIER) .convolutionMode(ConvolutionMode.Same) @@ -304,7 +305,7 @@ public class TestMasking extends BaseDL4JTest { @Test public void testMaskingStackUnstack(){ - ComputationGraphConfiguration nnConfig = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration nnConfig = NeuralNetConfiguration.builder() .updater(new Adam(2e-2)) .graphBuilder() .setInputTypes( diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestSetGetParameters.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestSetGetParameters.java index cb9536e3d..9c3c1407b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestSetGetParameters.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestSetGetParameters.java @@ -21,7 +21,6 @@ package org.deeplearning4j.nn.multilayer; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.layers.*; @@ -40,7 +39,7 @@ public class TestSetGetParameters extends BaseDL4JTest { @Test public void testSetParameters() { //Set up a MLN, then do set(get) on parameters. Results should be identical compared to before doing this. 
- MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(9).nOut(10) .dist(new NormalDistribution(0, 1)).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(11) @@ -54,13 +53,13 @@ public class TestSetGetParameters extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - INDArray initParams = net.params().dup(); - Map initParams2 = net.paramTable(); + INDArray initParams = net.getModelParams().dup(); + Map initParams2 = net.getParamTable(); - net.setParams(net.params()); + net.setParams(net.getModelParams()); - INDArray initParamsAfter = net.params(); - Map initParams2After = net.paramTable(); + INDArray initParamsAfter = net.getModelParams(); + Map initParams2After = net.getParamTable(); for (String s : initParams2.keySet()) { assertEquals(initParams2.get(s), initParams2After.get(s), "Params differ: " + s); @@ -72,14 +71,14 @@ public class TestSetGetParameters extends BaseDL4JTest { INDArray randomParams = Nd4j.rand(initParams.dataType(), initParams.shape()); net.setParams(randomParams.dup()); - assertEquals(net.params(), randomParams); + assertEquals(net.getModelParams(), randomParams); } @Test public void testSetParametersRNN() { //Set up a MLN, then do set(get) on parameters. Results should be identical compared to before doing this. - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new GravesLSTM.Builder().nIn(9).nOut(10) .dist(new NormalDistribution(0, 1)).build()) .layer(1, new GravesLSTM.Builder().nIn(10).nOut(11) @@ -91,13 +90,13 @@ public class TestSetGetParameters extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - INDArray initParams = net.params().dup(); - Map initParams2 = net.paramTable(); + INDArray initParams = net.getModelParams().dup(); + Map initParams2 = net.getParamTable(); - net.setParams(net.params()); + net.setParams(net.getModelParams()); - INDArray initParamsAfter = net.params(); - Map initParams2After = net.paramTable(); + INDArray initParamsAfter = net.getModelParams(); + Map initParams2After = net.getParamTable(); for (String s : initParams2.keySet()) { assertEquals(initParams2.get(s), initParams2After.get(s), "Params differ: " + s); @@ -109,7 +108,7 @@ public class TestSetGetParameters extends BaseDL4JTest { INDArray randomParams = Nd4j.rand(initParams.dataType(), initParams.shape()); net.setParams(randomParams.dup()); - assertEquals(net.params(), randomParams); + assertEquals(net.getModelParams(), randomParams); } @Test @@ -118,7 +117,7 @@ public class TestSetGetParameters extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); //Create configuration. 
Doesn't matter if this doesn't actually work for forward/backward pass here - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).list() .layer(0, new ConvolutionLayer.Builder().nIn(10).nOut(10).kernelSize(2, 2).stride(2, 2) .padding(2, 2).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).build()) @@ -129,7 +128,7 @@ public class TestSetGetParameters extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - INDArray params = net.params(); + INDArray params = net.getModelParams(); MultiLayerNetwork net2 = new MultiLayerNetwork(conf); @@ -138,16 +137,16 @@ public class TestSetGetParameters extends BaseDL4JTest { MultiLayerNetwork net3 = new MultiLayerNetwork(conf); net3.init(params, false); - assertEquals(params, net2.params()); - assertEquals(params, net3.params()); + assertEquals(params, net2.getModelParams()); + assertEquals(params, net3.getModelParams()); - assertNotSame(params, net2.params()); //Different objects due to clone - assertSame(params, net3.params()); //Same object due to clone + assertNotSame(params, net2.getModelParams()); //Different objects due to clone + assertSame(params, net3.getModelParams()); //Same object due to clone - Map paramsMap = net.paramTable(); - Map paramsMap2 = net2.paramTable(); - Map paramsMap3 = net3.paramTable(); + Map paramsMap = net.getParamTable(); + Map paramsMap2 = net2.getParamTable(); + Map paramsMap3 = net3.getParamTable(); for (String s : paramsMap.keySet()) { assertEquals(paramsMap.get(s), paramsMap2.get(s)); assertEquals(paramsMap.get(s), paramsMap3.get(s)); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestVariableLengthTS.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestVariableLengthTS.java index 5d5daed14..6f3747e84 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestVariableLengthTS.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/multilayer/TestVariableLengthTS.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.multilayer; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; @@ -48,7 +47,6 @@ import org.nd4j.linalg.learning.config.NoOp; import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Random; @@ -72,7 +70,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { for (int nExamples : miniBatchSizes) { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new GravesLSTM.Builder().activation(Activation.TANH).nIn(2).nOut(2).build()) @@ -105,14 +103,14 @@ public class TestVariableLengthTS extends BaseDL4JTest { net.setInput(in1); net.setLabels(labels1); net.computeGradientAndScore(); - double score1 = net.score(); + double score1 = net.getScore(); Gradient g1 = net.gradient(); net.setInput(in2); net.setLabels(labels2); 
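/*
 * Illustrative aside (not part of the patch): a short sketch of the score accessor rename,
 * assuming net.getScore() returns what net.score() previously returned after
 * computeGradientAndScore(), and that setLayerMaskArrays() and gradient() keep their
 * existing semantics. Class and variable names below are invented for the example.
 */
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ScoreAccessorSketch {
    public static void main(String[] args) {
        NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).list()
                .layer(0, new LSTM.Builder().nIn(2).nOut(2).build())
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.IDENTITY).nIn(2).nOut(1).build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray features = Nd4j.rand(3, 2, 5);   // [miniBatch, nIn, timeSeriesLength]
        INDArray labels = Nd4j.rand(3, 1, 5);
        INDArray labelMask = Nd4j.ones(3, 5);     // every step contributes to the score

        net.setInput(features);
        net.setLabels(labels);
        net.setLayerMaskArrays(null, labelMask);
        net.computeGradientAndScore();

        double score = net.getScore();            // formerly net.score()
        INDArray grad = net.gradient().gradient();
        System.out.println("score=" + score + ", gradient elements=" + grad.length());
    }
}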
net.setLayerMaskArrays(null, labelMask); net.computeGradientAndScore(); - double score2 = net.score(); + double score2 = net.getScore(); Gradient g2 = net.gradient(); //Scores and gradients should be identical for two cases (given mask array) @@ -136,7 +134,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { } net.setLabels(labels2); net.computeGradientAndScore(); - double score2a = net.score(); + double score2a = net.getScore(); Gradient g2a = net.gradient(); assertEquals(score2, score2a, 1e-6); for (String s : g2map.keySet()) { @@ -160,7 +158,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { for (int nExamples : miniBatchSizes) { Nd4j.getRandom().setSeed(1234); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.1)).seed(12345).list() .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(2).nOut(2).build()) @@ -170,7 +168,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { .nOut(1).activation(Activation.TANH).build()) .inputPreProcessor(0, new RnnToFeedForwardPreProcessor()) .inputPreProcessor(2, new FeedForwardToRnnPreProcessor()) - .setInputType(InputType.recurrent(2,-1, RNNFormat.NCW)) + .inputType(InputType.recurrent(2,-1, RNNFormat.NCW)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -198,7 +196,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { net.setInput(in1); net.setLabels(labels1); net.computeGradientAndScore(); - double score1 = net.score(); + double score1 = net.getScore(); Gradient g1 = net.gradient(); Map map1 = g1.gradientForVariable(); for (String s : map1.keySet()) { @@ -209,7 +207,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { net.setLabels(labels2); net.setLayerMaskArrays(inputMask, null); net.computeGradientAndScore(); - double score2 = net.score(); + double score2 = net.getScore(); Gradient g2 = net.gradient(); net.setInput(in2); @@ -242,7 +240,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { net.setInput(in2); net.setLayerMaskArrays(inputMask, null); net.computeGradientAndScore(); - double score2a = net.score(); + double score2a = net.getScore(); Gradient g2a = net.gradient(); assertEquals(score2, score2a, 1e-12); for (String s : g2.gradientForVariable().keySet()) { @@ -305,8 +303,8 @@ public class TestVariableLengthTS extends BaseDL4JTest { INDArray input = Nd4j.rand(miniBatch, nIn, tsLength); INDArray labels = Nd4j.ones(miniBatch, nOut, tsLength); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(5) .dist(new NormalDistribution(0, 1)) @@ -329,7 +327,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { mln.setLabels(labels); mln.computeGradientAndScore(); - double score = mln.score(); + double score = mln.getScore(); assertEquals(expScore, score, 0.1, msg); } @@ -368,8 +366,8 @@ public class TestVariableLengthTS extends BaseDL4JTest { INDArray input = Nd4j.rand(miniBatch, nIn, tsLength); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(5) .dist(new NormalDistribution(0, 1)) @@ -384,8 +382,8 @@ public class TestVariableLengthTS extends 
BaseDL4JTest { MultiLayerNetwork mln = new MultiLayerNetwork(conf); mln.init(); - MultiLayerConfiguration conf2 = - new NeuralNetConfiguration.Builder().seed(12345L).list() + NeuralNetConfiguration conf2 = + NeuralNetConfiguration.builder().seed(12345L).list() .layer(0, new GravesLSTM.Builder().nIn(nIn).nOut(5) .dist(new NormalDistribution(0, 1)) @@ -440,7 +438,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { int layerSize = 3; int nOut = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .activation(Activation.TANH).list() .layer(0, new GravesBidirectionalLSTM.Builder().nIn(nIn).nOut(layerSize).build()) .layer(1, new GravesBidirectionalLSTM.Builder().nIn(layerSize).nOut(layerSize).build()) @@ -517,7 +515,7 @@ public class TestVariableLengthTS extends BaseDL4JTest { // System.out.println("Starting test: bidirectional = " + bidirectional + ", poolingType = " + pt); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .activation(Activation.TANH).list().layer(0, bidirectional ? new GravesBidirectionalLSTM.Builder().nIn(nIn).nOut(layerSize).build() : new GravesLSTM.Builder().nIn(nIn).nOut(layerSize).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/rl/TestMultiModelGradientApplication.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/rl/TestMultiModelGradientApplication.java index 410abf970..0539c6262 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/rl/TestMultiModelGradientApplication.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/rl/TestMultiModelGradientApplication.java @@ -22,7 +22,6 @@ package org.deeplearning4j.nn.rl; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -55,11 +54,11 @@ public class TestMultiModelGradientApplication extends BaseDL4JTest { for (boolean regularization : new boolean[] {false, true}) { for (IUpdater u : new IUpdater[] {new Sgd(0.1), new Nesterovs(0.1), new Adam(0.1)}) { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(12345).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).updater(u) .l1(regularization ? 0.2 : 0.0) - .l2(regularization ? 0.3 : 0.0).list() + .l2(regularization ? 
0.3 : 0.0) .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(10).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(2, new OutputLayer.Builder( @@ -78,7 +77,7 @@ public class TestMultiModelGradientApplication extends BaseDL4JTest { MultiLayerNetwork net2GradUpd = new MultiLayerNetwork(conf.clone()); net2GradUpd.init(); - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); INDArray f = Nd4j.rand(minibatch, nIn); INDArray l = Nd4j.create(minibatch, nOut); @@ -110,28 +109,28 @@ public class TestMultiModelGradientApplication extends BaseDL4JTest { //Also: if we apply the gradient using a subi op, we should get the same final params as if we did a fit op // on the original network - net2GradUpd.params().subi(g.gradient()); + net2GradUpd.getModelParams().subi(g.gradient()); net1GradCalc.fit(f, l); - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); //============================= if (!(u instanceof Sgd)) { net2GradUpd.getUpdater().getStateViewArray().assign(net1GradCalc.getUpdater().getStateViewArray()); } - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); assertEquals(net1GradCalc.getUpdater().getStateViewArray(), net2GradUpd.getUpdater().getStateViewArray()); //Remove the next 2 lines: fails - as net 1 is 1 iteration ahead - net1GradCalc.getLayerWiseConfigurations().setIterationCount(0); - net2GradUpd.getLayerWiseConfigurations().setIterationCount(0); + net1GradCalc.getNetConfiguration().setIterationCount(0); + net2GradUpd.getNetConfiguration().setIterationCount(0); for (int i = 0; i < 100; i++) { net1GradCalc.fit(f, l); net2GradUpd.fit(f, l); - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); } } } @@ -148,7 +147,7 @@ public class TestMultiModelGradientApplication extends BaseDL4JTest { for (IUpdater u : new IUpdater[] {new Sgd(0.1), new Adam(0.1)}) { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH) + NeuralNetConfiguration.builder().seed(12345).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).updater(u) .l1(regularization ? 0.2 : 0.0) .l2(regularization ? 
0.3 : 0.0).graphBuilder().addInputs("in") @@ -170,7 +169,7 @@ public class TestMultiModelGradientApplication extends BaseDL4JTest { ComputationGraph net2GradUpd = new ComputationGraph(conf.clone()); net2GradUpd.init(); - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); INDArray f = Nd4j.rand(minibatch, nIn); INDArray l = Nd4j.create(minibatch, nOut); @@ -202,28 +201,28 @@ public class TestMultiModelGradientApplication extends BaseDL4JTest { //Also: if we apply the gradient using a subi op, we should get the same final params as if we did a fit op // on the original network - net2GradUpd.params().subi(g.gradient()); + net2GradUpd.getModelParams().subi(g.gradient()); net1GradCalc.fit(new INDArray[] {f}, new INDArray[] {l}); - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); //============================= if (!(u instanceof Sgd)) { net2GradUpd.getUpdater().getStateViewArray().assign(net1GradCalc.getUpdater().getStateViewArray()); } - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); assertEquals(net1GradCalc.getUpdater().getStateViewArray(), net2GradUpd.getUpdater().getStateViewArray()); //Remove the next 2 lines: fails - as net 1 is 1 iteration ahead - net1GradCalc.getConfiguration().setIterationCount(0); - net2GradUpd.getConfiguration().setIterationCount(0); + net1GradCalc.getComputationGraphConfiguration().setIterationCount(0); + net2GradUpd.getComputationGraphConfiguration().setIterationCount(0); for (int i = 0; i < 100; i++) { net1GradCalc.fit(new INDArray[] {f}, new INDArray[] {l}); net2GradUpd.fit(new INDArray[] {f}, new INDArray[] {l}); - assertEquals(net1GradCalc.params(), net2GradUpd.params()); + assertEquals(net1GradCalc.getModelParams(), net2GradUpd.getModelParams()); } } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestFrozenLayers.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestFrozenLayers.java index ecda6b48a..5c5fb204e 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestFrozenLayers.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestFrozenLayers.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.transferlearning; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -76,7 +75,7 @@ public class TestFrozenLayers extends BaseDL4JTest { } Map paramsBefore = new LinkedHashMap<>(); - for(Map.Entry entry : transfer.paramTable().entrySet()){ + for(Map.Entry entry : transfer.getParamTable().entrySet()){ paramsBefore.put(entry.getKey(), entry.getValue().dup()); } @@ -86,7 +85,7 @@ public class TestFrozenLayers extends BaseDL4JTest { transfer.fit(f,l); } - for(Map.Entry entry : transfer.paramTable().entrySet()){ + for(Map.Entry entry : transfer.getParamTable().entrySet()){ String s = msg + " - " + entry.getKey(); if(entry.getKey().startsWith("5_")){ //Non-frozen layer @@ -128,7 +127,7 @@ public class TestFrozenLayers extends 
BaseDL4JTest { } Map paramsBefore = new LinkedHashMap<>(); - for(Map.Entry entry : transfer.paramTable().entrySet()){ + for(Map.Entry entry : transfer.getParamTable().entrySet()){ paramsBefore.put(entry.getKey(), entry.getValue().dup()); } @@ -138,7 +137,7 @@ public class TestFrozenLayers extends BaseDL4JTest { transfer.fit(new INDArray[]{f},new INDArray[]{l}); } - for(Map.Entry entry : transfer.paramTable().entrySet()){ + for(Map.Entry entry : transfer.getParamTable().entrySet()){ String s = msg + " - " + entry.getKey(); if(entry.getKey().startsWith("5_")){ //Non-frozen layer @@ -152,7 +151,7 @@ public class TestFrozenLayers extends BaseDL4JTest { } public static MultiLayerNetwork getOriginalNet(int seed){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(seed) .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) @@ -165,7 +164,7 @@ public class TestFrozenLayers extends BaseDL4JTest { .layer(new DenseLayer.Builder().nOut(64).build()) .layer(new DenseLayer.Builder().nIn(64).nOut(64).build()) .layer(new OutputLayer.Builder().nIn(64).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build()) - .setInputType(InputType.convolutionalFlat(28,28,1)) + .inputType(InputType.convolutionalFlat(28,28,1)) .build(); @@ -175,7 +174,7 @@ public class TestFrozenLayers extends BaseDL4JTest { } public static ComputationGraph getOriginalGraph(int seed){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .seed(seed) .weightInit(WeightInit.XAVIER) .activation(Activation.TANH) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestTransferLearningModelSerializer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestTransferLearningModelSerializer.java index ad92a7c47..6d6ce41c0 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestTransferLearningModelSerializer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TestTransferLearningModelSerializer.java @@ -23,12 +23,11 @@ package org.deeplearning4j.nn.transferlearning; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; import org.deeplearning4j.nn.conf.layers.DenseLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.FrozenLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -53,7 +52,7 @@ public class TestTransferLearningModelSerializer extends BaseDL4JTest { int nIn = 6; int nOut = 3; - MultiLayerConfiguration origConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration origConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.TANH).dropOut(0.5).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(5).build()) .layer(1, new DenseLayer.Builder().nIn(5).nOut(4).build()) @@ -71,9 +70,9 @@ public class TestTransferLearningModelSerializer extends BaseDL4JTest { 
assertTrue(withFrozen.getLayer(0) instanceof FrozenLayer); assertTrue(withFrozen.getLayer(1) instanceof FrozenLayer); - assertTrue(withFrozen.getLayerWiseConfigurations().getConf(0) + assertTrue(withFrozen.getNetConfiguration().getConf(0) .getLayer() instanceof org.deeplearning4j.nn.conf.layers.misc.FrozenLayer); - assertTrue(withFrozen.getLayerWiseConfigurations().getConf(1) + assertTrue(withFrozen.getNetConfiguration().getConf(1) .getLayer() instanceof org.deeplearning4j.nn.conf.layers.misc.FrozenLayer); MultiLayerNetwork restored = TestUtils.testModelSerialization(withFrozen); @@ -102,7 +101,7 @@ public class TestTransferLearningModelSerializer extends BaseDL4JTest { int nIn = 6; int nOut = 3; - ComputationGraphConfiguration origConf = new NeuralNetConfiguration.Builder().activation(Activation.TANH).graphBuilder().addInputs("in") + ComputationGraphConfiguration origConf = NeuralNetConfiguration.builder().activation(Activation.TANH).graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(nIn).nOut(5).build(), "in") .addLayer("1", new DenseLayer.Builder().nIn(5).nOut(4).build(), "0") .addLayer("2", new DenseLayer.Builder().nIn(4).nOut(3).build(), "1") @@ -120,9 +119,9 @@ public class TestTransferLearningModelSerializer extends BaseDL4JTest { assertTrue(withFrozen.getLayer(0) instanceof FrozenLayer); assertTrue(withFrozen.getLayer(1) instanceof FrozenLayer); - Map m = withFrozen.getConfiguration().getVertices(); - Layer l0 = ((LayerVertex) m.get("0")).getLayerConf().getLayer(); - Layer l1 = ((LayerVertex) m.get("1")).getLayerConf().getLayer(); + Map m = withFrozen.getComputationGraphConfiguration().getVertices(); + LayerConfiguration l0 = ((LayerVertex) m.get("0")).getLayerConfiguration(); + LayerConfiguration l1 = ((LayerVertex) m.get("1")).getLayerConfiguration(); assertTrue(l0 instanceof org.deeplearning4j.nn.conf.layers.misc.FrozenLayer); assertTrue(l1 instanceof org.deeplearning4j.nn.conf.layers.misc.FrozenLayer); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningCompGraphTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningCompGraphTest.java index a81d96838..954e8ed18 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningCompGraphTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningCompGraphTest.java @@ -63,7 +63,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { long rng = 12345L; DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); //original conf - ComputationGraphConfiguration confToChange = new NeuralNetConfiguration.Builder().seed(rng) + ComputationGraphConfiguration confToChange = NeuralNetConfiguration.builder().seed(rng) .optimizationAlgo(OptimizationAlgorithm.LBFGS).updater(new Nesterovs(0.01, 0.99)) .graphBuilder().addInputs("layer0In").setInputTypes(InputType.feedForward(4)) .addLayer("layer0", new DenseLayer.Builder().nIn(4).nOut(3).build(), "layer0In") @@ -76,7 +76,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { .setOutputs("layer1").build(); //conf with learning parameters changed - ComputationGraphConfiguration expectedConf = new NeuralNetConfiguration.Builder().seed(rng) + ComputationGraphConfiguration expectedConf = NeuralNetConfiguration.builder().seed(rng) .updater(new RmsProp(0.2)) .graphBuilder().addInputs("layer0In") .setInputTypes(InputType.feedForward(4)) @@ -93,7 
+93,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { ComputationGraph modelToFineTune = new ComputationGraph(expectedConf); modelToFineTune.init(); - modelToFineTune.setParams(expectedModel.params()); + modelToFineTune.setParams(expectedModel.getModelParams()); //model after applying changes with transfer learning ComputationGraph modelNow = new TransferLearning.GraphBuilder(modelToFineTune) @@ -102,20 +102,20 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { .build(); //Check json - assertEquals(expectedConf.toJson(), modelNow.getConfiguration().toJson()); + assertEquals(expectedConf.toJson(), modelNow.getComputationGraphConfiguration().toJson()); //Check params after fit modelNow.fit(randomData); expectedModel.fit(randomData); - assertEquals(modelNow.score(), expectedModel.score(), 1e-8); - assertEquals(modelNow.params(), expectedModel.params()); + assertEquals(modelNow.getScore(), expectedModel.getScore(), 1e-8); + assertEquals(modelNow.getModelParams(), expectedModel.getModelParams()); } @Test public void testNoutChanges() { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 2)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); FineTuneConfiguration fineTuneConfiguration = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY).build(); @@ -138,12 +138,12 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { //.setOutputs("layer3") .build(); - BaseLayer bl0 = ((BaseLayer) modelNow.getLayer("layer0").conf().getLayer()); - BaseLayer bl1 = ((BaseLayer) modelNow.getLayer("layer1").conf().getLayer()); - BaseLayer bl3 = ((BaseLayer) modelNow.getLayer("layer3").conf().getLayer()); - assertEquals(bl0.getWeightInitFn(), new WeightInitDistribution(new NormalDistribution(1, 1e-1))); - assertEquals(bl1.getWeightInitFn(), new WeightInitXavier()); - assertEquals(bl1.getWeightInitFn(), new WeightInitXavier()); + BaseLayerConfiguration bl0 = ((BaseLayerConfiguration) modelNow.getLayer("layer0").getLayerConfiguration()); + BaseLayerConfiguration bl1 = ((BaseLayerConfiguration) modelNow.getLayer("layer1").getLayerConfiguration()); + BaseLayerConfiguration bl3 = ((BaseLayerConfiguration) modelNow.getLayer("layer3").getLayerConfiguration()); + assertEquals(bl0.getWeightInit(), new WeightInitDistribution(new NormalDistribution(1, 1e-1))); + assertEquals(bl1.getWeightInit(), new WeightInitXavier()); + assertEquals(bl1.getWeightInit(), new WeightInitXavier()); ComputationGraph modelExpectedArch = new ComputationGraph(overallConf.graphBuilder().addInputs("layer0In") .addLayer("layer0", new DenseLayer.Builder().nIn(4).nOut(3).build(), "layer0In") @@ -160,29 +160,29 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { modelExpectedArch.init(); //modelNow should have the same architecture as modelExpectedArch - assertArrayEquals(modelExpectedArch.params().shape(), modelNow.params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer0").params().shape(), - modelNow.getLayer("layer0").params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer1").params().shape(), - modelNow.getLayer("layer1").params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer2").params().shape(), - modelNow.getLayer("layer2").params().shape()); - 
assertArrayEquals(modelExpectedArch.getLayer("layer3").params().shape(), - modelNow.getLayer("layer3").params().shape()); + assertArrayEquals(modelExpectedArch.getModelParams().shape(), modelNow.getModelParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer0").getParams().shape(), + modelNow.getLayer("layer0").getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer1").getParams().shape(), + modelNow.getLayer("layer1").getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer2").getParams().shape(), + modelNow.getLayer("layer2").getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer3").getParams().shape(), + modelNow.getLayer("layer3").getParams().shape()); - modelNow.setParams(modelExpectedArch.params()); + modelNow.setParams(modelExpectedArch.getModelParams()); //fit should give the same results modelExpectedArch.fit(randomData); modelNow.fit(randomData); - assertEquals(modelExpectedArch.score(), modelNow.score(), 1e-8); - assertEquals(modelExpectedArch.params(), modelNow.params()); + assertEquals(modelExpectedArch.getScore(), modelNow.getScore(), 1e-8); + assertEquals(modelExpectedArch.getModelParams(), modelNow.getModelParams()); } @Test public void testRemoveAndAdd() { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY); FineTuneConfiguration fineTuneConfiguration = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)) .activation(Activation.IDENTITY).build(); @@ -226,22 +226,22 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { modelExpectedArch.init(); //modelNow should have the same architecture as modelExpectedArch - assertArrayEquals(modelExpectedArch.params().shape(), modelNow.params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer0").params().shape(), - modelNow.getLayer("layer0").params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer1").params().shape(), - modelNow.getLayer("layer1").params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer2").params().shape(), - modelNow.getLayer("layer2").params().shape()); - assertArrayEquals(modelExpectedArch.getLayer("layer3").params().shape(), - modelNow.getLayer("layer3").params().shape()); + assertArrayEquals(modelExpectedArch.getModelParams().shape(), modelNow.getModelParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer0").getParams().shape(), + modelNow.getLayer("layer0").getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer1").getParams().shape(), + modelNow.getLayer("layer1").getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer2").getParams().shape(), + modelNow.getLayer("layer2").getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer("layer3").getParams().shape(), + modelNow.getLayer("layer3").getParams().shape()); - modelNow.setParams(modelExpectedArch.params()); + modelNow.setParams(modelExpectedArch.getModelParams()); //fit should give the same results modelExpectedArch.fit(randomData); modelNow.fit(randomData); - assertEquals(modelExpectedArch.score(), modelNow.score(), 1e-8); - assertEquals(modelExpectedArch.params(), modelNow.params()); + assertEquals(modelExpectedArch.getScore(), modelNow.getScore(), 1e-8); + 
assertEquals(modelExpectedArch.getModelParams(), modelNow.getModelParams()); } @Test @@ -250,7 +250,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { DataSet randomData = new DataSet(Nd4j.rand(10, 28 * 28 * 3).reshape(10, 3, 28, 28), Nd4j.rand(10, 10)); ComputationGraph modelToFineTune = new ComputationGraph( - new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration.builder().seed(123) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)).graphBuilder() .addInputs("layer0In") @@ -303,7 +303,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { modelToFineTune.init(); //this will override the learning configuration set in the model - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().seed(456).updater(new Sgd(0.001)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().seed(456).updater(new Sgd(0.001)); FineTuneConfiguration fineTuneConfiguration = new FineTuneConfiguration.Builder().seed(456).updater(new Sgd(0.001)) .build(); @@ -382,16 +382,16 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { modelExpectedArch.getVertex("layer0").setLayerAsFrozen(); modelExpectedArch.getVertex("layer1").setLayerAsFrozen(); - assertEquals(modelExpectedArch.getConfiguration().toJson(), modelNow.getConfiguration().toJson()); + assertEquals(modelExpectedArch.getComputationGraphConfiguration().toJson(), modelNow.getComputationGraphConfiguration().toJson()); - modelNow.setParams(modelExpectedArch.params()); + modelNow.setParams(modelExpectedArch.getModelParams()); int i = 0; while (i < 5) { modelExpectedArch.fit(randomData); modelNow.fit(randomData); i++; } - assertEquals(modelExpectedArch.params(), modelNow.params()); + assertEquals(modelExpectedArch.getModelParams(), modelNow.getModelParams()); } @@ -399,7 +399,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { @Test public void testTransferGlobalPool() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new Adam(0.1)) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new Adam(0.1)) .weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in") .addLayer("blstm1",new GravesBidirectionalLSTM.Builder().nIn(10).nOut(10) @@ -425,7 +425,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { .nIn(10).nOut(5).build(), "dense") .build(); - ComputationGraphConfiguration confExpected = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration confExpected = NeuralNetConfiguration.builder().seed(12345) .updater(new Sgd(0.01)) .weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in") @@ -445,14 +445,14 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { // assertEquals(confExpected, graph.getConfiguration()); - assertEquals(confExpected.toJson(), graph.getConfiguration().toJson()); + assertEquals(confExpected.toJson(), graph.getComputationGraphConfiguration().toJson()); } @Test public void testObjectOverrides(){ //https://github.com/deeplearning4j/deeplearning4j/issues/4368 - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .dropOut(0.5) .weightNoise(new DropConnect(0.5)) .l2(0.5) @@ -477,7 +477,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { .fineTuneConfiguration(ftc) .build(); - DenseLayer l = (DenseLayer) 
transfer.getLayer(0).conf().getLayer(); + DenseLayer l = (DenseLayer) transfer.getLayer(0).getLayerConfiguration(); assertNull(l.getIDropout()); assertNull(l.getWeightNoise()); @@ -494,7 +494,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { final String firstConv = "firstConv"; final String secondConv = "secondConv"; final INDArray input = Nd4j.create(6,6,6,6); - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() .weightInit(new ConstantDistribution(666)) .graphBuilder() .addInputs(inputName) @@ -541,7 +541,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { final String afterPoolName = "afterPool"; final String outputName = "output"; final INDArray input = Nd4j.create(new long[] {1, 2, 4, 4}); - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() .graphBuilder() .addInputs(inputName) .setOutputs(outputName) @@ -578,7 +578,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { @Test public void testTransferLearningSameDiffLayersGraph(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") @@ -604,13 +604,13 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { cg2.output(arr); - Map m = new HashMap<>(cg.paramTable()); + Map m = new HashMap<>(cg.getParamTable()); m.put("newOut_W", m.remove("out_W")); m.put("newOut_b", m.remove("out_b")); cg2.setParamTable(m); - Map p1 = cg.paramTable(); - Map p2 = cg2.paramTable(); + Map p1 = cg.getParamTable(); + Map p2 = cg2.getParamTable(); for(String s : p1.keySet()){ INDArray i1 = p1.get(s); INDArray i2 = p2.get(s.replaceAll("out", "newOut")); @@ -624,7 +624,7 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { @Test public void testTransferLearningSameDiffLayersGraphVertex(){ - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") @@ -650,13 +650,13 @@ public class TransferLearningCompGraphTest extends BaseDL4JTest { cg2.output(arr); - Map m = new HashMap<>(cg.paramTable()); + Map m = new HashMap<>(cg.getParamTable()); m.put("newOut_W", m.remove("out_W")); m.put("newOut_b", m.remove("out_b")); cg2.setParamTable(m); - Map p1 = cg.paramTable(); - Map p2 = cg2.paramTable(); + Map p1 = cg.getParamTable(); + Map p2 = cg2.getParamTable(); for(String s : p1.keySet()){ INDArray i1 = p1.get(s); INDArray i2 = p2.get(s.replaceAll("out", "newOut")); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningComplex.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningComplex.java index d30227339..cee6e2f90 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningComplex.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningComplex.java @@ -28,7 +28,7 @@ import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.inputs.InputType; -import 
org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -55,7 +55,7 @@ public class TransferLearningComplex extends BaseDL4JTest { // (b) Test global override (should be selective) - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Adam(1e-4)) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(new Adam(1e-4)) .activation(Activation.LEAKYRELU).graphBuilder().addInputs("in1", "in2") .addLayer("A", new DenseLayer.Builder().nIn(10).nOut(9).build(), "in1") .addLayer("B", new DenseLayer.Builder().nIn(9).nOut(8).build(), "A") @@ -87,9 +87,9 @@ public class TransferLearningComplex extends BaseDL4JTest { Layer[] layers = graph2.getLayers(); for (Layer l : layers) { - String name = l.conf().getLayer().getLayerName(); + String name = l.getLayerConfiguration().getLayerName(); log.info(name + "\t frozen: " + (l instanceof FrozenLayer)); - if ("C".equals(l.conf().getLayer().getLayerName())) { + if ("C".equals(l.getLayerConfiguration().getLayerName())) { //Only C should be frozen in this config cFound = true; assertTrue(l instanceof FrozenLayer, name); @@ -98,7 +98,7 @@ public class TransferLearningComplex extends BaseDL4JTest { } //Also check config: - BaseLayer bl = ((BaseLayer) l.conf().getLayer()); + BaseLayerConfiguration bl = ((BaseLayerConfiguration) l.getLayerConfiguration()); assertEquals(new Adam(2e-2), bl.getIUpdater()); assertEquals(Activation.LEAKYRELU.getActivationFunction(), bl.getActivationFn()); } @@ -109,7 +109,7 @@ public class TransferLearningComplex extends BaseDL4JTest { @Test public void testSimplerMergeBackProp() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.9)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.9)) .activation(Activation.IDENTITY) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT); @@ -153,8 +153,8 @@ public class TransferLearningComplex extends BaseDL4JTest { .setOutputs("outRight").build(); ComputationGraph modelOther = new ComputationGraph(otherConf); modelOther.init(); - modelOther.getLayer("denseRight0").setParams(modelToTune.getLayer("denseRight0").params()); - modelOther.getLayer("outRight").setParams(modelToTune.getLayer("outRight").params()); + modelOther.getLayer("denseRight0").setParams(modelToTune.getLayer("denseRight0").getParams()); + modelOther.getLayer("outRight").setParams(modelToTune.getLayer("outRight").getParams()); modelToTune.getVertex("denseCentre0").setLayerAsFrozen(); ComputationGraph modelNow = @@ -178,11 +178,11 @@ public class TransferLearningComplex extends BaseDL4JTest { assertEquals(otherRandData.getFeatures(0), modelToTune.feedForward(randData.getFeatures(), false).get("denseCentre0")); - assertEquals(modelOther.getLayer("denseRight0").params(), modelNow.getLayer("denseRight0").params()); - assertEquals(modelOther.getLayer("denseRight0").params(), modelToTune.getLayer("denseRight0").params()); + assertEquals(modelOther.getLayer("denseRight0").getParams(), modelNow.getLayer("denseRight0").getParams()); + assertEquals(modelOther.getLayer("denseRight0").getParams(), modelToTune.getLayer("denseRight0").getParams()); - assertEquals(modelOther.getLayer("outRight").params(), modelNow.getLayer("outRight").params()); - 
assertEquals(modelOther.getLayer("outRight").params(), modelToTune.getLayer("outRight").params()); + assertEquals(modelOther.getLayer("outRight").getParams(), modelNow.getLayer("outRight").getParams()); + assertEquals(modelOther.getLayer("outRight").getParams(), modelToTune.getLayer("outRight").getParams()); n++; } @@ -191,7 +191,7 @@ public class TransferLearningComplex extends BaseDL4JTest { @Test public void testLessSimpleMergeBackProp() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.9)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.9)) .activation(Activation.IDENTITY); /* @@ -236,11 +236,11 @@ public class TransferLearningComplex extends BaseDL4JTest { assertEquals(otherRandData.getFeatures(0), modelToTune.feedForward(randData.getFeatures(), false).get("denseCentre0")); - assertEquals(modelToTune.getLayer("denseRight0").params(), modelNow.getLayer("denseRight0").params()); + assertEquals(modelToTune.getLayer("denseRight0").getParams(), modelNow.getLayer("denseRight0").getParams()); - assertEquals(modelToTune.getLayer("outRight").params(), modelNow.getLayer("outRight").params()); + assertEquals(modelToTune.getLayer("outRight").getParams(), modelNow.getLayer("outRight").getParams()); - assertEquals(modelToTune.getLayer("outCentre").params(), modelNow.getLayer("outCentre").params()); + assertEquals(modelToTune.getLayer("outCentre").getParams(), modelNow.getLayer("outCentre").getParams()); n++; } @@ -248,7 +248,7 @@ public class TransferLearningComplex extends BaseDL4JTest { @Test public void testAddOutput() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.9)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.9)) .activation(Activation.IDENTITY); ComputationGraphConfiguration conf = overallConf.graphBuilder().addInputs("inCentre", "inRight") diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelperTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelperTest.java index 0e78a3d6c..48963619b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelperTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelperTest.java @@ -28,6 +28,7 @@ import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.graph.SubsetVertex; import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer.Builder; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -41,6 +42,7 @@ import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; import java.util.List; +import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -50,7 +52,7 @@ public class TransferLearningHelperTest extends BaseDL4JTest { @Test public void tesUnfrozenSubset() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().seed(124) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = 
NeuralNetConfiguration.builder().seed(124) .activation(Activation.IDENTITY) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1)); /* @@ -126,13 +128,13 @@ public class TransferLearningHelperTest extends BaseDL4JTest { .setOutputs("outLeft", "outCentre", "outRight").build(); ComputationGraph expectedModel = new ComputationGraph(expectedConf); expectedModel.init(); - assertEquals(expectedConf.toJson(), modelSubset.getConfiguration().toJson()); + assertEquals(expectedConf.toJson(), modelSubset.getComputationGraphConfiguration().toJson()); } @Test public void testFitUnFrozen() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.9)).seed(124) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.9)).seed(124) .activation(Activation.IDENTITY) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT); @@ -176,25 +178,25 @@ public class TransferLearningHelperTest extends BaseDL4JTest { TransferLearningHelper helper = new TransferLearningHelper(modelToTune, "denseCentre2"); MultiDataSet featurizedDataSet = helper.featurize(origData); - assertEquals(modelIdentical.getLayer("denseRight0").params(), modelToTune.getLayer("denseRight0").params()); + assertEquals(modelIdentical.getLayer("denseRight0").getParams(), modelToTune.getLayer("denseRight0").getParams()); modelIdentical.fit(origData); helper.fitFeaturized(featurizedDataSet); - assertEquals(modelIdentical.getLayer("denseCentre0").params(), modelToTune.getLayer("denseCentre0").params()); - assertEquals(modelIdentical.getLayer("denseCentre1").params(), modelToTune.getLayer("denseCentre1").params()); - assertEquals(modelIdentical.getLayer("denseCentre2").params(), modelToTune.getLayer("denseCentre2").params()); - assertEquals(modelIdentical.getLayer("denseCentre3").params(), modelToTune.getLayer("denseCentre3").params()); - assertEquals(modelIdentical.getLayer("outCentre").params(), modelToTune.getLayer("outCentre").params()); - assertEquals(modelIdentical.getLayer("denseRight").conf().toJson(), - modelToTune.getLayer("denseRight").conf().toJson()); - assertEquals(modelIdentical.getLayer("denseRight").params(), modelToTune.getLayer("denseRight").params()); - assertEquals(modelIdentical.getLayer("denseRight0").conf().toJson(), - modelToTune.getLayer("denseRight0").conf().toJson()); + assertEquals(modelIdentical.getLayer("denseCentre0").getParams(), modelToTune.getLayer("denseCentre0").getParams()); + assertEquals(modelIdentical.getLayer("denseCentre1").getParams(), modelToTune.getLayer("denseCentre1").getParams()); + assertEquals(modelIdentical.getLayer("denseCentre2").getParams(), modelToTune.getLayer("denseCentre2").getParams()); + assertEquals(modelIdentical.getLayer("denseCentre3").getParams(), modelToTune.getLayer("denseCentre3").getParams()); + assertEquals(modelIdentical.getLayer("outCentre").getParams(), modelToTune.getLayer("outCentre").getParams()); + assertEquals(modelIdentical.getLayer("denseRight").getNetConfiguration().toJson(), + modelToTune.getLayer("denseRight").getNetConfiguration().toJson()); + assertEquals(modelIdentical.getLayer("denseRight").getParams(), modelToTune.getLayer("denseRight").getParams()); + assertEquals(modelIdentical.getLayer("denseRight0").getNetConfiguration().toJson(), + modelToTune.getLayer("denseRight0").getNetConfiguration().toJson()); //assertEquals(modelIdentical.getLayer("denseRight0").params(),modelToTune.getLayer("denseRight0").params()); - 
assertEquals(modelIdentical.getLayer("denseRight1").params(), modelToTune.getLayer("denseRight1").params()); - assertEquals(modelIdentical.getLayer("outRight").params(), modelToTune.getLayer("outRight").params()); - assertEquals(modelIdentical.getLayer("denseLeft0").params(), modelToTune.getLayer("denseLeft0").params()); - assertEquals(modelIdentical.getLayer("outLeft").params(), modelToTune.getLayer("outLeft").params()); + assertEquals(modelIdentical.getLayer("denseRight1").getParams(), modelToTune.getLayer("denseRight1").getParams()); + assertEquals(modelIdentical.getLayer("outRight").getParams(), modelToTune.getLayer("outRight").getParams()); + assertEquals(modelIdentical.getLayer("denseLeft0").getParams(), modelToTune.getLayer("denseLeft0").getParams()); + assertEquals(modelIdentical.getLayer("outLeft").getParams(), modelToTune.getLayer("outLeft").getParams()); // log.info(modelIdentical.summary()); // log.info(helper.unfrozenGraph().summary()); @@ -206,18 +208,19 @@ public class TransferLearningHelperTest extends BaseDL4JTest { public void testMLN() { DataSet randomData = new DataSet(Nd4j.rand(10, 4), Nd4j.rand(10, 3)); - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .activation(Activation.IDENTITY); - MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(overallConf.clone().list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) - .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) - .layer(2, new DenseLayer.Builder().nIn(2).nOut(3).build()) - .layer(3, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build()); + MultiLayerNetwork modelToFineTune = new MultiLayerNetwork( + (NeuralNetConfiguration) overallConf.clone() + .layer(0, new Builder().nIn(4).nOut(3).build()) + .layer(1, new Builder().nIn(3).nOut(2).build()) + .layer(2, new Builder().nIn(2).nOut(3).build()) + .layer(3, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build()); modelToFineTune.init(); MultiLayerNetwork modelNow = new TransferLearning.Builder(modelToFineTune).setFeatureExtractor(1).build(); @@ -227,13 +230,14 @@ public class TransferLearningHelperTest extends BaseDL4JTest { TransferLearningHelper helper = new TransferLearningHelper(modelToFineTune, 1); INDArray paramsLastTwoLayers = - Nd4j.hstack(modelToFineTune.getLayer(2).params(), modelToFineTune.getLayer(3).params()); - MultiLayerNetwork notFrozen = new MultiLayerNetwork(overallConf.clone().list() - .layer(0, new DenseLayer.Builder().nIn(2).nOut(3).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build(), paramsLastTwoLayers); + Nd4j.hstack(modelToFineTune.getLayer(2).getParams(), modelToFineTune.getLayer(3).getParams()); + MultiLayerNetwork notFrozen = new MultiLayerNetwork( + (NeuralNetConfiguration) overallConf.clone().list() + .layer(0, new Builder().nIn(2).nOut(3).build()) + .layer(1, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build(), paramsLastTwoLayers); assertEquals(asFrozenFeatures, 
helper.featurize(randomData).getFeatures()); assertEquals(randomData.getLabels(), helper.featurize(randomData).getLabels()); @@ -244,9 +248,9 @@ public class TransferLearningHelperTest extends BaseDL4JTest { modelNow.fit(randomData); } - INDArray expected = Nd4j.hstack(modelToFineTune.getLayer(0).params(), modelToFineTune.getLayer(1).params(), - notFrozen.params()); - INDArray act = modelNow.params(); + INDArray expected = Nd4j.hstack(modelToFineTune.getLayer(0).getParams(), modelToFineTune.getLayer(1).getParams(), + notFrozen.getModelParams()); + INDArray act = modelNow.getModelParams(); assertEquals(expected, act); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningMLNTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningMLNTest.java index 005f2158c..7d10a3bc7 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningMLNTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/transferlearning/TransferLearningMLNTest.java @@ -26,13 +26,13 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.BackpropType; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.constraint.UnitNormConstraint; import org.deeplearning4j.nn.conf.distribution.ConstantDistribution; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; +import org.deeplearning4j.nn.conf.layers.DenseLayer.Builder; import org.deeplearning4j.nn.conf.preprocessor.CnnToFeedForwardPreProcessor; import org.deeplearning4j.nn.conf.preprocessor.FeedForwardToRnnPreProcessor; import org.deeplearning4j.nn.conf.preprocessor.RnnToCnnPreProcessor; @@ -54,6 +54,7 @@ import org.nd4j.linalg.lossfunctions.LossFunctions; import com.fasterxml.jackson.core.JsonProcessingException; import java.util.Map; +import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; import static org.junit.jupiter.api.Assertions.*; @@ -67,16 +68,17 @@ public class TransferLearningMLNTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(rng); DataSet randomData = new DataSet(Nd4j.rand(DataType.FLOAT, 10, 4), TestUtils.randomOneHot(DataType.FLOAT, 10, 3)); //original conf - NeuralNetConfiguration.Builder confToChange = - new NeuralNetConfiguration.Builder().seed(rng).optimizationAlgo(OptimizationAlgorithm.LBFGS) + NeuralNetConfiguration.NeuralNetConfigurationBuilder confToChange = + (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().seed(rng).optimizationAlgo(OptimizationAlgorithm.LBFGS) .updater(new Nesterovs(0.01, 0.99)); - MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(confToChange.list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) - .build()) - .build()); + MultiLayerNetwork modelToFineTune = new MultiLayerNetwork( + (NeuralNetConfiguration) confToChange.list() + .layer(0, new Builder().nIn(4).nOut(3).build()) + .layer(1, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + .build()) + .build()); 
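/*
 * Illustrative aside (not part of the patch): a sketch of the per-layer accessor rename,
 * assuming Layer.getParams() replaces the former Layer.params() and
 * MultiLayerNetwork.getModelParams() replaces params(), with the flattened model view being
 * the per-layer parameter vectors stacked in order; this is the property the surrounding
 * transfer-learning tests rely on when they hstack layer parameters. Names are invented.
 */
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class LayerParamsSketch {
    public static void main(String[] args) {
        NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(0.1))
                .activation(Activation.IDENTITY).list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.IDENTITY).nIn(3).nOut(2).build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray stacked = Nd4j.hstack(
                net.getLayer(0).getParams(),   // formerly net.getLayer(0).params()
                net.getLayer(1).getParams());
        System.out.println("stacked == model params: " + stacked.equals(net.getModelParams()));
    }
}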
modelToFineTune.init(); //model after applying changes with transfer learning @@ -89,37 +91,37 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .build(); for (org.deeplearning4j.nn.api.Layer l : modelNow.getLayers()) { - BaseLayer bl = ((BaseLayer) l.conf().getLayer()); + BaseLayerConfiguration bl = ((BaseLayerConfiguration) l.getLayerConfiguration()); assertEquals(new RmsProp(0.5), bl.getIUpdater()); } - NeuralNetConfiguration.Builder confSet = new NeuralNetConfiguration.Builder().seed(rng) + NeuralNetConfiguration.NeuralNetConfigurationBuilder confSet = NeuralNetConfiguration.builder().seed(rng) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new RmsProp(0.5)).l2(0.4); - MultiLayerNetwork expectedModel = new MultiLayerNetwork(confSet.list() - .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) - .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + MultiLayerNetwork expectedModel = new MultiLayerNetwork((NeuralNetConfiguration) confSet.list() + .layer(0, new Builder().nIn(4).nOut(3).build()) + .layer(1, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) .build()) .build()); expectedModel.init(); - expectedModel.setParams(modelToFineTune.params().dup()); + expectedModel.setParams(modelToFineTune.getModelParams().dup()); - assertEquals(expectedModel.params(), modelNow.params()); + assertEquals(expectedModel.getModelParams(), modelNow.getModelParams()); //Check json - MultiLayerConfiguration expectedConf = expectedModel.getLayerWiseConfigurations(); - assertEquals(expectedConf.toJson(), modelNow.getLayerWiseConfigurations().toJson()); + NeuralNetConfiguration expectedConf = expectedModel.getNetConfiguration(); + assertEquals(expectedConf.toJson(), modelNow.getNetConfiguration().toJson()); //Check params after fit modelNow.fit(randomData); expectedModel.fit(randomData); - assertEquals(modelNow.score(), expectedModel.score(), 1e-6); - INDArray pExp = expectedModel.params(); - INDArray pNow = modelNow.params(); + assertEquals(modelNow.getScore(), expectedModel.getScore(), 1e-6); + INDArray pExp = expectedModel.getModelParams(); + INDArray pNow = modelNow.getModelParams(); assertEquals(pExp, pNow); } @@ -128,11 +130,11 @@ public class TransferLearningMLNTest extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); DataSet randomData = new DataSet(Nd4j.rand(DataType.FLOAT, 10, 4), TestUtils.randomOneHot(DataType.FLOAT,10, 2)); - NeuralNetConfiguration.Builder equivalentConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder equivalentConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)); FineTuneConfiguration overallConf = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)) .build(); - MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(equivalentConf.list() + MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(equivalentConf .layer(0, new DenseLayer.Builder().nIn(4).nOut(5).build()) .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) .layer(2, new DenseLayer.Builder().nIn(2).nOut(3).build()) @@ -145,7 +147,7 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .nOutReplace(3, 2, WeightInit.XAVIER, WeightInit.XAVIER) .nOutReplace(0, 3, WeightInit.XAVIER, new NormalDistribution(1, 1e-1)).build(); - MultiLayerNetwork modelExpectedArch = new MultiLayerNetwork(equivalentConf.list() + MultiLayerNetwork 
modelExpectedArch = new MultiLayerNetwork(equivalentConf .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) .layer(1, new DenseLayer.Builder().nIn(3).nOut(2).build()) .layer(2, new DenseLayer.Builder().nIn(2).nOut(3).build()) @@ -156,33 +158,33 @@ public class TransferLearningMLNTest extends BaseDL4JTest { modelExpectedArch.init(); //Will fail - expected because of dist and weight init changes - //assertEquals(modelExpectedArch.getLayerWiseConfigurations().toJson(), modelNow.getLayerWiseConfigurations().toJson()); + //assertEquals(modelExpectedArch.getConfiguration().toJson(), modelNow.getConfiguration().toJson()); - BaseLayer bl0 = ((BaseLayer) modelNow.getLayerWiseConfigurations().getConf(0).getLayer()); - BaseLayer bl1 = ((BaseLayer) modelNow.getLayerWiseConfigurations().getConf(1).getLayer()); - BaseLayer bl3 = ((BaseLayer) modelNow.getLayerWiseConfigurations().getConf(3).getLayer()); - assertEquals(bl0.getWeightInitFn().getClass(), WeightInitXavier.class); + BaseLayerConfiguration bl0 = ((BaseLayerConfiguration) modelNow.getNetConfiguration().getConf(0).getLayer()); + BaseLayerConfiguration bl1 = ((BaseLayerConfiguration) modelNow.getNetConfiguration().getConf(1).getLayer()); + BaseLayerConfiguration bl3 = ((BaseLayerConfiguration) modelNow.getNetConfiguration().getConf(3).getLayer()); + assertEquals(bl0.getWeightInit().getClass(), WeightInitXavier.class); try { - assertEquals(JsonMappers.getMapper().writeValueAsString(bl1.getWeightInitFn()), + assertEquals(JsonMappers.getMapper().writeValueAsString(bl1.getWeightInit()), JsonMappers.getMapper().writeValueAsString(new WeightInitDistribution(new NormalDistribution(1, 1e-1)))); } catch (JsonProcessingException e) { throw new RuntimeException(e); } - assertEquals(bl3.getWeightInitFn(), new WeightInitXavier()); + assertEquals(bl3.getWeightInit(), new WeightInitXavier()); //modelNow should have the same architecture as modelExpectedArch - assertArrayEquals(modelExpectedArch.params().shape(), modelNow.params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(0).params().shape(), modelNow.getLayer(0).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(1).params().shape(), modelNow.getLayer(1).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(2).params().shape(), modelNow.getLayer(2).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(3).params().shape(), modelNow.getLayer(3).params().shape()); + assertArrayEquals(modelExpectedArch.getModelParams().shape(), modelNow.getModelParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(0).getParams().shape(), modelNow.getLayer(0).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(1).getParams().shape(), modelNow.getLayer(1).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(2).getParams().shape(), modelNow.getLayer(2).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(3).getParams().shape(), modelNow.getLayer(3).getParams().shape()); - modelNow.setParams(modelExpectedArch.params()); + modelNow.setParams(modelExpectedArch.getModelParams()); //fit should give the same results modelExpectedArch.fit(randomData); modelNow.fit(randomData); - assertEquals(modelExpectedArch.score(), modelNow.score(), 0.000001); - assertEquals(modelExpectedArch.params(), modelNow.params()); + assertEquals(modelExpectedArch.getScore(), modelNow.getScore(), 0.000001); + assertEquals(modelExpectedArch.getModelParams(), modelNow.getModelParams()); } @@ -191,7 +193,7 @@ public class TransferLearningMLNTest 
extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); DataSet randomData = new DataSet(Nd4j.rand(DataType.FLOAT,10, 4), TestUtils.randomOneHot(DataType.FLOAT, 10, 3)); - NeuralNetConfiguration.Builder equivalentConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder equivalentConf = (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().updater(new Sgd(0.1)); FineTuneConfiguration overallConf = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)).build(); MultiLayerNetwork modelToFineTune = new MultiLayerNetwork(//overallConf.list() @@ -225,20 +227,20 @@ public class TransferLearningMLNTest extends BaseDL4JTest { modelExpectedArch.init(); //modelNow should have the same architecture as modelExpectedArch - assertArrayEquals(modelExpectedArch.params().shape(), modelNow.params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(0).params().shape(), modelNow.getLayer(0).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(1).params().shape(), modelNow.getLayer(1).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(2).params().shape(), modelNow.getLayer(2).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(3).params().shape(), modelNow.getLayer(3).params().shape()); + assertArrayEquals(modelExpectedArch.getModelParams().shape(), modelNow.getModelParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(0).getParams().shape(), modelNow.getLayer(0).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(1).getParams().shape(), modelNow.getLayer(1).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(2).getParams().shape(), modelNow.getLayer(2).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(3).getParams().shape(), modelNow.getLayer(3).getParams().shape()); - modelNow.setParams(modelExpectedArch.params()); + modelNow.setParams(modelExpectedArch.getModelParams()); //fit should give the same results modelExpectedArch.fit(randomData); modelNow.fit(randomData); - double scoreExpected = modelExpectedArch.score(); - double scoreActual = modelNow.score(); + double scoreExpected = modelExpectedArch.getScore(); + double scoreActual = modelNow.getScore(); assertEquals(scoreExpected, scoreActual, 1e-4); - assertEquals(modelExpectedArch.params(), modelNow.params()); + assertEquals(modelExpectedArch.getModelParams(), modelNow.getModelParams()); } @Test @@ -248,8 +250,8 @@ public class TransferLearningMLNTest extends BaseDL4JTest { int V_HEIGHT = 130; int V_NFRAMES = 150; - MultiLayerConfiguration confForArchitecture = - new NeuralNetConfiguration.Builder().seed(12345).l2(0.001) //l2 regularization on all layers + NeuralNetConfiguration confForArchitecture = + NeuralNetConfiguration.builder().seed(12345).l2(0.001) //l2 regularization on all layers .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new AdaGrad(0.4)).list() .layer(0, new ConvolutionLayer.Builder(10, 10).nIn(3) //3 channels: RGB @@ -277,13 +279,13 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .inputPreProcessor(3, new CnnToFeedForwardPreProcessor(7, 7, 10)) .inputPreProcessor(4, new FeedForwardToRnnPreProcessor()) .backpropType(BackpropType.TruncatedBPTT) - .tBPTTForwardLength(V_NFRAMES / 5).tBPTTBackwardLength(V_NFRAMES / 5).build(); + .tbpttFwdLength(V_NFRAMES / 5).tbpttBackLength(V_NFRAMES / 5).build(); MultiLayerNetwork modelExpectedArch = new MultiLayerNetwork(confForArchitecture); 
modelExpectedArch.init(); MultiLayerNetwork modelToTweak = new MultiLayerNetwork( - new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration.builder().seed(12345) .updater(new RmsProp(0.1)) .list() .layer(0, new ConvolutionLayer.Builder(10, 10) //Only keep the first layer the same @@ -324,8 +326,8 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .inputPreProcessor(4, new FeedForwardToRnnPreProcessor()) .backpropType(BackpropType.TruncatedBPTT) - .tBPTTForwardLength(V_NFRAMES / 5) - .tBPTTBackwardLength(V_NFRAMES / 5).build()); + .tbpttFwdLength(V_NFRAMES / 5) + .tbpttBackLength(V_NFRAMES / 5).build()); modelToTweak.init(); MultiLayerNetwork modelNow = new TransferLearning.Builder(modelToTweak) @@ -355,27 +357,27 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .setInputPreProcessor(4, new FeedForwardToRnnPreProcessor()).build(); //modelNow should have the same architecture as modelExpectedArch - assertEquals(modelExpectedArch.getLayerWiseConfigurations().getConf(0).toJson(), - modelNow.getLayerWiseConfigurations().getConf(0).toJson()); + assertEquals(modelExpectedArch.getNetConfiguration().getConf(0).toJson(), + modelNow.getNetConfiguration().getConf(0).toJson()); //some learning related info the subsampling layer will not be overwritten - //assertTrue(modelExpectedArch.getLayerWiseConfigurations().getConf(1).toJson().equals(modelNow.getLayerWiseConfigurations().getConf(1).toJson())); - assertEquals(modelExpectedArch.getLayerWiseConfigurations().getConf(2).toJson(), - modelNow.getLayerWiseConfigurations().getConf(2).toJson()); - assertEquals(modelExpectedArch.getLayerWiseConfigurations().getConf(3).toJson(), - modelNow.getLayerWiseConfigurations().getConf(3).toJson()); - assertEquals(modelExpectedArch.getLayerWiseConfigurations().getConf(4).toJson(), - modelNow.getLayerWiseConfigurations().getConf(4).toJson()); - assertEquals(modelExpectedArch.getLayerWiseConfigurations().getConf(5).toJson(), - modelNow.getLayerWiseConfigurations().getConf(5).toJson()); + //assertTrue(modelExpectedArch.getConfiguration().getConf(1).toJson().equals(modelNow.getConfiguration().getConf(1).toJson())); + assertEquals(modelExpectedArch.getNetConfiguration().getConf(2).toJson(), + modelNow.getNetConfiguration().getConf(2).toJson()); + assertEquals(modelExpectedArch.getNetConfiguration().getConf(3).toJson(), + modelNow.getNetConfiguration().getConf(3).toJson()); + assertEquals(modelExpectedArch.getNetConfiguration().getConf(4).toJson(), + modelNow.getNetConfiguration().getConf(4).toJson()); + assertEquals(modelExpectedArch.getNetConfiguration().getConf(5).toJson(), + modelNow.getNetConfiguration().getConf(5).toJson()); - assertArrayEquals(modelExpectedArch.params().shape(), modelNow.params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(0).params().shape(), modelNow.getLayer(0).params().shape()); + assertArrayEquals(modelExpectedArch.getModelParams().shape(), modelNow.getModelParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(0).getParams().shape(), modelNow.getLayer(0).getParams().shape()); //subsampling has no params //assertArrayEquals(modelExpectedArch.getLayer(1).params().shape(), modelNow.getLayer(1).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(2).params().shape(), modelNow.getLayer(2).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(3).params().shape(), modelNow.getLayer(3).params().shape()); - assertArrayEquals(modelExpectedArch.getLayer(4).params().shape(), modelNow.getLayer(4).params().shape()); - 
assertArrayEquals(modelExpectedArch.getLayer(5).params().shape(), modelNow.getLayer(5).params().shape()); + assertArrayEquals(modelExpectedArch.getLayer(2).getParams().shape(), modelNow.getLayer(2).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(3).getParams().shape(), modelNow.getLayer(3).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(4).getParams().shape(), modelNow.getLayer(4).getParams().shape()); + assertArrayEquals(modelExpectedArch.getLayer(5).getParams().shape(), modelNow.getLayer(5).getParams().shape()); } @@ -386,7 +388,7 @@ public class TransferLearningMLNTest extends BaseDL4JTest { DataSet randomData = new DataSet(Nd4j.rand(DataType.FLOAT, 10, 28 * 28 * 3).reshape(10, 3, 28, 28), TestUtils.randomOneHot(DataType.FLOAT,10, 10)); MultiLayerNetwork modelToFineTune = new MultiLayerNetwork( - new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration.builder().seed(123) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)) .list() @@ -413,12 +415,12 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .nOut(100) .activation(Activation.SOFTMAX) .build()) - .setInputType(InputType.convolutionalFlat(28, 28, 3)) + .inputType(InputType.convolutionalFlat(28, 28, 3)) .build()); modelToFineTune.init(); INDArray asFrozenFeatures = modelToFineTune.feedForwardToLayer(2, randomData.getFeatures(), false).get(2); //10x20x12x12 - NeuralNetConfiguration.Builder equivalentConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.2)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder equivalentConf = (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().updater(new Sgd(0.2)) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT); FineTuneConfiguration overallConf = new FineTuneConfiguration.Builder().updater(new Sgd(0.2)) @@ -444,26 +446,26 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .layer(5, new DenseLayer.Builder().activation(Activation.RELU).nOut(50).build()) .layer(6, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).nOut(10) .activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(12, 12, 20)).build()); + .inputType(InputType.convolutionalFlat(12, 12, 20)).build()); notFrozen.init(); - assertArrayEquals(modelToFineTune.getLayer(0).params().shape(), modelNow.getLayer(0).params().shape()); + assertArrayEquals(modelToFineTune.getLayer(0).getParams().shape(), modelNow.getLayer(0).getParams().shape()); //subsampling has no params //assertArrayEquals(modelExpectedArch.getLayer(1).params().shape(), modelNow.getLayer(1).params().shape()); - assertArrayEquals(notFrozen.getLayer(0).params().shape(), modelNow.getLayer(2).params().shape()); - modelNow.getLayer(2).setParams(notFrozen.getLayer(0).params()); + assertArrayEquals(notFrozen.getLayer(0).getParams().shape(), modelNow.getLayer(2).getParams().shape()); + modelNow.getLayer(2).setParams(notFrozen.getLayer(0).getParams()); //subsampling has no params //assertArrayEquals(notFrozen.getLayer(1).params().shape(), modelNow.getLayer(3).params().shape()); - assertArrayEquals(notFrozen.getLayer(2).params().shape(), modelNow.getLayer(4).params().shape()); - modelNow.getLayer(4).setParams(notFrozen.getLayer(2).params()); - assertArrayEquals(notFrozen.getLayer(3).params().shape(), modelNow.getLayer(5).params().shape()); - modelNow.getLayer(5).setParams(notFrozen.getLayer(3).params()); - assertArrayEquals(notFrozen.getLayer(4).params().shape(), 
modelNow.getLayer(6).params().shape()); - modelNow.getLayer(6).setParams(notFrozen.getLayer(4).params()); - assertArrayEquals(notFrozen.getLayer(5).params().shape(), modelNow.getLayer(7).params().shape()); - modelNow.getLayer(7).setParams(notFrozen.getLayer(5).params()); - assertArrayEquals(notFrozen.getLayer(6).params().shape(), modelNow.getLayer(8).params().shape()); - modelNow.getLayer(8).setParams(notFrozen.getLayer(6).params()); + assertArrayEquals(notFrozen.getLayer(2).getParams().shape(), modelNow.getLayer(4).getParams().shape()); + modelNow.getLayer(4).setParams(notFrozen.getLayer(2).getParams()); + assertArrayEquals(notFrozen.getLayer(3).getParams().shape(), modelNow.getLayer(5).getParams().shape()); + modelNow.getLayer(5).setParams(notFrozen.getLayer(3).getParams()); + assertArrayEquals(notFrozen.getLayer(4).getParams().shape(), modelNow.getLayer(6).getParams().shape()); + modelNow.getLayer(6).setParams(notFrozen.getLayer(4).getParams()); + assertArrayEquals(notFrozen.getLayer(5).getParams().shape(), modelNow.getLayer(7).getParams().shape()); + modelNow.getLayer(7).setParams(notFrozen.getLayer(5).getParams()); + assertArrayEquals(notFrozen.getLayer(6).getParams().shape(), modelNow.getLayer(8).getParams().shape()); + modelNow.getLayer(8).setParams(notFrozen.getLayer(6).getParams()); int i = 0; while (i < 3) { @@ -472,8 +474,8 @@ public class TransferLearningMLNTest extends BaseDL4JTest { i++; } - INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer(0).params(), notFrozen.params()); - assertEquals(expectedParams, modelNow.params()); + INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer(0).getParams(), notFrozen.getModelParams()); + assertEquals(expectedParams, modelNow.getModelParams()); } @@ -481,8 +483,8 @@ public class TransferLearningMLNTest extends BaseDL4JTest { public void testFineTuneOverride() { //Check that fine-tune overrides are selective - i.e., if I only specify a new LR, only the LR should be modified - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Adam(1e-4)) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new Adam(1e-4)) .activation(Activation.TANH).weightInit(WeightInit.RELU) .l1(0.1).l2(0.2).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(5).build()).layer(1, @@ -501,34 +503,34 @@ public class TransferLearningMLNTest extends BaseDL4JTest { //Check original net isn't modified: - BaseLayer l0 = (BaseLayer) net.getLayer(0).conf().getLayer(); + BaseLayerConfiguration l0 = (BaseLayerConfiguration) net.getLayer(0).getLayerConfiguration(); assertEquals(new Adam(1e-4), l0.getIUpdater()); assertEquals(Activation.TANH.getActivationFunction(), l0.getActivationFn()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(0.1, TestUtils.getL1(l0), 1e-6); - BaseLayer l1 = (BaseLayer) net.getLayer(1).conf().getLayer(); + BaseLayerConfiguration l1 = (BaseLayerConfiguration) net.getLayer(1).getLayerConfiguration(); assertEquals(new Adam(1e-4), l1.getIUpdater()); assertEquals(Activation.HARDSIGMOID.getActivationFunction(), l1.getActivationFn()); - assertEquals(new WeightInitRelu(), l1.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l1.getWeightInit()); assertEquals(0.2, TestUtils.getL2(l1), 1e-6); assertEquals(BackpropType.Standard, conf.getBackpropType()); //Check new net has only the appropriate things modified (i.e., LR) - l0 = (BaseLayer) net2.getLayer(0).conf().getLayer(); + l0 = 
(BaseLayerConfiguration) net2.getLayer(0).getLayerConfiguration(); assertEquals(new Adam(2e-2), l0.getIUpdater()); assertEquals(Activation.TANH.getActivationFunction(), l0.getActivationFn()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(0.1, TestUtils.getL1(l0), 1e-6); - l1 = (BaseLayer) net2.getLayer(1).conf().getLayer(); + l1 = (BaseLayerConfiguration) net2.getLayer(1).getLayerConfiguration(); assertEquals(new Adam(2e-2), l1.getIUpdater()); assertEquals(Activation.HARDSIGMOID.getActivationFunction(), l1.getActivationFn()); - assertEquals(new WeightInitRelu(), l1.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l1.getWeightInit()); assertEquals(0.2, TestUtils.getL2(l1), 1e-6); - assertEquals(BackpropType.TruncatedBPTT, net2.getLayerWiseConfigurations().getBackpropType()); + assertEquals(BackpropType.TruncatedBPTT, net2.getNetConfiguration().getBackpropType()); } @Test @@ -538,7 +540,7 @@ public class TransferLearningMLNTest extends BaseDL4JTest { DataSet randomData = new DataSet(Nd4j.rand(DataType.FLOAT,10, 28 * 28 * 3).reshape(10, 3, 28, 28), TestUtils.randomOneHot(10, 10)); MultiLayerNetwork modelToFineTune = new MultiLayerNetwork( - new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration.builder().seed(123) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)) .list() @@ -554,12 +556,12 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .layer(5, new DenseLayer.Builder().activation(Activation.RELU).nOut(250).build()) .layer(6, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(100).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 3)) //See note below + .inputType(InputType.convolutionalFlat(28, 28, 3)) //See note below .build()); modelToFineTune.init(); INDArray asFrozenFeatures = modelToFineTune.feedForwardToLayer(2, randomData.getFeatures(), false).get(2); //10x20x12x12 - NeuralNetConfiguration.Builder equivalentConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.2)); + NeuralNetConfiguration.NeuralNetConfigurationBuilder equivalentConf = (NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().updater(new Sgd(0.2)); FineTuneConfiguration overallConf = new FineTuneConfiguration.Builder().updater(new Sgd(0.2)).build(); MultiLayerNetwork modelNow = new TransferLearning.Builder(modelToFineTune).fineTuneConfiguration(overallConf) @@ -584,17 +586,17 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .build()); notFrozen.init(); - assertArrayEquals(modelToFineTune.getLayer(0).params().shape(), modelNow.getLayer(0).params().shape()); + assertArrayEquals(modelToFineTune.getLayer(0).getParams().shape(), modelNow.getLayer(0).getParams().shape()); //subsampling has no params //assertArrayEquals(modelExpectedArch.getLayer(1).params().shape(), modelNow.getLayer(1).params().shape()); - assertArrayEquals(notFrozen.getLayer(0).params().shape(), modelNow.getLayer(2).params().shape()); - modelNow.getLayer(2).setParams(notFrozen.getLayer(0).params()); - assertArrayEquals(notFrozen.getLayer(1).params().shape(), modelNow.getLayer(3).params().shape()); - modelNow.getLayer(3).setParams(notFrozen.getLayer(1).params()); - assertArrayEquals(notFrozen.getLayer(2).params().shape(), modelNow.getLayer(4).params().shape()); - modelNow.getLayer(4).setParams(notFrozen.getLayer(2).params()); - assertArrayEquals(notFrozen.getLayer(3).params().shape(), 
modelNow.getLayer(5).params().shape()); - modelNow.getLayer(5).setParams(notFrozen.getLayer(3).params()); + assertArrayEquals(notFrozen.getLayer(0).getParams().shape(), modelNow.getLayer(2).getParams().shape()); + modelNow.getLayer(2).setParams(notFrozen.getLayer(0).getParams()); + assertArrayEquals(notFrozen.getLayer(1).getParams().shape(), modelNow.getLayer(3).getParams().shape()); + modelNow.getLayer(3).setParams(notFrozen.getLayer(1).getParams()); + assertArrayEquals(notFrozen.getLayer(2).getParams().shape(), modelNow.getLayer(4).getParams().shape()); + modelNow.getLayer(4).setParams(notFrozen.getLayer(2).getParams()); + assertArrayEquals(notFrozen.getLayer(3).getParams().shape(), modelNow.getLayer(5).getParams().shape()); + modelNow.getLayer(5).setParams(notFrozen.getLayer(3).getParams()); int i = 0; while (i < 3) { @@ -603,14 +605,14 @@ public class TransferLearningMLNTest extends BaseDL4JTest { i++; } - INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer(0).params(), notFrozen.params()); - assertEquals(expectedParams, modelNow.params()); + INDArray expectedParams = Nd4j.hstack(modelToFineTune.getLayer(0).getParams(), notFrozen.getModelParams()); + assertEquals(expectedParams, modelNow.getModelParams()); } @Test public void testObjectOverrides(){ //https://github.com/deeplearning4j/deeplearning4j/issues/4368 - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dropOut(0.5) .weightNoise(new DropConnect(0.5)) .l2(0.5) @@ -633,7 +635,7 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .fineTuneConfiguration(ftc) .build(); - DenseLayer l = (DenseLayer) transfer.getLayer(0).conf().getLayer(); + DenseLayer l = (DenseLayer) transfer.getLayer(0).getLayerConfiguration(); assertNull(l.getIDropout()); assertNull(l.getWeightNoise()); @@ -645,10 +647,10 @@ public class TransferLearningMLNTest extends BaseDL4JTest { @Test public void testTransferLearningSubsequent() { final INDArray input = Nd4j.create(6,6,6,6); - final MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder() + final MultiLayerNetwork net = new MultiLayerNetwork(NeuralNetConfiguration.builder() .weightInit(new ConstantDistribution(666)) .list() - .setInputType(InputType.inferInputTypes(input)[0]) + .inputType(InputType.inferInputTypes(input)[0]) .layer(new Convolution2D.Builder(3, 3).nOut(10).build()) .layer(new Convolution2D.Builder(1, 1).nOut(3).build()) .layer(new OutputLayer.Builder().nOut(2).lossFunction(LossFunctions.LossFunction.MSE) @@ -677,9 +679,9 @@ public class TransferLearningMLNTest extends BaseDL4JTest { @Test public void testChangeNOutNIn() { INDArray input = Nd4j.create(new long[] {1, 2, 4, 4}); - MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder() + MultiLayerNetwork net = new MultiLayerNetwork( NeuralNetConfiguration.builder() .list() - .setInputType(InputType.inferInputTypes(input)[0]) + .inputType(InputType.inferInputTypes(input)[0]) .layer(new Convolution2D.Builder(1, 1).nOut(10).build()) .layer(new SubsamplingLayer.Builder(1,1).build()) .layer(new Convolution2D.Builder(1, 1).nOut(7).build()) @@ -703,7 +705,7 @@ public class TransferLearningMLNTest extends BaseDL4JTest { @Test public void testTransferLearningSameDiffLayers(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .activation(Activation.TANH) .updater(new Adam(0.01)) @@ -714,7 
+716,7 @@ public class TransferLearningMLNTest extends BaseDL4JTest { .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build()) .layer(new OutputLayer.Builder().nOut(2).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) - .setInputType(InputType.recurrent(4)) + .inputType(InputType.recurrent(4)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); @@ -733,8 +735,8 @@ public class TransferLearningMLNTest extends BaseDL4JTest { net2.setParam("3_W", net.getParam("3_W")); net2.setParam("3_b", net.getParam("3_b")); - Map p1 = net.paramTable(); - Map p2 = net2.paramTable(); + Map p1 = net.getParamTable(); + Map p2 = net2.getParamTable(); for(String s : p1.keySet()){ INDArray i1 = p1.get(s); INDArray i2 = p2.get(s); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestGradientNormalization.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestGradientNormalization.java index 02616d66d..54d3a3174 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestGradientNormalization.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestGradientNormalization.java @@ -46,15 +46,15 @@ public class TestGradientNormalization extends BaseDL4JTest { public void testRenormalizatonPerLayer() { Nd4j.getRandom().setSeed(12345); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new DenseLayer.Builder().nIn(10).nOut(20) .updater(new NoOp()) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); INDArray gradArray = Nd4j.rand(1, 220).muli(10).subi(5); layer.setBackpropGradientsViewArray(gradArray); INDArray weightGrad = Shape.newShapeNoCopy(gradArray.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, 200)), @@ -92,15 +92,15 @@ public class TestGradientNormalization extends BaseDL4JTest { public void testRenormalizationPerParamType() { Nd4j.getRandom().setSeed(12345); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new DenseLayer.Builder().nIn(10).nOut(20) .updater(new NoOp()) .gradientNormalization(GradientNormalization.RenormalizeL2PerParamType).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(Nd4j.create(params.shape())); Updater updater = UpdaterCreator.getUpdater(layer); INDArray weightGrad = Nd4j.rand(10, 20); @@ -125,15 +125,15 @@ public class TestGradientNormalization extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); double threshold = 3; - NeuralNetConfiguration conf = new 
NeuralNetConfiguration.Builder().layer(
+        NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer(
                         new DenseLayer.Builder().nIn(10).nOut(20).updater(new NoOp())
                                         .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
                                         .gradientNormalizationThreshold(threshold).build())
                         .build();
-        long numParams = conf.getLayer().initializer().numParams(conf);
+        long numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf);
         INDArray params = Nd4j.create(1, numParams);
-        Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType());
+        Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType());
         INDArray gradArray = Nd4j.rand(1, 220).muli(10).subi(5);
         layer.setBackpropGradientsViewArray(gradArray);
         INDArray weightGrad = Shape.newShapeNoCopy(gradArray.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, 200)),
@@ -181,15 +181,15 @@ public class TestGradientNormalization extends BaseDL4JTest {
         //t=0: small -> no clipping
         //t=1: large -> clipping
-        NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().layer(
+        NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer(
                         new DenseLayer.Builder().nIn(10).nOut(20).updater(new NoOp())
                                         .gradientNormalization(GradientNormalization.ClipL2PerLayer)
                                         .gradientNormalizationThreshold(threshold).build())
                         .build();
-        val numParams = conf.getLayer().initializer().numParams(conf);
+        val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf);
         INDArray params = Nd4j.create(1, numParams);
-        Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType());
+        Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType());
         INDArray gradArray = Nd4j.rand(1, 220).muli(t == 0 ? 0.05 : 10).subi(t == 0 ?
0 : 5); layer.setBackpropGradientsViewArray(gradArray); INDArray weightGrad = @@ -236,15 +236,15 @@ public class TestGradientNormalization extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345); double threshold = 3; - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().layer( + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().layer( new DenseLayer.Builder().nIn(10).nOut(20).updater(new NoOp()) .gradientNormalization(GradientNormalization.ClipL2PerParamType) .gradientNormalizationThreshold(threshold).build()) .build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFlattenedLayerConfigurations().get(0).initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + Layer layer = conf.getFlattenedLayerConfigurations().get(0).instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(Nd4j.create(params.shape())); Updater updater = UpdaterCreator.getUpdater(layer); INDArray weightGrad = Nd4j.rand(10, 20).muli(0.05); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestUpdaters.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestUpdaters.java index 462143897..ce7d713dc 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestUpdaters.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/TestUpdaters.java @@ -26,7 +26,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder; @@ -38,6 +37,7 @@ import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.params.DefaultParamInitializer; import org.deeplearning4j.nn.params.PretrainParamInitializer; import org.deeplearning4j.nn.updater.graph.ComputationGraphUpdater; +import org.deeplearning4j.nn.weights.WeightInit; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; @@ -89,18 +89,18 @@ public class TestUpdaters extends BaseDL4JTest { double rho = 0.85; - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut) .updater(new AdaDelta(rho, Nd4j.EPS_THRESHOLD)) .build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); 
updater.setStateViewArray(layer, updaterState, true); @@ -145,7 +145,7 @@ public class TestUpdaters extends BaseDL4JTest { msdx.put(key, msdxTmp); count++; } - assertEquals(rho, ((AdaDelta)layer.layerConf().getIUpdater()).getRho(), 1e-4); + assertEquals(rho, ((AdaDelta)layer.getTypedLayerConfiguration().getIUpdater()).getRho(), 1e-4); } assertEquals(4, count); @@ -157,16 +157,16 @@ public class TestUpdaters extends BaseDL4JTest { double epsilon = AdaGrad.DEFAULT_ADAGRAD_EPSILON; NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new AdaGrad(lr)) + NeuralNetConfiguration.builder().updater(new AdaGrad(lr)) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); updater.setStateViewArray(layer, updaterState, true); @@ -186,7 +186,7 @@ public class TestUpdaters extends BaseDL4JTest { assertEquals(gradExpected, gradient.getGradientFor(entry.getKey())); count++; } - assertEquals(lr, ((AdaGrad)layer.layerConf().getIUpdater()).getLearningRate(), 1e-4); + assertEquals(lr, ((AdaGrad)layer.getTypedLayerConfiguration().getIUpdater()).getLearningRate(), 1e-4); assertEquals(2, count); } @@ -201,16 +201,16 @@ public class TestUpdaters extends BaseDL4JTest { double epsilon = Adam.DEFAULT_ADAM_EPSILON; - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Adam(lr, beta1, beta2, Adam.DEFAULT_ADAM_EPSILON)) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Adam(lr, beta1, beta2, Adam.DEFAULT_ADAM_EPSILON)) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); updater.setStateViewArray(layer, updaterState, true); @@ -246,8 +246,8 @@ public class TestUpdaters extends BaseDL4JTest { count++; } - assertEquals(beta1, ((Adam)layer.layerConf().getIUpdater()).getBeta1(), 1e-4); - assertEquals(beta2, ((Adam)layer.layerConf().getIUpdater()).getBeta2(), 1e-4); + assertEquals(beta1, ((Adam)layer.getTypedLayerConfiguration().getIUpdater()).getBeta1(), 1e-4); + assertEquals(beta2, 
((Adam)layer.getTypedLayerConfiguration().getIUpdater()).getBeta2(), 1e-4); assertEquals(2, count); } @@ -261,20 +261,20 @@ public class TestUpdaters extends BaseDL4JTest { double epsilon = Nadam.DEFAULT_NADAM_EPSILON; NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration.builder() .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut) .updater(Nadam.builder().learningRate(lr).beta1(beta1) .beta2(beta2).epsilon(epsilon).build()) .build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); updater.setStateViewArray(layer, updaterState, true); @@ -353,17 +353,17 @@ public class TestUpdaters extends BaseDL4JTest { double beta2 = 0.888; double epsilon = AdaMax.DEFAULT_ADAMAX_EPSILON; - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new AdaMax(lr, beta1, beta2, AdaMax.DEFAULT_ADAMAX_EPSILON)) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); updater.setStateViewArray(layer, updaterState, true); @@ -399,8 +399,8 @@ public class TestUpdaters extends BaseDL4JTest { count++; } - assertEquals(beta1, ((AdaMax)layer.layerConf().getIUpdater()).getBeta1(), 1e-4); - assertEquals(beta2, ((AdaMax)layer.layerConf().getIUpdater()).getBeta2(), 1e-4); + assertEquals(beta1, ((AdaMax)layer.getTypedLayerConfiguration().getIUpdater()).getBeta1(), 1e-4); + assertEquals(beta2, ((AdaMax)layer.getTypedLayerConfiguration().getIUpdater()).getBeta2(), 1e-4); assertEquals(2, count); } @@ -410,16 +410,16 @@ public class TestUpdaters extends BaseDL4JTest { double mu = 0.6; NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Nesterovs(lr, mu)) + NeuralNetConfiguration.builder().updater(new Nesterovs(lr, mu)) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) 
conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); updater.setStateViewArray(layer, updaterState, true); @@ -444,7 +444,7 @@ public class TestUpdaters extends BaseDL4JTest { count++; } - assertEquals(mu, ((Nesterovs)layer.layerConf().getIUpdater()).getMomentum(), 1e-4); + assertEquals(mu, ((Nesterovs)layer.getTypedLayerConfiguration().getIUpdater()).getMomentum(), 1e-4); assertEquals(2, count); } @@ -457,16 +457,16 @@ public class TestUpdaters extends BaseDL4JTest { NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new RmsProp(lr,rmsDecay, RmsProp.DEFAULT_RMSPROP_EPSILON)) + NeuralNetConfiguration.builder().updater(new RmsProp(lr,rmsDecay, RmsProp.DEFAULT_RMSPROP_EPSILON)) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); - int updaterStateSize = (int) layer.layerConf().getIUpdater().stateSize(numParams); + int updaterStateSize = (int) layer.getTypedLayerConfiguration().getIUpdater().stateSize(numParams); INDArray updaterState = Nd4j.create(1, updaterStateSize); updater.setStateViewArray(layer, updaterState, true); @@ -496,7 +496,7 @@ public class TestUpdaters extends BaseDL4JTest { assertEquals(gradExpected, gradient.getGradientFor(entry.getKey())); lastG.put(key, lastGTmp); } - assertEquals(rmsDecay, ((RmsProp)layer.layerConf().getIUpdater()).getRmsDecay(), 1e-4); + assertEquals(rmsDecay, ((RmsProp)layer.getTypedLayerConfiguration().getIUpdater()).getRmsDecay(), 1e-4); } @Test @@ -504,13 +504,13 @@ public class TestUpdaters extends BaseDL4JTest { double lr = 0.05; NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Sgd(lr)) + NeuralNetConfiguration.builder().updater(new Sgd(lr)) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); @@ -528,7 +528,7 @@ public class TestUpdaters extends BaseDL4JTest { gradExpected = val.mul(lr); assertEquals(gradExpected, gradient.getGradientFor(entry.getKey())); } - assertEquals(lr, ((Sgd)layer.layerConf().getIUpdater()).getLearningRate(), 1e-4); + assertEquals(lr, 
((Sgd)layer.getTypedLayerConfiguration().getIUpdater()).getLearningRate(), 1e-4); } @@ -538,13 +538,13 @@ public class TestUpdaters extends BaseDL4JTest { double lr = 0.5; NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration.builder().updater(new NoOp()) .layer(new DenseLayer.Builder().nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - Layer layer = conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + Layer layer = conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); @@ -574,7 +574,7 @@ public class TestUpdaters extends BaseDL4JTest { Nd4j.getRandom().setSeed(12345L); double lr = 0.03; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(5).updater(new Sgd(lr)).build()) .layer(1, new DenseLayer.Builder().nIn(5).nOut(6) .updater(new NoOp()).build()) @@ -675,7 +675,7 @@ public class TestUpdaters extends BaseDL4JTest { int nIn = 4; int nOut = 8; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Nesterovs(lr,0.6)).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Nesterovs(lr,0.6)).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(5) .updater(org.deeplearning4j.nn.conf.Updater.SGD).build()) .layer(1, new DenseLayer.Builder().nIn(5).nOut(6) @@ -706,7 +706,7 @@ public class TestUpdaters extends BaseDL4JTest { int nIn = 4; int nOut = 8; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Nesterovs(lr,0.6)).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Nesterovs(lr,0.6)).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(5) .updater(org.deeplearning4j.nn.conf.Updater.SGD).build()) .layer(1, new DenseLayer.Builder().nIn(5).nOut(6) @@ -743,14 +743,14 @@ public class TestUpdaters extends BaseDL4JTest { gradient.setGradientFor(PretrainParamInitializer.VISIBLE_BIAS_KEY, vbiasGradient); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(lr)).seed(42) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new Sgd(lr)).seed(42) .layer(new AutoEncoder.Builder() .lossFunction(LossFunctions.LossFunction.COSINE_PROXIMITY) .activation(Activation.IDENTITY).nIn(nIn).nOut(nOut).build()) .build(); - long numParams = conf.getLayer().initializer().numParams(conf); + long numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - BaseLayer layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + BaseLayer layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); Updater updater = UpdaterCreator.getUpdater(layer); @@ -770,7 +770,7 @@ public class TestUpdaters extends BaseDL4JTest { gradExpected = val.mul(lr); assertEquals(gradExpected, gradient.getGradientFor(entry.getKey())); } - assertEquals(lr, ((Sgd)layer.layerConf().getIUpdater()).getLearningRate(), 1e-4); + assertEquals(lr, 
((Sgd)layer.getTypedLayerConfiguration().getIUpdater()).getLearningRate(), 1e-4); //Test with pretrain == false @@ -795,10 +795,10 @@ public class TestUpdaters extends BaseDL4JTest { gradientCopyPreUpdate.setFlattenedGradient(g); params = Nd4j.create(1, numParams); - layer = (BaseLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + layer = (BaseLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); layer.setBackpropGradientsViewArray(gradients); updater = UpdaterCreator.getUpdater(layer); - assertEquals(lr, ((Sgd)layer.layerConf().getIUpdater()).getLearningRate(), 1e-4); + assertEquals(lr, ((Sgd)layer.getTypedLayerConfiguration().getIUpdater()).getLearningRate(), 1e-4); } @Test @@ -807,7 +807,7 @@ public class TestUpdaters extends BaseDL4JTest { List blocks; if (i == 0) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).name("l0") .updater(new Adam(0.5)).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).name("l1") @@ -827,7 +827,7 @@ public class TestUpdaters extends BaseDL4JTest { MultiLayerUpdater u = (MultiLayerUpdater) net.getUpdater(); blocks = u.getUpdaterBlocks(); } else { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder().addInputs("in") .addLayer("l0", new DenseLayer.Builder().nIn(10).nOut(10) .updater(new Adam(0.5)).build(), "in") @@ -859,11 +859,11 @@ public class TestUpdaters extends BaseDL4JTest { //Check first updater block: UpdaterBlock ub0 = blocks.get(0); assertEquals(3, ub0.getLayersAndVariablesInBlock().size()); - assertEquals("l0", ub0.getLayersAndVariablesInBlock().get(0).getLayer().getConfig().getLayerName()); + assertEquals("l0", ub0.getLayersAndVariablesInBlock().get(0).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.WEIGHT_KEY, ub0.getLayersAndVariablesInBlock().get(0).getParamName()); - assertEquals("l0", ub0.getLayersAndVariablesInBlock().get(1).getLayer().getConfig().getLayerName()); + assertEquals("l0", ub0.getLayersAndVariablesInBlock().get(1).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.BIAS_KEY, ub0.getLayersAndVariablesInBlock().get(1).getParamName()); - assertEquals("l1", ub0.getLayersAndVariablesInBlock().get(2).getLayer().getConfig().getLayerName()); + assertEquals("l1", ub0.getLayersAndVariablesInBlock().get(2).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.WEIGHT_KEY, ub0.getLayersAndVariablesInBlock().get(2).getParamName()); int nParams0 = 10 * 10 + 10 + 10 * 10; @@ -876,7 +876,7 @@ public class TestUpdaters extends BaseDL4JTest { //Check second updater block: UpdaterBlock ub1 = blocks.get(1); assertEquals(1, ub1.getLayersAndVariablesInBlock().size()); - assertEquals("l1", ub1.getLayersAndVariablesInBlock().get(0).getLayer().getConfig().getLayerName()); + assertEquals("l1", ub1.getLayersAndVariablesInBlock().get(0).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.BIAS_KEY, ub1.getLayersAndVariablesInBlock().get(0).getParamName()); int nParams1 = 10; @@ -889,9 +889,9 @@ public class TestUpdaters extends BaseDL4JTest { //Check third updater block: UpdaterBlock ub2 = blocks.get(2); assertEquals(2, ub2.getLayersAndVariablesInBlock().size()); - assertEquals("l2", 
ub2.getLayersAndVariablesInBlock().get(0).getLayer().getConfig().getLayerName()); + assertEquals("l2", ub2.getLayersAndVariablesInBlock().get(0).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.WEIGHT_KEY, ub2.getLayersAndVariablesInBlock().get(0).getParamName()); - assertEquals("l2", ub2.getLayersAndVariablesInBlock().get(1).getLayer().getConfig().getLayerName()); + assertEquals("l2", ub2.getLayersAndVariablesInBlock().get(1).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.BIAS_KEY, ub2.getLayersAndVariablesInBlock().get(1).getParamName()); int nParams2 = 10 * 10 + 10; @@ -904,9 +904,9 @@ public class TestUpdaters extends BaseDL4JTest { //Check fourth updater block: UpdaterBlock ub3 = blocks.get(3); assertEquals(2, ub3.getLayersAndVariablesInBlock().size()); - assertEquals("l3", ub3.getLayersAndVariablesInBlock().get(0).getLayer().getConfig().getLayerName()); + assertEquals("l3", ub3.getLayersAndVariablesInBlock().get(0).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.WEIGHT_KEY, ub3.getLayersAndVariablesInBlock().get(0).getParamName()); - assertEquals("l3", ub3.getLayersAndVariablesInBlock().get(1).getLayer().getConfig().getLayerName()); + assertEquals("l3", ub3.getLayersAndVariablesInBlock().get(1).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.BIAS_KEY, ub3.getLayersAndVariablesInBlock().get(1).getParamName()); int nParams3 = 10 * 10 + 10; @@ -919,9 +919,9 @@ public class TestUpdaters extends BaseDL4JTest { //Check fifth updater black UpdaterBlock ub4 = blocks.get(4); assertEquals(2, ub4.getLayersAndVariablesInBlock().size()); - assertEquals("l4", ub4.getLayersAndVariablesInBlock().get(0).getLayer().getConfig().getLayerName()); + assertEquals("l4", ub4.getLayersAndVariablesInBlock().get(0).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.WEIGHT_KEY, ub4.getLayersAndVariablesInBlock().get(0).getParamName()); - assertEquals("l4", ub4.getLayersAndVariablesInBlock().get(1).getLayer().getConfig().getLayerName()); + assertEquals("l4", ub4.getLayersAndVariablesInBlock().get(1).getLayer().getTrainingConfig().getLayerName()); assertEquals(DefaultParamInitializer.BIAS_KEY, ub4.getLayersAndVariablesInBlock().get(1).getParamName()); int nParams4 = 10 * 10 + 10; @@ -940,8 +940,10 @@ public class TestUpdaters extends BaseDL4JTest { public void testUpdaterBlockVae() { List blocks; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Adam(0.5)).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() + .updater(new Adam(0.5)) + .weightInit(WeightInit.NORMAL) .layer(0, new VariationalAutoencoder.Builder().nIn(8).nOut(12) .encoderLayerSizes(10, 11).decoderLayerSizes(13, 14).build()) .build(); @@ -981,7 +983,7 @@ public class TestUpdaters extends BaseDL4JTest { public void testDivisionByMinibatch1(){ //No batch norm - should be single INDArray equal to flattened gradient view - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(new DenseLayer.Builder().nIn(10).nOut(10).build()) @@ -1008,7 +1010,7 @@ public class TestUpdaters extends BaseDL4JTest { //With batch norm - should be multiple 'division by minibatch' array segments //i.e., exclude batch norm mean/variance - MultiLayerConfiguration conf = new 
NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new DenseLayer.Builder().nIn(10).nOut(9).build()) .layer(new BatchNormalization.Builder().nOut(9).build()) @@ -1059,7 +1061,7 @@ public class TestUpdaters extends BaseDL4JTest { //With batch norm - should be multiple 'division by minibatch' array segments //i.e., exclude batch norm mean/variance - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new BatchNormalization.Builder().nOut(6).build()) .layer(new ConvolutionLayer.Builder().nIn(6).nOut(5).kernelSize(2,2).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/custom/TestCustomUpdater.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/custom/TestCustomUpdater.java index 170c6bdc1..e52b126f2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/custom/TestCustomUpdater.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/updater/custom/TestCustomUpdater.java @@ -21,9 +21,8 @@ package org.deeplearning4j.nn.updater.custom; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -47,7 +46,7 @@ public class TestCustomUpdater extends BaseDL4JTest { double lr = 0.03; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf1 = NeuralNetConfiguration.builder().seed(12345) .activation(Activation.TANH).updater(new CustomIUpdater(lr)) //Specify custom IUpdater .list().layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10) @@ -55,32 +54,32 @@ public class TestCustomUpdater extends BaseDL4JTest { .build(); Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf2 = NeuralNetConfiguration.builder().seed(12345) .activation(Activation.TANH).updater(new Sgd(lr)).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1, new OutputLayer.Builder() .nIn(10).nOut(10).lossFunction(LossFunctions.LossFunction.MSE).build()) .build(); //First: Check updater config - assertTrue(((BaseLayer) conf1.getConf(0).getLayer()).getIUpdater() instanceof CustomIUpdater); - assertTrue(((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater() instanceof CustomIUpdater); - assertTrue(((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater() instanceof Sgd); - assertTrue(((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater() instanceof Sgd); + assertTrue(((BaseLayerConfiguration) conf1.getConf(0).getLayer()).getIUpdater() instanceof CustomIUpdater); + assertTrue(((BaseLayerConfiguration) conf1.getConf(1).getLayer()).getIUpdater() instanceof CustomIUpdater); + assertTrue(((BaseLayerConfiguration) conf2.getConf(0).getLayer()).getIUpdater() instanceof Sgd); + assertTrue(((BaseLayerConfiguration) conf2.getConf(1).getLayer()).getIUpdater() instanceof Sgd); - CustomIUpdater u0_0 = (CustomIUpdater) ((BaseLayer) 
conf1.getConf(0).getLayer()).getIUpdater(); - CustomIUpdater u0_1 = (CustomIUpdater) ((BaseLayer) conf1.getConf(1).getLayer()).getIUpdater(); + CustomIUpdater u0_0 = (CustomIUpdater) ((BaseLayerConfiguration) conf1.getConf(0).getLayer()).getIUpdater(); + CustomIUpdater u0_1 = (CustomIUpdater) ((BaseLayerConfiguration) conf1.getConf(1).getLayer()).getIUpdater(); assertEquals(lr, u0_0.getLearningRate(), 1e-6); assertEquals(lr, u0_1.getLearningRate(), 1e-6); - Sgd u1_0 = (Sgd) ((BaseLayer) conf2.getConf(0).getLayer()).getIUpdater(); - Sgd u1_1 = (Sgd) ((BaseLayer) conf2.getConf(1).getLayer()).getIUpdater(); + Sgd u1_0 = (Sgd) ((BaseLayerConfiguration) conf2.getConf(0).getLayer()).getIUpdater(); + Sgd u1_1 = (Sgd) ((BaseLayerConfiguration) conf2.getConf(1).getLayer()).getIUpdater(); assertEquals(lr, u1_0.getLearningRate(), 1e-6); assertEquals(lr, u1_1.getLearningRate(), 1e-6); //Second: check JSON String asJson = conf1.toJson(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(asJson); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(asJson); assertEquals(conf1, fromJson); Nd4j.getRandom().setSeed(12345); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/weights/WeightInitIdentityTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/weights/WeightInitIdentityTest.java index 8b9b35e4f..b5becc819 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/weights/WeightInitIdentityTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/nn/weights/WeightInitIdentityTest.java @@ -48,7 +48,7 @@ public class WeightInitIdentityTest extends BaseDL4JTest { final String inputName = "input"; final String conv = "conv"; final String output = "output"; - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() .graphBuilder() .addInputs(inputName) .setOutputs(output) @@ -76,7 +76,7 @@ public class WeightInitIdentityTest extends BaseDL4JTest { final String inputName = "input"; final String conv = "conv"; final String output = "output"; - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() .graphBuilder() .setInputTypes(InputType.inferInputType(input)) .addInputs(inputName) @@ -103,7 +103,7 @@ public class WeightInitIdentityTest extends BaseDL4JTest { final String inputName = "input"; final String conv = "conv"; final String output = "output"; - final ComputationGraph graph = new ComputationGraph(new NeuralNetConfiguration.Builder() + final ComputationGraph graph = new ComputationGraph(NeuralNetConfiguration.builder() .graphBuilder() .setInputTypes(InputType.inferInputType(input)) .addInputs(inputName) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/BackTrackLineSearchTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/BackTrackLineSearchTest.java index 8b73c10ee..91101fccc 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/BackTrackLineSearchTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/BackTrackLineSearchTest.java @@ -24,7 +24,6 @@ import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import 
org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.layers.OutputLayer; @@ -82,7 +81,7 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); BackTrackLineSearch lineSearch = new BackTrackLineSearch(layer, layer.getOptimizer()); - double step = lineSearch.optimize(layer.params(), layer.gradient().gradient(), layer.gradient().gradient(), LayerWorkspaceMgr.noWorkspacesImmutable()); + double step = lineSearch.optimize(layer.getModelParams(), layer.gradient().gradient(), layer.gradient().gradient(), LayerWorkspaceMgr.noWorkspacesImmutable()); assertEquals(1.0, step, 1e-3); } @@ -98,11 +97,11 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { layer.setInput(irisData.getFeatures(), LayerWorkspaceMgr.noWorkspaces()); layer.setLabels(irisData.getLabels()); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - score1 = layer.score(); + score1 = layer.getScore(); BackTrackLineSearch lineSearch = new BackTrackLineSearch(layer, new NegativeDefaultStepFunction(), layer.getOptimizer()); - double step = lineSearch.optimize(layer.params(), layer.gradient().gradient(), layer.gradient().gradient(), LayerWorkspaceMgr.noWorkspacesImmutable()); + double step = lineSearch.optimize(layer.getModelParams(), layer.gradient().gradient(), layer.gradient().gradient(), LayerWorkspaceMgr.noWorkspacesImmutable()); assertEquals(1.0, step, 1e-3); } @@ -119,18 +118,18 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { layer.setInput(irisData.getFeatures(), LayerWorkspaceMgr.noWorkspaces()); layer.setLabels(irisData.getLabels()); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - score1 = layer.score(); + score1 = layer.getScore(); INDArray origGradient = layer.gradient().gradient().dup(); NegativeDefaultStepFunction sf = new NegativeDefaultStepFunction(); BackTrackLineSearch lineSearch = new BackTrackLineSearch(layer, sf, layer.getOptimizer()); - double step = lineSearch.optimize(layer.params(), layer.gradient().gradient(), layer.gradient().gradient(), LayerWorkspaceMgr.noWorkspacesImmutable()); - INDArray currParams = layer.params(); + double step = lineSearch.optimize(layer.getModelParams(), layer.gradient().gradient(), layer.gradient().gradient(), LayerWorkspaceMgr.noWorkspacesImmutable()); + INDArray currParams = layer.getModelParams(); sf.step(currParams, origGradient, step); - layer.setParams(currParams); + layer.setParamsTable(currParams); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - score2 = layer.score(); + score2 = layer.getScore(); assertTrue(score1 > score2, "score1=" + score1 + ", score2=" + score2); @@ -147,19 +146,19 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { layer.setInput(irisData.getFeatures(), LayerWorkspaceMgr.noWorkspaces()); layer.setLabels(irisData.getLabels()); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - score1 = layer.score(); + score1 = layer.getScore(); INDArray origGradient = layer.gradient().gradient().dup(); DefaultStepFunction sf = new DefaultStepFunction(); BackTrackLineSearch lineSearch = new BackTrackLineSearch(layer, sf, layer.getOptimizer()); - double step = lineSearch.optimize(layer.params().dup(), layer.gradient().gradient().dup(), + double step = lineSearch.optimize(layer.getModelParams().dup(), 
layer.gradient().gradient().dup(), layer.gradient().gradient().dup(), LayerWorkspaceMgr.noWorkspacesImmutable()); - INDArray currParams = layer.params(); + INDArray currParams = layer.getModelParams(); sf.step(currParams, origGradient, step); - layer.setParams(currParams); + layer.setParamsTable(currParams); layer.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - score2 = layer.score(); + score2 = layer.getScore(); assertTrue(score1 < score2, "score1 = " + score1 + ", score2 = " + score2); } @@ -167,16 +166,16 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { private static OutputLayer getIrisLogisticLayerConfig(Activation activationFunction, int maxIterations, LossFunctions.LossFunction lossFunction) { NeuralNetConfiguration conf = - new NeuralNetConfiguration.Builder().seed(12345L).miniBatch(true) + NeuralNetConfiguration.builder().seed(12345L).miniBatch(true) .maxNumLineSearchIterations(maxIterations) .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(lossFunction) .nIn(4).nOut(3).activation(activationFunction) .weightInit(WeightInit.XAVIER).build()) .build(); - val numParams = conf.getLayer().initializer().numParams(conf); + val numParams = conf.getFirstLayer().initializer().numParams(conf); INDArray params = Nd4j.create(1, numParams); - return (OutputLayer) conf.getLayer().instantiate(conf, null, 0, params, true, params.dataType()); + return (OutputLayer) conf.getFirstLayer().instantiate(conf, null, 0, params, true, params.dataType()); } /////////////////////////////////////////////////////////////////////////// @@ -191,12 +190,12 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { MultiLayerNetwork network = new MultiLayerNetwork(getIrisMultiLayerConfig(Activation.SIGMOID, optimizer)); network.init(); TrainingListener listener = new ScoreIterationListener(10); - network.setListeners(Collections.singletonList(listener)); + network.addTrainingListeners(Collections.singletonList(listener)); double oldScore = network.score(data); for( int i=0; i<100; i++ ) { network.fit(data.getFeatures(), data.getLabels()); } - double score = network.score(); + double score = network.getScore(); assertTrue(score < oldScore); } @@ -209,13 +208,13 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { MultiLayerNetwork network = new MultiLayerNetwork(getIrisMultiLayerConfig(Activation.RELU, optimizer)); network.init(); TrainingListener listener = new ScoreIterationListener(10); - network.setListeners(Collections.singletonList(listener)); + network.addTrainingListeners(Collections.singletonList(listener)); double firstScore = network.score(data); for( int i=0; i<5; i++ ) { network.fit(data.getFeatures(), data.getLabels()); } - double score = network.score(); + double score = network.getScore(); assertTrue(score < firstScore); } @@ -228,19 +227,19 @@ public class BackTrackLineSearchTest extends BaseDL4JTest { MultiLayerNetwork network = new MultiLayerNetwork(getIrisMultiLayerConfig(Activation.RELU, optimizer)); network.init(); TrainingListener listener = new ScoreIterationListener(10); - network.setListeners(Collections.singletonList(listener)); + network.addTrainingListeners(Collections.singletonList(listener)); double oldScore = network.score(data); for( int i=0; i<5; i++ ) { network.fit(data.getFeatures(), data.getLabels()); } - double score = network.score(); + double score = network.getScore(); assertTrue(score < oldScore); } - private static MultiLayerConfiguration getIrisMultiLayerConfig(Activation activationFunction, OptimizationAlgorithm 
optimizer) { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().optimizationAlgo(optimizer) + private static NeuralNetConfiguration getIrisMultiLayerConfig(Activation activationFunction, OptimizationAlgorithm optimizer) { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().optimizationAlgo(optimizer) .updater(new Adam(0.01)).seed(12345L).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(100).weightInit(WeightInit.XAVIER) .activation(activationFunction).build()) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/TestOptimizers.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/TestOptimizers.java index 5b7bec134..7883c899f 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/TestOptimizers.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimize/solver/TestOptimizers.java @@ -20,14 +20,17 @@ package org.deeplearning4j.optimize.solver; +import lombok.NonNull; import lombok.val; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.*; import org.deeplearning4j.nn.conf.CacheMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; @@ -43,6 +46,7 @@ import org.deeplearning4j.optimize.solvers.LineGradientDescent; import org.deeplearning4j.optimize.solvers.StochasticGradientDescent; import org.deeplearning4j.optimize.stepfunctions.NegativeDefaultStepFunction; import org.junit.jupiter.api.Test; +import org.nd4j.evaluation.IEvaluation; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; @@ -51,7 +55,9 @@ import org.nd4j.linalg.api.ops.impl.transforms.strict.Sin; import org.nd4j.linalg.api.rng.DefaultRandom; import org.nd4j.linalg.api.rng.Random; import org.nd4j.linalg.dataset.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; import org.nd4j.linalg.exception.ND4JArraySizeException; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.conditions.Condition; @@ -134,8 +140,8 @@ public class TestOptimizers extends BaseDL4JTest { } } - private static MultiLayerConfiguration getMLPConfigIris(OptimizationAlgorithm oa) { - MultiLayerConfiguration c = new NeuralNetConfiguration.Builder().optimizationAlgo(oa) + private static NeuralNetConfiguration getMLPConfigIris(OptimizationAlgorithm oa) { + NeuralNetConfiguration c = NeuralNetConfiguration.builder().optimizationAlgo(oa) .updater(new AdaGrad(1e-1)).seed(12345L) .list().layer(0, new DenseLayer.Builder().nIn(4).nOut(3).weightInit(WeightInit.XAVIER) @@ -206,38 +212,38 @@ public class TestOptimizers extends BaseDL4JTest { System.out.println("---------\n Alg= " + oa + ", nIter= " + numLineSearchIter + ", nDimensions= " + nDimensions); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().maxNumLineSearchIterations(numLineSearchIter) + 
LayerConfiguration conf = NeuralNetConfiguration.builder().maxNumLineSearchIterations(numLineSearchIter) .updater(new Sgd(1e-2)) - .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build(); + .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build().getFlattenedLayerConfigurations().get(0); conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here Random rng = new DefaultRandom(12345L); org.nd4j.linalg.api.rng.distribution.Distribution dist = new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10); - Model m = new SphereFunctionModel(nDimensions, dist, conf); + IModel m = new SphereFunctionModel(nDimensions, dist, conf); m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - double scoreBefore = m.score(); + double scoreBefore = m.getScore(); assertTrue(!Double.isNaN(scoreBefore) && !Double.isInfinite(scoreBefore)); if (PRINT_OPT_RESULTS) { System.out.println("Before:"); System.out.println(scoreBefore); - System.out.println(m.params()); + System.out.println(m.getModelParams()); } - ConvexOptimizer opt = getOptimizer(oa, conf, m); + ConvexOptimizer opt = getOptimizer(oa, conf.getNetConfiguration(), m); opt.setupSearchState(m.gradientAndScore()); for( int i=0; i<100; i++ ) { opt.optimize(LayerWorkspaceMgr.noWorkspaces()); } m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - double scoreAfter = m.score(); + double scoreAfter = m.getScore(); assertTrue(!Double.isNaN(scoreAfter) && !Double.isInfinite(scoreAfter)); if (PRINT_OPT_RESULTS) { System.out.println("After:"); System.out.println(scoreAfter); - System.out.println(m.params()); + System.out.println(m.getModelParams()); } //Expected behaviour after optimization: @@ -246,7 +252,7 @@ public class TestOptimizers extends BaseDL4JTest { assertTrue( scoreAfter < scoreBefore, "Score did not improve after optimization (b= " + scoreBefore + " ,a= " + scoreAfter + ")"); } - private static ConvexOptimizer getOptimizer(OptimizationAlgorithm oa, NeuralNetConfiguration conf, Model m) { + private static ConvexOptimizer getOptimizer(OptimizationAlgorithm oa, NeuralNetConfiguration conf, IModel m) { switch (oa) { case STOCHASTIC_GRADIENT_DESCENT: return new StochasticGradientDescent(conf, new NegativeDefaultStepFunction(), null, m); @@ -269,22 +275,22 @@ public class TestOptimizers extends BaseDL4JTest { Random rng = new DefaultRandom(12345L); org.nd4j.linalg.api.rng.distribution.Distribution dist = new org.nd4j.linalg.api.rng.distribution.impl.UniformDistribution(rng, -10, 10); - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .maxNumLineSearchIterations(maxNumLineSearchIter).updater(new Sgd(0.1)) .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build(); - conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here + conf.addNetWideVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here - Model m = new SphereFunctionModel(100, dist, conf); + IModel m = new SphereFunctionModel(100, dist, conf.getFlattenedLayerConfigurations().get(0)); if (i == 0) { m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - scores[0] = m.score(); //Before optimization + scores[0] = m.getScore(); //Before optimization } else { ConvexOptimizer opt = getOptimizer(oa, conf, m); for( int j=0; j<100; j++ ) { opt.optimize(LayerWorkspaceMgr.noWorkspaces()); } m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - scores[i] 
= m.score(); + scores[i] = m.getScore(); assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i])); } } @@ -311,11 +317,95 @@ public class TestOptimizers extends BaseDL4JTest { private static final long serialVersionUID = -6963606137417355405L; private SphereFunctionModel(int nParams, org.nd4j.linalg.api.rng.distribution.Distribution distribution, - NeuralNetConfiguration conf) { + LayerConfiguration conf) { super(distribution.sample(new int[] {1, nParams}), conf); } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(org.nd4j.linalg.dataset.api.DataSet dataSet) { + + } + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { + + } + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) { + + } + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { + + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, + T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, + T... evaluations) { + return null; + } + + /** + * @param netConfiguration + */ + @Override + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { + + } + @Override public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { // Gradients: d(x^2)/dx = 2x @@ -348,7 +438,7 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... 
listeners) { } @@ -404,23 +494,23 @@ public class TestOptimizers extends BaseDL4JTest { double[] scores = new double[nOptIter + 1]; for (int i = 0; i <= nOptIter; i++) { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .maxNumLineSearchIterations(maxNumLineSearchIter).miniBatch(false) .updater(new AdaGrad(1e-2)) .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()).build(); - conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here + conf.addNetWideVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here - Model m = new RastriginFunctionModel(10, conf); + IModel m = new RastriginFunctionModel(10, conf.getFlattenedLayerConfigurations().get(0)); int nParams = (int)m.numParams(); if (i == 0) { m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - scores[0] = m.score(); //Before optimization + scores[0] = m.getScore(); //Before optimization } else { ConvexOptimizer opt = getOptimizer(oa, conf, m); opt.getUpdater().setStateViewArray((Layer) m, Nd4j.create(new int[] {1, nParams}, 'c'), true); opt.optimize(LayerWorkspaceMgr.noWorkspaces()); m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - scores[i] = m.score(); + scores[i] = m.getScore(); assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i])); } } @@ -451,7 +541,7 @@ public class TestOptimizers extends BaseDL4JTest { private static class RastriginFunctionModel extends SimpleOptimizableModel { private static final long serialVersionUID = -1772954508787487941L; - private RastriginFunctionModel(int nDimensions, NeuralNetConfiguration conf) { + private RastriginFunctionModel(int nDimensions, LayerConfiguration conf) { super(initParams(nDimensions), conf); } @@ -463,6 +553,90 @@ public class TestOptimizers extends BaseDL4JTest { } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(org.nd4j.linalg.dataset.api.DataSet dataSet) { + + } + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { + + } + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) { + + } + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { + + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, + T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, + T... 
evaluations) { + return null; + } + + /** + * @param netConfiguration + */ + @Override + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { + + } + @Override public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { //Gradient decomposes due to sum, so: @@ -537,7 +711,7 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... listeners) { } @@ -587,23 +761,23 @@ public class TestOptimizers extends BaseDL4JTest { double[] scores = new double[nOptIter + 1]; for (int i = 0; i <= nOptIter; i++) { - NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .maxNumLineSearchIterations(maxNumLineSearchIter) .updater(new Sgd(1e-1)) .stepFunction(new org.deeplearning4j.nn.conf.stepfunctions.NegativeDefaultStepFunction()) .layer(new DenseLayer.Builder().nIn(1).nOut(1).build()) .build(); - conf.addVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here + conf.addNetWideVariable("W"); //Normally done by ParamInitializers, but obviously that isn't done here - Model m = new RosenbrockFunctionModel(100, conf); + IModel m = new RosenbrockFunctionModel(100, conf.getFlattenedLayerConfigurations().get(0)); if (i == 0) { m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - scores[0] = m.score(); //Before optimization + scores[0] = m.getScore(); //Before optimization } else { ConvexOptimizer opt = getOptimizer(oa, conf, m); opt.optimize(LayerWorkspaceMgr.noWorkspaces()); m.computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - scores[i] = m.score(); + scores[i] = m.getScore(); assertTrue(!Double.isNaN(scores[i]) && !Double.isInfinite(scores[i]), "NaN or infinite score: " + scores[i]); } } @@ -637,7 +811,7 @@ public class TestOptimizers extends BaseDL4JTest { private static class RosenbrockFunctionModel extends SimpleOptimizableModel { private static final long serialVersionUID = -5129494342531033706L; - private RosenbrockFunctionModel(int nDimensions, NeuralNetConfiguration conf) { + private RosenbrockFunctionModel(int nDimensions, LayerConfiguration conf) { super(initParams(nDimensions), conf); } @@ -648,6 +822,90 @@ public class TestOptimizers extends BaseDL4JTest { return dist.sample(new int[] {1, nDimensions}); } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(org.nd4j.linalg.dataset.api.DataSet dataSet) { + + } + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { + + } + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) { + + } + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { + + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, + T... 
evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, + T... evaluations) { + return null; + } + + /** + * @param netConfiguration + */ + @Override + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { + + } + @Override public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { val nDims = parameters.length(); @@ -738,7 +996,7 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... listeners) { } @@ -764,34 +1022,57 @@ public class TestOptimizers extends BaseDL4JTest { } - /** Simple abstract class to deal with the fact that we don't care about the majority of the Model/Layer + /** Simple abstract class to deal with the fact that we don't care about the majority of the Model/ILayer * methods here. Classes extending this model for optimizer tests need only implement the score() and * gradient() methods. */ - private static abstract class SimpleOptimizableModel implements Model, Layer { + private static abstract class SimpleOptimizableModel implements IModel, Layer { private static final long serialVersionUID = 4409380971404019303L; protected INDArray parameters; protected INDArray gradientView; - protected final NeuralNetConfiguration conf; + protected final LayerConfiguration conf; protected Gradient gradient; protected double score; + /** + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + throw new RuntimeException("Not implemented"); + } + + /** + * Get a reference to the network this layer is part of. + * + * @return + */ + @Override + public IModel getNet() { + throw new RuntimeException("Not implemented"); + } + /**@param parameterInit Initial parameters. Also determines dimensionality of problem. Should be row vector. */ - private SimpleOptimizableModel(INDArray parameterInit, NeuralNetConfiguration conf) { + private SimpleOptimizableModel(INDArray parameterInit, LayerConfiguration conf) { this.parameters = parameterInit.dup(); this.gradientView = Nd4j.create(parameterInit.shape()); this.conf = conf; } + /** + * Return the configuration of this layer + * + * @return the configuration + */ @Override - public void addListeners(TrainingListener... listener) { - // no-op + public LayerConfiguration getLayerConfiguration() { + return this.conf; } @Override - public TrainingConfig getConfig() { - return conf.getLayer(); + public ITraininableLayerConfiguration getTrainingConfig() { + return (BaseLayerConfiguration) conf; } /** @@ -825,7 +1106,7 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... 
listeners) { } @@ -845,7 +1126,7 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public double score() { + public double getScore() { return score; } @@ -865,7 +1146,7 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public INDArray params() { + public INDArray getModelParams() { return parameters; } @@ -887,7 +1168,7 @@ public class TestOptimizers extends BaseDL4JTest { @Override public Pair gradientAndScore() { computeGradientAndScore(LayerWorkspaceMgr.noWorkspaces()); - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -896,18 +1177,18 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public NeuralNetConfiguration conf() { - return conf; + public NeuralNetConfiguration getNetConfiguration() { + return conf.getNetConfiguration(); } @Override - public void setConf(NeuralNetConfiguration conf) { + public void setLayerConfiguration(LayerConfiguration layerConfiguration) { throw new UnsupportedOperationException(); } @Override public INDArray input() { - //Work-around for BaseUpdater.postApply(): Uses Layer.input().size(0) + //Work-around for BaseUpdater.postApply(): Uses ILayer.input().size(0) //in order to get mini-batch size. i.e., divide by 1 here. return Nd4j.zeros(1); } @@ -923,13 +1204,13 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public Map paramTable() { + public Map getParamTable() { return Collections.singletonMap("W", getParam("W")); } @Override - public Map paramTable(boolean backpropParamsOnly) { - return paramTable(); + public Map getParamTable(boolean backpropParamsOnly) { + return getParamTable(); } @Override @@ -958,12 +1239,12 @@ public class TestOptimizers extends BaseDL4JTest { } @Override - public Collection getListeners() { + public Collection getTrainingListeners() { return null; } @Override - public void setListeners(Collection listeners) { + public void addTrainingListeners(Collection listeners) { throw new UnsupportedOperationException(); } @@ -1043,4 +1324,6 @@ public class TestOptimizers extends BaseDL4JTest { public void close(){ } } + + } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestCheckpointListener.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestCheckpointListener.java index 4c3760d95..2e34fcd46 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestCheckpointListener.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestCheckpointListener.java @@ -22,7 +22,6 @@ package org.deeplearning4j.optimizer.listener; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -51,7 +50,7 @@ public class TestCheckpointListener extends BaseDL4JTest { public File tempDir; private static Pair getNetAndData(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) @@ -77,7 +76,7 @@ public class TestCheckpointListener extends BaseDL4JTest { .keepAll() .saveEveryNEpochs(2) .build(); 
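// Editor's note (illustrative sketch, not part of the applied patch): throughout these tests the
// listener API moves from Model#setListeners(...) to IModel#addTrainingListeners(...). Based only
// on the call sites replaced in this patch, the new method appears to accept both varargs and a
// Collection, roughly like:
//
//     MultiLayerNetwork net = new MultiLayerNetwork(conf);
//     net.init();
//     net.addTrainingListeners(new ScoreIterationListener(10));                                  // varargs form
//     net.addTrainingListeners(Collections.singletonList(new ScoreIterationListener(10)));       // Collection form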
- net.setListeners(l); + net.addTrainingListeners(l); for(int i=0; i<10; i++ ){ net.fit(iter); @@ -126,7 +125,7 @@ public class TestCheckpointListener extends BaseDL4JTest { .keepLast(3) .saveEveryNIterations(5) .build(); - net.setListeners(l); + net.addTrainingListeners(l); for(int i=0; i<20; i++ ){ //40 iterations total net.fit(iter); @@ -168,7 +167,7 @@ public class TestCheckpointListener extends BaseDL4JTest { MultiLayerNetwork netStatic2 = CheckpointListener.loadLastCheckpointMLN(f); assertEquals(35, netStatic2.getIterationCount()); - assertEquals(netStatic.params(), netStatic2.params()); + assertEquals(netStatic.getModelParams(), netStatic2.getModelParams()); } @Test @@ -183,7 +182,7 @@ public class TestCheckpointListener extends BaseDL4JTest { .keepLast(3) .saveEvery(4900, TimeUnit.MILLISECONDS) .build(); - net.setListeners(l); + net.addTrainingListeners(l); for(int i=0; i<3; i++ ){ //10 iterations total net.fit(iter); @@ -227,7 +226,7 @@ public class TestCheckpointListener extends BaseDL4JTest { .keepLastAndEvery(3, 3) .saveEveryNEpochs(2) .build(); - net.setListeners(l); + net.addTrainingListeners(l); for(int i=0; i<20; i++ ){ //40 iterations total net.fit(iter); @@ -273,7 +272,7 @@ public class TestCheckpointListener extends BaseDL4JTest { .keepAll() .saveEveryNEpochs(1) .build(); - net.setListeners(l); + net.addTrainingListeners(l); for(int i=0; i<3; i++ ){ net.fit(iter); @@ -295,7 +294,7 @@ public class TestCheckpointListener extends BaseDL4JTest { .saveEveryNEpochs(1) .deleteExisting(true) .build(); - net.setListeners(l); + net.addTrainingListeners(l); net.fit(iter); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestFailureListener.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestFailureListener.java index 81786baa7..fb500772d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestFailureListener.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestFailureListener.java @@ -22,7 +22,6 @@ package org.deeplearning4j.optimizer.listener; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -51,7 +50,7 @@ public class TestFailureListener extends BaseDL4JTest { @Test public void testFailureIter5() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(1e-4)) .list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) @@ -59,7 +58,7 @@ public class TestFailureListener extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new FailureTestingListener( + net.addTrainingListeners(new FailureTestingListener( // FailureTestingListener.FailureMode.OOM, FailureTestingListener.FailureMode.SYSTEM_EXIT_1, new FailureTestingListener.IterationEpochTrigger(false, 10))); @@ -73,7 +72,7 @@ public class TestFailureListener extends BaseDL4JTest { @Test public void testFailureRandom_OR(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = 
NeuralNetConfiguration.builder() .updater(new Adam(1e-4)) .list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) @@ -85,7 +84,7 @@ public class TestFailureListener extends BaseDL4JTest { assertNotNull(username); assertFalse(username.isEmpty()); - net.setListeners(new FailureTestingListener( + net.addTrainingListeners(new FailureTestingListener( FailureTestingListener.FailureMode.SYSTEM_EXIT_1, new FailureTestingListener.Or( new FailureTestingListener.IterationEpochTrigger(false, 10000), @@ -101,7 +100,7 @@ public class TestFailureListener extends BaseDL4JTest { @Test public void testFailureRandom_AND() throws Exception { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(1e-4)) .list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) @@ -113,7 +112,7 @@ public class TestFailureListener extends BaseDL4JTest { assertNotNull(hostname); assertFalse(hostname.isEmpty()); - net.setListeners(new FailureTestingListener( + net.addTrainingListeners(new FailureTestingListener( FailureTestingListener.FailureMode.ILLEGAL_STATE, new FailureTestingListener.And( new FailureTestingListener.HostNameTrigger(hostname), diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestListeners.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestListeners.java index 55b1d39c8..f3d4f5dee 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestListeners.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/optimizer/listener/TestListeners.java @@ -22,14 +22,13 @@ package org.deeplearning4j.optimizer.listener; import lombok.Data; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.AutoEncoder; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -71,48 +70,48 @@ public class TestListeners extends BaseDL4JTest { public void testSettingListenersUnsupervised() { //Pretrain layers should get copies of the listeners, in addition to the - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new AutoEncoder.Builder().nIn(10).nOut(10).build()) .layer(1, new VariationalAutoencoder.Builder().nIn(10).nOut(10).build()).build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new ScoreIterationListener(), new TestRoutingListener()); + net.addTrainingListeners(new ScoreIterationListener(), new TestRoutingListener()); for (Layer l : net.getLayers()) { - Collection layerListeners = l.getListeners(); + Collection layerListeners = l.getTrainingListeners(); assertEquals(2, layerListeners.size(), l.getClass().toString()); TrainingListener[] 
lArr = layerListeners.toArray(new TrainingListener[2]); assertTrue(lArr[0] instanceof ScoreIterationListener); assertTrue(lArr[1] instanceof TestRoutingListener); } - Collection netListeners = net.getListeners(); + Collection netListeners = net.getTrainingListeners(); assertEquals(2, netListeners.size()); TrainingListener[] lArr = netListeners.toArray(new TrainingListener[2]); assertTrue(lArr[0] instanceof ScoreIterationListener); assertTrue(lArr[1] instanceof TestRoutingListener); - ComputationGraphConfiguration gConf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration gConf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("0", new AutoEncoder.Builder().nIn(10).nOut(10).build(), "in") .addLayer("1", new VariationalAutoencoder.Builder().nIn(10).nOut(10).build(), "0") .setOutputs("1").build(); ComputationGraph cg = new ComputationGraph(gConf); cg.init(); - cg.setListeners(new ScoreIterationListener(), new TestRoutingListener()); + cg.addTrainingListeners(new ScoreIterationListener(), new TestRoutingListener()); for (Layer l : cg.getLayers()) { - Collection layerListeners = l.getListeners(); + Collection layerListeners = l.getTrainingListeners(); assertEquals(2, layerListeners.size()); lArr = layerListeners.toArray(new TrainingListener[2]); assertTrue(lArr[0] instanceof ScoreIterationListener); assertTrue(lArr[1] instanceof TestRoutingListener); } - netListeners = cg.getListeners(); + netListeners = cg.getTrainingListeners(); assertEquals(2, netListeners.size()); lArr = netListeners.toArray(new TrainingListener[2]); assertTrue(lArr[0] instanceof ScoreIterationListener); @@ -151,7 +150,7 @@ public class TestListeners extends BaseDL4JTest { } @Override - public void iterationDone(Model model, int iteration, int epoch) {} + public void iterationDone(IModel model, int iteration, int epoch) {} } @@ -172,7 +171,7 @@ public class TestListeners extends BaseDL4JTest { DataSetIterator iter = new IrisDataSetIterator(10, 150); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new OutputLayer.Builder().nIn(4).nOut(3) .activation(Activation.SOFTMAX) @@ -181,7 +180,7 @@ public class TestListeners extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(listeners); + net.addTrainingListeners(listeners); net.fit(iter); @@ -200,7 +199,7 @@ public class TestListeners extends BaseDL4JTest { listeners2.add(il2); } - net.setListeners(listeners2); + net.addTrainingListeners(listeners2); net.fit(iter); } @@ -208,7 +207,7 @@ public class TestListeners extends BaseDL4JTest { @Test public void testListenerCalls(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); @@ -217,7 +216,7 @@ public class TestListeners extends BaseDL4JTest { net.init(); TestListener tl = new TestListener(); - net.setListeners(tl); + net.addTrainingListeners(tl); DataSetIterator irisIter = new IrisDataSetIterator(50, 150); @@ -261,7 +260,7 @@ public class TestListeners extends BaseDL4JTest { tl = new TestListener(); ComputationGraph cg = net.toComputationGraph(); - cg.setListeners(tl); + cg.addTrainingListeners(tl); cg.fit(irisIter, 2); @@ -284,37 +283,37 @@ public class TestListeners 
extends BaseDL4JTest { @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { calls.add(new Triple<>(Call.ITER_DONE, iteration, epoch)); } @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { calls.add(new Triple<>(Call.EPOCH_START, BaseOptimizer.getIterationCount(model), BaseOptimizer.getEpochCount(model))); } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { calls.add(new Triple<>(Call.EPOCH_END, BaseOptimizer.getIterationCount(model), BaseOptimizer.getEpochCount(model))); } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { calls.add(new Triple<>(Call.ON_FWD, BaseOptimizer.getIterationCount(model), BaseOptimizer.getEpochCount(model))); } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { calls.add(new Triple<>(Call.ON_FWD, BaseOptimizer.getIterationCount(model), BaseOptimizer.getEpochCount(model))); } @Override - public void onGradientCalculation(Model model) { + public void onGradientCalculation(IModel model) { calls.add(new Triple<>(Call.ON_GRAD, BaseOptimizer.getIterationCount(model), BaseOptimizer.getEpochCount(model))); } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { calls.add(new Triple<>(Call.ON_BWD, BaseOptimizer.getIterationCount(model), BaseOptimizer.getEpochCount(model))); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/parallelism/RandomTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/parallelism/RandomTests.java index 97a1cb799..2c214eeff 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/parallelism/RandomTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/parallelism/RandomTests.java @@ -20,9 +20,8 @@ package org.deeplearning4j.parallelism; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.api.Model; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -52,13 +51,13 @@ public class RandomTests extends BaseDL4JTest { */ @Test public void testModelInitialParamsEquality1() throws Exception { - final List models = new CopyOnWriteArrayList<>(); + final List models = new CopyOnWriteArrayList<>(); for (int i = 0; i < 4; i++) { Thread thread = new Thread(new Runnable() { @Override public void run() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(119) // Training iterations as above + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(119) // Training iterations as above .l2(0.0005) //.learningRateDecayPolicy(LearningRatePolicy.Inverse).lrPolicyDecayRate(0.001).lrPolicyPower(0.75) .weightInit(WeightInit.XAVIER) @@ -78,7 +77,7 @@ public class RandomTests extends BaseDL4JTest { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(10).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) //See note below + .inputType(InputType.convolutionalFlat(28, 28, 1)) //See note below .build(); 
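// Editor's note (illustrative sketch, not part of the applied patch): this refactoring replaces
// MultiLayerConfiguration built via "new NeuralNetConfiguration.Builder()" with a single
// NeuralNetConfiguration obtained from the static "NeuralNetConfiguration.builder()" factory;
// setInputType(...) becomes inputType(...), and JSON round-tripping moves to
// NeuralNetConfiguration.fromJson(...). A minimal before/after comparison, assuming only the
// builder methods that already appear in this patch:
//
//     // before
//     MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
//             .seed(12345)
//             .list()
//             .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
//             .layer(1, new OutputLayer.Builder().nIn(10).nOut(10)
//                     .lossFunction(LossFunctions.LossFunction.MSE).build())
//             .build();
//     MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(conf.toJson());
//
//     // after
//     NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
//             .seed(12345)
//             .list()
//             .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build())
//             .layer(1, new OutputLayer.Builder().nIn(10).nOut(10)
//                     .lossFunction(LossFunctions.LossFunction.MSE).build())
//             .build();
//     NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(conf.toJson());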
MultiLayerNetwork network = new MultiLayerNetwork(conf); @@ -95,7 +94,7 @@ public class RandomTests extends BaseDL4JTest { // at the end of day, model params has to for (int i = 0; i < models.size(); i++) { - assertEquals(models.get(0).params(), models.get(i).params()); + assertEquals(models.get(0).getModelParams(), models.get(i).getModelParams()); } } @@ -104,7 +103,7 @@ public class RandomTests extends BaseDL4JTest { public void testRngInitMLN() { Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).activation(Activation.TANH) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).activation(Activation.TANH) .weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(2, @@ -120,14 +119,14 @@ public class RandomTests extends BaseDL4JTest { MultiLayerNetwork net2 = new MultiLayerNetwork(conf); net2.init(); - assertEquals(net1.params(), net2.params()); + assertEquals(net1.getModelParams(), net2.getModelParams()); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(json); Nd4j.getRandom().setSeed(987654321); MultiLayerNetwork net3 = new MultiLayerNetwork(fromJson); net3.init(); - assertEquals(net1.params(), net3.params()); + assertEquals(net1.getModelParams(), net3.getModelParams()); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/perf/listener/TestSystemInfoPrintListener.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/perf/listener/TestSystemInfoPrintListener.java index 02e089090..6b2d882e3 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/perf/listener/TestSystemInfoPrintListener.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/perf/listener/TestSystemInfoPrintListener.java @@ -24,7 +24,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.core.listener.SystemInfoFilePrintListener; import org.deeplearning4j.core.listener.SystemInfoPrintListener; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -57,14 +56,14 @@ public class TestSystemInfoPrintListener extends BaseDL4JTest { .build(); tmpFile.deleteOnExit(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(systemInfoFilePrintListener); + net.addTrainingListeners(systemInfoFilePrintListener); DataSetIterator iter = new IrisDataSetIterator(10, 150); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/MiscRegressionTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/MiscRegressionTests.java index 686501ff8..316ad2f46 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/MiscRegressionTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/MiscRegressionTests.java @@ -23,12 +23,11 @@ package 
org.deeplearning4j.regressiontest; import org.apache.commons.io.FileUtils; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; import org.deeplearning4j.nn.conf.layers.DenseLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.misc.FrozenLayer; import org.junit.jupiter.api.Test; import org.nd4j.common.io.ClassPathResource; @@ -55,7 +54,7 @@ public class MiscRegressionTests extends BaseDL4JTest { assertNotNull(gv); if(gv instanceof LayerVertex){ LayerVertex lv = (LayerVertex)gv; - Layer layer = lv.getLayerConf().getLayer(); + LayerConfiguration layer = lv.getNetConfiguration().getFirstLayer(); if(layer instanceof FrozenLayer) countFrozen++; } @@ -66,13 +65,13 @@ public class MiscRegressionTests extends BaseDL4JTest { @Test public void testFrozenNewFormat(){ - MultiLayerConfiguration configuration = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration configuration = NeuralNetConfiguration.builder() .list() .layer(0, new FrozenLayer(new DenseLayer.Builder().nIn(10).nOut(10).build())) .build(); String json = configuration.toJson(); - MultiLayerConfiguration fromJson = MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration fromJson = NeuralNetConfiguration.fromJson(json); assertEquals(configuration, fromJson); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest050.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest050.java index 022545685..a771e414b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest050.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest050.java @@ -23,7 +23,7 @@ package org.deeplearning4j.regressiontest; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.layers.*; @@ -65,14 +65,14 @@ public class RegressionTest050 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertEquals("relu", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new Nesterovs(0.15, 0.9), l0.getIUpdater()); assertEquals(0.15, ((Nesterovs)l0.getIUpdater()).getLearningRate(), 1e-6); @@ -81,13 +81,13 @@ public class RegressionTest050 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMCXENT); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new 
WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new Nesterovs(0.15, 0.9), l1.getIUpdater()); assertEquals(0.9, ((Nesterovs)l1.getIUpdater()).getMomentum(), 1e-6); assertEquals(0.15, ((Nesterovs)l1.getIUpdater()).getLearningRate(), 1e-6); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new Nesterovs().stateSize(net.numParams()); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -99,14 +99,14 @@ public class RegressionTest050 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationLReLU); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertEquals(new Dropout(0.6), l0.getIDropout()); @@ -118,7 +118,7 @@ public class RegressionTest050 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMSE); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l1.getIUpdater()); assertEquals(0.15, ((RmsProp)l1.getIUpdater()).getLearningRate(), 1e-6); assertEquals(new Dropout(0.6), l1.getIDropout()); @@ -126,7 +126,7 @@ public class RegressionTest050 extends BaseDL4JTest { assertEquals(new WeightDecay(0.2, false), TestUtils.getWeightDecayReg(l1)); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -138,14 +138,14 @@ public class RegressionTest050 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); ConvolutionLayer l0 = (ConvolutionLayer) conf.getConf(0).getLayer(); assertEquals("tanh", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(3, l0.getNOut()); - assertEquals(new WeightInitRelu(), 
l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertArrayEquals(new int[] {2, 2}, l0.getKernelSize()); @@ -165,12 +165,12 @@ public class RegressionTest050 extends BaseDL4JTest { assertTrue(l2.getLossFn() instanceof LossNegativeLogLikelihood); assertEquals(26 * 26 * 3, l2.getNIn()); assertEquals(5, l2.getNOut()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest060.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest060.java index 985f347d8..8d6dae94a 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest060.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest060.java @@ -25,7 +25,7 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.graph.LayerVertex; @@ -67,14 +67,14 @@ public class RegressionTest060 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertEquals("relu", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new Nesterovs(0.15, 0.9), l0.getIUpdater()); assertEquals(0.15, ((Nesterovs)l0.getIUpdater()).getLearningRate(), 1e-6); @@ -83,13 +83,13 @@ public class RegressionTest060 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMCXENT); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new Nesterovs(0.15, 0.9), l1.getIUpdater()); assertEquals(0.9, ((Nesterovs)l1.getIUpdater()).getMomentum(), 1e-6); assertEquals(0.15, ((Nesterovs)l1.getIUpdater()).getLearningRate(), 1e-6); int numParams = 
(int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new Nesterovs().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -101,14 +101,14 @@ public class RegressionTest060 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationLReLU); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertEquals(new Dropout(0.6), l0.getIDropout()); @@ -122,7 +122,7 @@ public class RegressionTest060 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMSE); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l1.getIUpdater()); assertEquals(0.15, ((RmsProp)l1.getIUpdater()).getLearningRate(), 1e-6); assertEquals(new Dropout(0.6), l1.getIDropout()); @@ -132,7 +132,7 @@ public class RegressionTest060 extends BaseDL4JTest { assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -144,14 +144,14 @@ public class RegressionTest060 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); ConvolutionLayer l0 = (ConvolutionLayer) conf.getConf(0).getLayer(); assertEquals("tanh", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(3, l0.getNOut()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertArrayEquals(new int[] {2, 2}, l0.getKernelSize()); @@ -171,14 +171,14 @@ public class RegressionTest060 
extends BaseDL4JTest { assertTrue(l2.getLossFn() instanceof LossNegativeLogLikelihood); //TODO assertEquals(26 * 26 * 3, l2.getNIn()); assertEquals(5, l2.getNOut()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -190,8 +190,8 @@ public class RegressionTest060 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); GravesLSTM l0 = (GravesLSTM) conf.getConf(0).getLayer(); assertEquals("tanh", l0.getActivationFn().toString()); @@ -221,10 +221,10 @@ public class RegressionTest060 extends BaseDL4JTest { ComputationGraph net = ModelSerializer.restoreComputationGraph(f, true); - ComputationGraphConfiguration conf = net.getConfiguration(); + ComputationGraphConfiguration conf = net.getComputationGraphConfiguration(); assertEquals(3, conf.getVertices().size()); - GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer(); + GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getNetConfiguration().getFirstLayer(); assertEquals("tanh", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); @@ -232,14 +232,14 @@ public class RegressionTest060 extends BaseDL4JTest { assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5); GravesBidirectionalLSTM l1 = - (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer(); + (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getNetConfiguration().getFirstLayer(); assertEquals("softsign", l1.getActivationFn().toString()); assertEquals(4, l1.getNIn()); assertEquals(4, l1.getNOut()); assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization()); assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5); - RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getNetConfiguration().getFirstLayer(); assertEquals(4, l2.getNIn()); assertEquals(5, l2.getNOut()); assertEquals("softmax", l2.getActivationFn().toString()); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest071.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest071.java index 2a75e7994..8589b7de2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest071.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest071.java @@ -25,7 +25,7 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.graph.LayerVertex; @@ -68,14 +68,14 @@ public class RegressionTest071 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertEquals("relu", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new Nesterovs(0.15, 0.9), l0.getIUpdater()); assertEquals(0.15, ((Nesterovs)l0.getIUpdater()).getLearningRate(), 1e-6); @@ -84,13 +84,13 @@ public class RegressionTest071 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMCXENT); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(0.9, ((Nesterovs)l1.getIUpdater()).getMomentum(), 1e-6); assertEquals(0.9, ((Nesterovs)l1.getIUpdater()).getMomentum(), 1e-6); assertEquals(0.15, ((Nesterovs)l1.getIUpdater()).getLearningRate(), 1e-6); long numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new Nesterovs().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -102,14 +102,14 @@ public class RegressionTest071 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationLReLU); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertEquals(new Dropout(0.6), l0.getIDropout()); @@ -123,7 +123,7 @@ public class RegressionTest071 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMSE); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitDistribution(new 
NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l1.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertEquals(new Dropout(0.6), l1.getIDropout()); @@ -133,7 +133,7 @@ public class RegressionTest071 extends BaseDL4JTest { assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5); long numParams = net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -145,14 +145,14 @@ public class RegressionTest071 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); ConvolutionLayer l0 = (ConvolutionLayer) conf.getConf(0).getLayer(); assertEquals("tanh", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(3, l0.getNOut()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertArrayEquals(new int[] {2, 2}, l0.getKernelSize()); @@ -172,14 +172,14 @@ public class RegressionTest071 extends BaseDL4JTest { assertTrue(l2.getLossFn() instanceof LossNegativeLogLikelihood); //TODO assertEquals(26 * 26 * 3, l2.getNIn()); assertEquals(5, l2.getNOut()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertEquals(new RmsProp(0.15, 0.96, RmsProp.DEFAULT_RMSPROP_EPSILON), l0.getIUpdater()); assertEquals(0.15, ((RmsProp)l0.getIUpdater()).getLearningRate(), 1e-6); assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); long numParams = net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -191,8 +191,8 @@ public class RegressionTest071 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); GravesLSTM l0 = (GravesLSTM) conf.getConf(0).getLayer(); assertEquals("tanh", l0.getActivationFn().toString()); @@ -221,10 +221,10 @@ public class RegressionTest071 extends BaseDL4JTest { ComputationGraph net = ModelSerializer.restoreComputationGraph(f, true); - ComputationGraphConfiguration conf = net.getConfiguration(); + ComputationGraphConfiguration conf = 
net.getComputationGraphConfiguration(); assertEquals(3, conf.getVertices().size()); - GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer(); + GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getNetConfiguration().getFirstLayer(); assertEquals("tanh", l0.getActivationFn().toString()); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); @@ -232,14 +232,14 @@ public class RegressionTest071 extends BaseDL4JTest { assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5); GravesBidirectionalLSTM l1 = - (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer(); + (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getNetConfiguration().getFirstLayer(); assertEquals("softsign", l1.getActivationFn().toString()); assertEquals(4, l1.getNIn()); assertEquals(4, l1.getNOut()); assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization()); assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5); - RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getNetConfiguration().getFirstLayer(); assertEquals(4, l2.getNIn()); assertEquals(5, l2.getNOut()); assertEquals("softmax", l2.getActivationFn().toString()); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest080.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest080.java index 6566f03fe..90cb2c126 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest080.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest080.java @@ -25,7 +25,7 @@ import org.deeplearning4j.TestUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.graph.LayerVertex; @@ -67,14 +67,14 @@ public class RegressionTest080 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationReLU); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertTrue(l0.getIUpdater() instanceof Nesterovs); Nesterovs n = (Nesterovs) l0.getIUpdater(); assertEquals(0.9, n.getMomentum(), 1e-6); @@ -87,14 +87,14 @@ public class RegressionTest080 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMCXENT); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertTrue(l1.getIUpdater() instanceof 
Nesterovs); assertEquals(0.9, ((Nesterovs)l1.getIUpdater()).getMomentum(), 1e-6); assertEquals(0.15, ((Nesterovs)l1.getIUpdater()).getLearningRate(), 1e-6); assertEquals(0.15, n.getLearningRate(), 1e-6); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new Nesterovs().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -106,14 +106,14 @@ public class RegressionTest080 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(2, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(2, conf.getNetConfigurations().size()); DenseLayer l0 = (DenseLayer) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationLReLU); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l0.getWeightInit()); assertTrue(l0.getIUpdater() instanceof RmsProp); RmsProp r = (RmsProp) l0.getIUpdater(); assertEquals(0.96, r.getRmsDecay(), 1e-6); @@ -130,7 +130,7 @@ public class RegressionTest080 extends BaseDL4JTest { assertTrue(l1.getLossFn() instanceof LossMSE); assertEquals(4, l1.getNIn()); assertEquals(5, l1.getNOut()); - assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l1.getWeightInitFn()); + assertEquals(new WeightInitDistribution(new NormalDistribution(0.1, 1.2)), l1.getWeightInit()); assertTrue(l1.getIUpdater() instanceof RmsProp); r = (RmsProp) l1.getIUpdater(); assertEquals(0.96, r.getRmsDecay(), 1e-6); @@ -143,7 +143,7 @@ public class RegressionTest080 extends BaseDL4JTest { assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -155,14 +155,14 @@ public class RegressionTest080 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); ConvolutionLayer l0 = (ConvolutionLayer) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationTanH); assertEquals(3, l0.getNIn()); assertEquals(3, l0.getNOut()); - assertEquals(new WeightInitRelu(), l0.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l0.getWeightInit()); assertTrue(l0.getIUpdater() instanceof RmsProp); RmsProp r = (RmsProp) l0.getIUpdater(); assertEquals(0.96, r.getRmsDecay(), 1e-6); @@ -185,7 +185,7 @@ public class RegressionTest080 extends BaseDL4JTest { 
assertTrue(l2.getLossFn() instanceof LossNegativeLogLikelihood); assertEquals(26 * 26 * 3, l2.getNIn()); assertEquals(5, l2.getNOut()); - assertEquals(new WeightInitRelu(), l2.getWeightInitFn()); + assertEquals(new WeightInitRelu(), l2.getWeightInit()); assertTrue(l2.getIUpdater() instanceof RmsProp); r = (RmsProp) l2.getIUpdater(); assertEquals(0.96, r.getRmsDecay(), 1e-6); @@ -194,7 +194,7 @@ public class RegressionTest080 extends BaseDL4JTest { assertTrue(conf.getInputPreProcess(2) instanceof CnnToFeedForwardPreProcessor); int numParams = (int)net.numParams(); - assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.params()); + assertEquals(Nd4j.linspace(1, numParams, numParams, Nd4j.dataType()).reshape(1,numParams), net.getModelParams()); int updaterSize = (int) new RmsProp().stateSize(numParams); assertEquals(Nd4j.linspace(1, updaterSize, updaterSize, Nd4j.dataType()).reshape(1,numParams), net.getUpdater().getStateViewArray()); } @@ -206,8 +206,8 @@ public class RegressionTest080 extends BaseDL4JTest { MultiLayerNetwork net = ModelSerializer.restoreMultiLayerNetwork(f, true); - MultiLayerConfiguration conf = net.getLayerWiseConfigurations(); - assertEquals(3, conf.getConfs().size()); + NeuralNetConfiguration conf = net.getNetConfiguration(); + assertEquals(3, conf.getNetConfigurations().size()); GravesLSTM l0 = (GravesLSTM) conf.getConf(0).getLayer(); assertTrue(l0.getActivationFn() instanceof ActivationTanH); @@ -237,10 +237,10 @@ public class RegressionTest080 extends BaseDL4JTest { ComputationGraph net = ModelSerializer.restoreComputationGraph(f, true); - ComputationGraphConfiguration conf = net.getConfiguration(); + ComputationGraphConfiguration conf = net.getComputationGraphConfiguration(); assertEquals(3, conf.getVertices().size()); - GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getLayerConf().getLayer(); + GravesLSTM l0 = (GravesLSTM) ((LayerVertex) conf.getVertices().get("0")).getNetConfiguration().getFirstLayer(); assertTrue(l0.getActivationFn() instanceof ActivationTanH); assertEquals(3, l0.getNIn()); assertEquals(4, l0.getNOut()); @@ -248,14 +248,14 @@ public class RegressionTest080 extends BaseDL4JTest { assertEquals(1.5, l0.getGradientNormalizationThreshold(), 1e-5); GravesBidirectionalLSTM l1 = - (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getLayerConf().getLayer(); + (GravesBidirectionalLSTM) ((LayerVertex) conf.getVertices().get("1")).getNetConfiguration().getFirstLayer(); assertTrue(l1.getActivationFn() instanceof ActivationSoftSign); assertEquals(4, l1.getNIn()); assertEquals(4, l1.getNOut()); assertEquals(GradientNormalization.ClipElementWiseAbsoluteValue, l1.getGradientNormalization()); assertEquals(1.5, l1.getGradientNormalizationThreshold(), 1e-5); - RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getLayerConf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) ((LayerVertex) conf.getVertices().get("2")).getNetConfiguration().getFirstLayer(); assertEquals(4, l2.getNIn()); assertEquals(5, l2.getNOut()); assertTrue(l2.getActivationFn() instanceof ActivationSoftmax); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100a.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100a.java index acee54871..6555c5eec 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100a.java +++ 
b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100a.java @@ -86,30 +86,30 @@ public class RegressionTest100a extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100a/GravesLSTMCharModelingExample_100a.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - GravesLSTM l0 = (GravesLSTM) net.getLayer(0).conf().getLayer(); + GravesLSTM l0 = (GravesLSTM) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); assertEquals(200, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new WeightDecay(0.001, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new RmsProp(0.1), l0.getIUpdater()); - GravesLSTM l1 = (GravesLSTM) net.getLayer(1).conf().getLayer(); + GravesLSTM l1 = (GravesLSTM) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(200, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new WeightDecay(0.001, false), TestUtils.getWeightDecayReg(l1)); assertEquals(new RmsProp(0.1), l1.getIUpdater()); - RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).conf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).getLayerConfiguration(); assertEquals(new ActivationSoftmax(), l2.getActivationFn()); assertEquals(77, l2.getNOut()); - assertEquals(new WeightInitXavier(), l2.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l2.getWeightInit()); assertEquals(new WeightDecay(0.001, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new RmsProp(0.1), l0.getIUpdater()); - assertEquals(BackpropType.TruncatedBPTT, net.getLayerWiseConfigurations().getBackpropType()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttBackLength()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttFwdLength()); + assertEquals(BackpropType.TruncatedBPTT, net.getNetConfiguration().getBackpropType()); + assertEquals(50, net.getNetConfiguration().getTbpttBackLength()); + assertEquals(50, net.getNetConfiguration().getTbpttFwdLength()); INDArray outExp; File f2 = Resources.asFile("regression_testing/100a/GravesLSTMCharModelingExample_Output_100a.bin"); @@ -134,12 +134,12 @@ public class RegressionTest100a extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100a/VaeMNISTAnomaly_100a.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).conf().getLayer(); + VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationLReLU(), l0.getActivationFn()); assertEquals(32, l0.getNOut()); assertArrayEquals(new int[]{256, 256}, l0.getEncoderLayerSizes()); assertArrayEquals(new int[]{256, 256}, l0.getDecoderLayerSizes()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new WeightDecay(1e-4, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new Adam(0.05), l0.getIUpdater()); @@ -171,11 +171,11 @@ public class RegressionTest100a extends BaseDL4JTest { int nBoxes = 5; int nClasses = 10; - ConvolutionLayer cl = (ConvolutionLayer)((LayerVertex)net.getConfiguration().getVertices().get("convolution2d_9")).getLayerConf().getLayer(); + ConvolutionLayer cl = 
(ConvolutionLayer)((LayerVertex)net.getComputationGraphConfiguration().getVertices().get("convolution2d_9")).getNetConfiguration().getFirstLayer(); assertEquals(nBoxes * (5 + nClasses), cl.getNOut()); assertEquals(new ActivationIdentity(), cl.getActivationFn()); assertEquals(ConvolutionMode.Same, cl.getConvolutionMode()); - assertEquals(new WeightInitXavier(), cl.getWeightInitFn()); + assertEquals(new WeightInitXavier(), cl.getWeightInit()); assertArrayEquals(new int[]{1,1}, cl.getKernelSize()); assertArrayEquals(new int[]{1,1}, cl.getKernelSize()); @@ -195,8 +195,8 @@ public class RegressionTest100a extends BaseDL4JTest { //Which means: the record output doesn't have this. To account for this, we'll manually set eps to 0.0 here //https://github.com/deeplearning4j/deeplearning4j/issues/5836#issuecomment-405526228 for(Layer l : net.getLayers()){ - if(l.conf().getLayer() instanceof BatchNormalization){ - BatchNormalization bn = (BatchNormalization) l.conf().getLayer(); + if(l.getLayerConfiguration() instanceof BatchNormalization){ + BatchNormalization bn = (BatchNormalization) l.getLayerConfiguration(); bn.setEps(0.0); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b3.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b3.java index 8df2f258b..223b7be91 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b3.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b3.java @@ -72,12 +72,12 @@ public class RegressionTest100b3 extends BaseDL4JTest { MultiLayerNetwork net = MultiLayerNetwork.load(f, true); // net = net.clone(); - DenseLayer l0 = (DenseLayer) net.getLayer(0).conf().getLayer(); + DenseLayer l0 = (DenseLayer) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); assertEquals(new WeightDecay(0.03, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new RmsProp(0.95), l0.getIUpdater()); - CustomLayer l1 = (CustomLayer) net.getLayer(1).conf().getLayer(); + CustomLayer l1 = (CustomLayer) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(new ActivationSigmoid(), l1.getSecondActivationFunction()); assertEquals(new RmsProp(0.95), l1.getIUpdater()); @@ -97,7 +97,7 @@ public class RegressionTest100b3 extends BaseDL4JTest { assertEquals(dt, in.dataType()); assertEquals(dt, outExp.dataType()); - assertEquals(dt, net.params().dataType()); + assertEquals(dt, net.getModelParams().dataType()); assertEquals(dt, net.getFlattenedGradients().dataType()); assertEquals(dt, net.getUpdater().getStateViewArray().dataType()); @@ -108,8 +108,8 @@ public class RegressionTest100b3 extends BaseDL4JTest { List activations = net.feedForward(in); - assertEquals(dt, net.getLayerWiseConfigurations().getDataType()); - assertEquals(dt, net.params().dataType()); + assertEquals(dt, net.getNetConfiguration().getDataType()); + assertEquals(dt, net.getModelParams().dataType()); assertEquals( outExp, outAct, dtype); } } @@ -121,30 +121,30 @@ public class RegressionTest100b3 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b3/GravesLSTMCharModelingExample_100b3.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - LSTM l0 = (LSTM) net.getLayer(0).conf().getLayer(); + LSTM l0 = (LSTM) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); 
assertEquals(200, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new WeightDecay(0.0001, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new Adam(0.005), l0.getIUpdater()); - LSTM l1 = (LSTM) net.getLayer(1).conf().getLayer(); + LSTM l1 = (LSTM) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(200, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new WeightDecay(0.0001, false), TestUtils.getWeightDecayReg(l1)); assertEquals(new Adam(0.005), l1.getIUpdater()); - RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).conf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).getLayerConfiguration(); assertEquals(new ActivationSoftmax(), l2.getActivationFn()); assertEquals(77, l2.getNOut()); - assertEquals(new WeightInitXavier(), l2.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l2.getWeightInit()); assertEquals(new WeightDecay(0.0001, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new Adam(0.005), l0.getIUpdater()); - assertEquals(BackpropType.TruncatedBPTT, net.getLayerWiseConfigurations().getBackpropType()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttBackLength()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttFwdLength()); + assertEquals(BackpropType.TruncatedBPTT, net.getNetConfiguration().getBackpropType()); + assertEquals(50, net.getNetConfiguration().getTbpttBackLength()); + assertEquals(50, net.getNetConfiguration().getTbpttFwdLength()); INDArray outExp; File f2 = Resources.asFile("regression_testing/100b3/GravesLSTMCharModelingExample_Output_100b3.bin"); @@ -169,12 +169,12 @@ public class RegressionTest100b3 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b3/VaeMNISTAnomaly_100b3.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).conf().getLayer(); + VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationLReLU(), l0.getActivationFn()); assertEquals(32, l0.getNOut()); assertArrayEquals(new int[]{256, 256}, l0.getEncoderLayerSizes()); assertArrayEquals(new int[]{256, 256}, l0.getDecoderLayerSizes()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new WeightDecay(1e-4, false), TestUtils.getWeightDecayReg(l0)); assertEquals(new Adam(1e-3), l0.getIUpdater()); @@ -206,11 +206,11 @@ public class RegressionTest100b3 extends BaseDL4JTest { int nBoxes = 5; int nClasses = 10; - ConvolutionLayer cl = (ConvolutionLayer)((LayerVertex)net.getConfiguration().getVertices().get("convolution2d_9")).getLayerConf().getLayer(); + ConvolutionLayer cl = (ConvolutionLayer)((LayerVertex)net.getComputationGraphConfiguration().getVertices().get("convolution2d_9")).getNetConfiguration().getFirstLayer(); assertEquals(nBoxes * (5 + nClasses), cl.getNOut()); assertEquals(new ActivationIdentity(), cl.getActivationFn()); assertEquals(ConvolutionMode.Same, cl.getConvolutionMode()); - assertEquals(new WeightInitXavier(), cl.getWeightInitFn()); + assertEquals(new WeightInitXavier(), cl.getWeightInit()); assertArrayEquals(new int[]{1,1}, cl.getKernelSize()); assertArrayEquals(new int[]{1,1}, cl.getKernelSize()); diff --git 
a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b4.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b4.java index 5b4270a4e..6cdede6bd 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b4.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b4.java @@ -91,12 +91,12 @@ public class RegressionTest100b4 extends BaseDL4JTest { MultiLayerNetwork net = MultiLayerNetwork.load(f, true); // net = net.clone(); - DenseLayer l0 = (DenseLayer) net.getLayer(0).conf().getLayer(); + DenseLayer l0 = (DenseLayer) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); assertEquals(new L2Regularization(0.03), TestUtils.getL2Reg(l0)); assertEquals(new RmsProp(0.95), l0.getIUpdater()); - CustomLayer l1 = (CustomLayer) net.getLayer(1).conf().getLayer(); + CustomLayer l1 = (CustomLayer) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(new ActivationSigmoid(), l1.getSecondActivationFunction()); assertEquals(new RmsProp(0.95), l1.getIUpdater()); @@ -116,7 +116,7 @@ public class RegressionTest100b4 extends BaseDL4JTest { assertEquals(dtype, in.dataType()); assertEquals(dtype, outExp.dataType()); - assertEquals(dtype, net.params().dataType()); + assertEquals(dtype, net.getModelParams().dataType()); assertEquals(dtype, net.getFlattenedGradients().dataType()); assertEquals(dtype, net.getUpdater().getStateViewArray().dataType()); @@ -125,8 +125,8 @@ public class RegressionTest100b4 extends BaseDL4JTest { INDArray outAct = net.output(in); assertEquals(dtype, outAct.dataType()); - assertEquals(dtype, net.getLayerWiseConfigurations().getDataType()); - assertEquals(dtype, net.params().dataType()); + assertEquals(dtype, net.getNetConfiguration().getDataType()); + assertEquals(dtype, net.getModelParams().dataType()); boolean eq = outExp.equalsWithEps(outAct, 0.01); assertTrue(eq, "Test for dtype: " + dtypeName + "\n" + outExp + " vs " + outAct); } @@ -139,30 +139,30 @@ public class RegressionTest100b4 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b4/GravesLSTMCharModelingExample_100b4.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - LSTM l0 = (LSTM) net.getLayer(0).conf().getLayer(); + LSTM l0 = (LSTM) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); assertEquals(200, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l0)); assertEquals(new Adam(0.005), l0.getIUpdater()); - LSTM l1 = (LSTM) net.getLayer(1).conf().getLayer(); + LSTM l1 = (LSTM) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(200, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l1)); assertEquals(new Adam(0.005), l1.getIUpdater()); - RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).conf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).getLayerConfiguration(); assertEquals(new ActivationSoftmax(), l2.getActivationFn()); assertEquals(77, l2.getNOut()); - assertEquals(new WeightInitXavier(), 
l2.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l2.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l2)); assertEquals(new Adam(0.005), l2.getIUpdater()); - assertEquals(BackpropType.TruncatedBPTT, net.getLayerWiseConfigurations().getBackpropType()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttBackLength()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttFwdLength()); + assertEquals(BackpropType.TruncatedBPTT, net.getNetConfiguration().getBackpropType()); + assertEquals(50, net.getNetConfiguration().getTbpttBackLength()); + assertEquals(50, net.getNetConfiguration().getTbpttFwdLength()); INDArray outExp; File f2 = Resources.asFile("regression_testing/100b4/GravesLSTMCharModelingExample_Output_100b4.bin"); @@ -187,12 +187,12 @@ public class RegressionTest100b4 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b4/VaeMNISTAnomaly_100b4.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).conf().getLayer(); + VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationLReLU(), l0.getActivationFn()); assertEquals(32, l0.getNOut()); assertArrayEquals(new int[]{256, 256}, l0.getEncoderLayerSizes()); assertArrayEquals(new int[]{256, 256}, l0.getDecoderLayerSizes()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l0)); assertEquals(new Adam(1e-3), l0.getIUpdater()); @@ -224,12 +224,12 @@ public class RegressionTest100b4 extends BaseDL4JTest { int nBoxes = 5; int nClasses = 10; - ConvolutionLayer cl = (ConvolutionLayer) ((LayerVertex) net.getConfiguration().getVertices() - .get("convolution2d_9")).getLayerConf().getLayer(); + ConvolutionLayer cl = (ConvolutionLayer) ((LayerVertex) net.getComputationGraphConfiguration().getVertices() + .get("convolution2d_9")).getNetConfiguration().getFirstLayer(); assertEquals(nBoxes * (5 + nClasses), cl.getNOut()); assertEquals(new ActivationIdentity(), cl.getActivationFn()); assertEquals(ConvolutionMode.Same, cl.getConvolutionMode()); - assertEquals(new WeightInitXavier(), cl.getWeightInitFn()); + assertEquals(new WeightInitXavier(), cl.getWeightInit()); assertArrayEquals(new int[]{1, 1}, cl.getKernelSize()); INDArray outExp; @@ -257,10 +257,10 @@ public class RegressionTest100b4 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b4/SyntheticCNN_100b4.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - ConvolutionLayer l0 = (ConvolutionLayer) net.getLayer(0).conf().getLayer(); + ConvolutionLayer l0 = (ConvolutionLayer) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationReLU(), l0.getActivationFn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l0)); assertEquals(new Adam(0.005), l0.getIUpdater()); assertArrayEquals(new int[]{3, 3}, l0.getKernelSize()); @@ -268,10 +268,10 @@ public class RegressionTest100b4 extends BaseDL4JTest { assertArrayEquals(new int[]{1, 1}, l0.getDilation()); assertArrayEquals(new int[]{0, 0}, l0.getPadding()); - SeparableConvolution2D l1 = (SeparableConvolution2D) net.getLayer(1).conf().getLayer(); + SeparableConvolution2D l1 = (SeparableConvolution2D) 
net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationReLU(), l1.getActivationFn()); assertEquals(8, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l1)); assertEquals(new Adam(0.005), l1.getIUpdater()); assertArrayEquals(new int[]{3, 3}, l1.getKernelSize()); @@ -281,23 +281,23 @@ public class RegressionTest100b4 extends BaseDL4JTest { assertEquals(ConvolutionMode.Same, l1.getConvolutionMode()); assertEquals(1, l1.getDepthMultiplier()); - SubsamplingLayer l2 = (SubsamplingLayer) net.getLayer(2).conf().getLayer(); + SubsamplingLayer l2 = (SubsamplingLayer) net.getLayer(2).getLayerConfiguration(); assertArrayEquals(new int[]{3, 3}, l2.getKernelSize()); assertArrayEquals(new int[]{2, 2}, l2.getStride()); assertArrayEquals(new int[]{1, 1}, l2.getDilation()); assertArrayEquals(new int[]{0, 0}, l2.getPadding()); assertEquals(PoolingType.MAX, l2.getPoolingType()); - ZeroPaddingLayer l3 = (ZeroPaddingLayer) net.getLayer(3).conf().getLayer(); + ZeroPaddingLayer l3 = (ZeroPaddingLayer) net.getLayer(3).getLayerConfiguration(); assertArrayEquals(new int[]{4, 4, 4, 4}, l3.getPadding()); - Upsampling2D l4 = (Upsampling2D) net.getLayer(4).conf().getLayer(); + Upsampling2D l4 = (Upsampling2D) net.getLayer(4).getLayerConfiguration(); assertArrayEquals(new int[]{3, 3}, l4.getSize()); - DepthwiseConvolution2D l5 = (DepthwiseConvolution2D) net.getLayer(5).conf().getLayer(); + DepthwiseConvolution2D l5 = (DepthwiseConvolution2D) net.getLayer(5).getLayerConfiguration(); assertEquals(new ActivationReLU(), l5.getActivationFn()); assertEquals(16, l5.getNOut()); - assertEquals(new WeightInitXavier(), l5.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l5.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l5)); assertEquals(new Adam(0.005), l5.getIUpdater()); assertArrayEquals(new int[]{3, 3}, l5.getKernelSize()); @@ -306,19 +306,19 @@ public class RegressionTest100b4 extends BaseDL4JTest { assertArrayEquals(new int[]{0, 0}, l5.getPadding()); assertEquals(2, l5.getDepthMultiplier()); - SubsamplingLayer l6 = (SubsamplingLayer) net.getLayer(6).conf().getLayer(); + SubsamplingLayer l6 = (SubsamplingLayer) net.getLayer(6).getLayerConfiguration(); assertArrayEquals(new int[]{2, 2}, l6.getKernelSize()); assertArrayEquals(new int[]{2, 2}, l6.getStride()); assertArrayEquals(new int[]{1, 1}, l6.getDilation()); assertArrayEquals(new int[]{0, 0}, l6.getPadding()); assertEquals(PoolingType.MAX, l6.getPoolingType()); - Cropping2D l7 = (Cropping2D) net.getLayer(7).conf().getLayer(); + Cropping2D l7 = (Cropping2D) net.getLayer(7).getLayerConfiguration(); assertArrayEquals(new int[]{3, 3, 2, 2}, l7.getCropping()); - ConvolutionLayer l8 = (ConvolutionLayer) net.getLayer(8).conf().getLayer(); + ConvolutionLayer l8 = (ConvolutionLayer) net.getLayer(8).getLayerConfiguration(); assertEquals(4, l8.getNOut()); - assertEquals(new WeightInitXavier(), l8.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l8.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l8)); assertEquals(new Adam(0.005), l8.getIUpdater()); assertArrayEquals(new int[]{4, 4}, l8.getKernelSize()); @@ -326,8 +326,8 @@ public class RegressionTest100b4 extends BaseDL4JTest { assertArrayEquals(new int[]{1, 1}, l8.getDilation()); assertArrayEquals(new int[]{0, 0}, l8.getPadding()); - CnnLossLayer l9 = (CnnLossLayer) 
net.getLayer(9).conf().getLayer(); - assertEquals(new WeightInitXavier(), l9.getWeightInitFn()); + CnnLossLayer l9 = (CnnLossLayer) net.getLayer(9).getLayerConfiguration(); + assertEquals(new WeightInitXavier(), l9.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l9)); assertEquals(new Adam(0.005), l9.getIUpdater()); assertEquals(new LossMAE(), l9.getLossFn()); @@ -361,7 +361,7 @@ public class RegressionTest100b4 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b4/SyntheticBidirectionalRNNGraph_100b4.bin"); ComputationGraph net = ComputationGraph.load(f, true); - Bidirectional l0 = (Bidirectional) net.getLayer("rnn1").conf().getLayer(); + Bidirectional l0 = (Bidirectional) net.getLayer("rnn1").getLayerConfiguration(); LSTM l1 = (LSTM) l0.getFwd(); assertEquals(16, l1.getNOut()); @@ -373,7 +373,7 @@ public class RegressionTest100b4 extends BaseDL4JTest { assertEquals(new ActivationReLU(), l2.getActivationFn()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l2)); - Bidirectional l3 = (Bidirectional) net.getLayer("rnn2").conf().getLayer(); + Bidirectional l3 = (Bidirectional) net.getLayer("rnn2").getLayerConfiguration(); SimpleRnn l4 = (SimpleRnn) l3.getFwd(); assertEquals(16, l4.getNOut()); @@ -387,12 +387,12 @@ public class RegressionTest100b4 extends BaseDL4JTest { MergeVertex mv = (MergeVertex) net.getVertex("concat"); - GlobalPoolingLayer gpl = (GlobalPoolingLayer) net.getLayer("pooling").conf().getLayer(); + GlobalPoolingLayer gpl = (GlobalPoolingLayer) net.getLayer("pooling").getLayerConfiguration(); assertEquals(PoolingType.MAX, gpl.getPoolingType()); assertArrayEquals(new int[]{2}, gpl.getPoolingDimensions()); assertTrue(gpl.isCollapseDimensions()); - OutputLayer outl = (OutputLayer) net.getLayer("out").conf().getLayer(); + OutputLayer outl = (OutputLayer) net.getLayer("out").getLayerConfiguration(); assertEquals(3, outl.getNOut()); assertEquals(new LossMCXENT(), outl.getLossFn()); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b6.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b6.java index 40df45924..c0ee3dca2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b6.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/RegressionTest100b6.java @@ -73,12 +73,12 @@ public class RegressionTest100b6 extends BaseDL4JTest { MultiLayerNetwork net = MultiLayerNetwork.load(f, true); // net = net.clone(); - DenseLayer l0 = (DenseLayer) net.getLayer(0).conf().getLayer(); + DenseLayer l0 = (DenseLayer) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); assertEquals(new L2Regularization(0.03), TestUtils.getL2Reg(l0)); assertEquals(new RmsProp(0.95), l0.getIUpdater()); - CustomLayer l1 = (CustomLayer) net.getLayer(1).conf().getLayer(); + CustomLayer l1 = (CustomLayer) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(new ActivationSigmoid(), l1.getSecondActivationFunction()); assertEquals(new RmsProp(0.95), l1.getIUpdater()); @@ -98,7 +98,7 @@ public class RegressionTest100b6 extends BaseDL4JTest { assertEquals(dtype, in.dataType()); assertEquals(dtype, outExp.dataType()); - assertEquals(dtype, net.params().dataType()); + assertEquals(dtype, net.getModelParams().dataType()); assertEquals(dtype, net.getFlattenedGradients().dataType()); 
assertEquals(dtype, net.getUpdater().getStateViewArray().dataType()); @@ -107,8 +107,8 @@ public class RegressionTest100b6 extends BaseDL4JTest { INDArray outAct = net.output(in); assertEquals(dtype, outAct.dataType()); - assertEquals(dtype, net.getLayerWiseConfigurations().getDataType()); - assertEquals(dtype, net.params().dataType()); + assertEquals(dtype, net.getNetConfiguration().getDataType()); + assertEquals(dtype, net.getModelParams().dataType()); boolean eq = outExp.equalsWithEps(outAct, 0.01); assertTrue( eq, "Test for dtype: " + dtypeName + " - " + outExp + " vs " + outAct); } @@ -121,30 +121,30 @@ public class RegressionTest100b6 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b6/GravesLSTMCharModelingExample_100b6.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - LSTM l0 = (LSTM) net.getLayer(0).conf().getLayer(); + LSTM l0 = (LSTM) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationTanH(), l0.getActivationFn()); assertEquals(200, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l0)); assertEquals(new Adam(0.005), l0.getIUpdater()); - LSTM l1 = (LSTM) net.getLayer(1).conf().getLayer(); + LSTM l1 = (LSTM) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationTanH(), l1.getActivationFn()); assertEquals(200, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l1)); assertEquals(new Adam(0.005), l1.getIUpdater()); - RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).conf().getLayer(); + RnnOutputLayer l2 = (RnnOutputLayer) net.getLayer(2).getLayerConfiguration(); assertEquals(new ActivationSoftmax(), l2.getActivationFn()); assertEquals(77, l2.getNOut()); - assertEquals(new WeightInitXavier(), l2.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l2.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l2)); assertEquals(new Adam(0.005), l2.getIUpdater()); - assertEquals(BackpropType.TruncatedBPTT, net.getLayerWiseConfigurations().getBackpropType()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttBackLength()); - assertEquals(50, net.getLayerWiseConfigurations().getTbpttFwdLength()); + assertEquals(BackpropType.TruncatedBPTT, net.getNetConfiguration().getBackpropType()); + assertEquals(50, net.getNetConfiguration().getTbpttBackLength()); + assertEquals(50, net.getNetConfiguration().getTbpttFwdLength()); INDArray outExp; File f2 = Resources.asFile("regression_testing/100b6/GravesLSTMCharModelingExample_Output_100b6.bin"); @@ -169,12 +169,12 @@ public class RegressionTest100b6 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b6/VaeMNISTAnomaly_100b6.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).conf().getLayer(); + VariationalAutoencoder l0 = (VariationalAutoencoder) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationLReLU(), l0.getActivationFn()); assertEquals(32, l0.getNOut()); assertArrayEquals(new int[]{256, 256}, l0.getEncoderLayerSizes()); assertArrayEquals(new int[]{256, 256}, l0.getDecoderLayerSizes()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new 
L2Regularization(0.0001), TestUtils.getL2Reg(l0)); assertEquals(new Adam(1e-3), l0.getIUpdater()); @@ -205,12 +205,12 @@ public class RegressionTest100b6 extends BaseDL4JTest { int nBoxes = 5; int nClasses = 10; - ConvolutionLayer cl = (ConvolutionLayer) ((LayerVertex) net.getConfiguration().getVertices() - .get("convolution2d_9")).getLayerConf().getLayer(); + ConvolutionLayer cl = (ConvolutionLayer) ((LayerVertex) net.getComputationGraphConfiguration().getVertices() + .get("convolution2d_9")).getNetConfiguration().getFirstLayer(); assertEquals(nBoxes * (5 + nClasses), cl.getNOut()); assertEquals(new ActivationIdentity(), cl.getActivationFn()); assertEquals(ConvolutionMode.Same, cl.getConvolutionMode()); - assertEquals(new WeightInitXavier(), cl.getWeightInitFn()); + assertEquals(new WeightInitXavier(), cl.getWeightInit()); assertArrayEquals(new int[]{1, 1}, cl.getKernelSize()); INDArray outExp; @@ -237,10 +237,10 @@ public class RegressionTest100b6 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b6/SyntheticCNN_100b6.bin"); MultiLayerNetwork net = MultiLayerNetwork.load(f, true); - ConvolutionLayer l0 = (ConvolutionLayer) net.getLayer(0).conf().getLayer(); + ConvolutionLayer l0 = (ConvolutionLayer) net.getLayer(0).getLayerConfiguration(); assertEquals(new ActivationReLU(), l0.getActivationFn()); assertEquals(4, l0.getNOut()); - assertEquals(new WeightInitXavier(), l0.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l0.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l0)); assertEquals(new Adam(0.005), l0.getIUpdater()); assertArrayEquals(new int[]{3, 3}, l0.getKernelSize()); @@ -248,10 +248,10 @@ public class RegressionTest100b6 extends BaseDL4JTest { assertArrayEquals(new int[]{1, 1}, l0.getDilation()); assertArrayEquals(new int[]{0, 0}, l0.getPadding()); - SeparableConvolution2D l1 = (SeparableConvolution2D) net.getLayer(1).conf().getLayer(); + SeparableConvolution2D l1 = (SeparableConvolution2D) net.getLayer(1).getLayerConfiguration(); assertEquals(new ActivationReLU(), l1.getActivationFn()); assertEquals(8, l1.getNOut()); - assertEquals(new WeightInitXavier(), l1.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l1.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l1)); assertEquals(new Adam(0.005), l1.getIUpdater()); assertArrayEquals(new int[]{3, 3}, l1.getKernelSize()); @@ -261,23 +261,23 @@ public class RegressionTest100b6 extends BaseDL4JTest { assertEquals(ConvolutionMode.Same, l1.getConvolutionMode()); assertEquals(1, l1.getDepthMultiplier()); - SubsamplingLayer l2 = (SubsamplingLayer) net.getLayer(2).conf().getLayer(); + SubsamplingLayer l2 = (SubsamplingLayer) net.getLayer(2).getLayerConfiguration(); assertArrayEquals(new int[]{3, 3}, l2.getKernelSize()); assertArrayEquals(new int[]{2, 2}, l2.getStride()); assertArrayEquals(new int[]{1, 1}, l2.getDilation()); assertArrayEquals(new int[]{0, 0}, l2.getPadding()); assertEquals(PoolingType.MAX, l2.getPoolingType()); - ZeroPaddingLayer l3 = (ZeroPaddingLayer) net.getLayer(3).conf().getLayer(); + ZeroPaddingLayer l3 = (ZeroPaddingLayer) net.getLayer(3).getLayerConfiguration(); assertArrayEquals(new int[]{4, 4, 4, 4}, l3.getPadding()); - Upsampling2D l4 = (Upsampling2D) net.getLayer(4).conf().getLayer(); + Upsampling2D l4 = (Upsampling2D) net.getLayer(4).getLayerConfiguration(); assertArrayEquals(new int[]{3, 3}, l4.getSize()); - DepthwiseConvolution2D l5 = (DepthwiseConvolution2D) net.getLayer(5).conf().getLayer(); + 
DepthwiseConvolution2D l5 = (DepthwiseConvolution2D) net.getLayer(5).getLayerConfiguration(); assertEquals(new ActivationReLU(), l5.getActivationFn()); assertEquals(16, l5.getNOut()); - assertEquals(new WeightInitXavier(), l5.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l5.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l5)); assertEquals(new Adam(0.005), l5.getIUpdater()); assertArrayEquals(new int[]{3, 3}, l5.getKernelSize()); @@ -286,19 +286,19 @@ public class RegressionTest100b6 extends BaseDL4JTest { assertArrayEquals(new int[]{0, 0}, l5.getPadding()); assertEquals(2, l5.getDepthMultiplier()); - SubsamplingLayer l6 = (SubsamplingLayer) net.getLayer(6).conf().getLayer(); + SubsamplingLayer l6 = (SubsamplingLayer) net.getLayer(6).getLayerConfiguration(); assertArrayEquals(new int[]{2, 2}, l6.getKernelSize()); assertArrayEquals(new int[]{2, 2}, l6.getStride()); assertArrayEquals(new int[]{1, 1}, l6.getDilation()); assertArrayEquals(new int[]{0, 0}, l6.getPadding()); assertEquals(PoolingType.MAX, l6.getPoolingType()); - Cropping2D l7 = (Cropping2D) net.getLayer(7).conf().getLayer(); + Cropping2D l7 = (Cropping2D) net.getLayer(7).getLayerConfiguration(); assertArrayEquals(new int[]{3, 3, 2, 2}, l7.getCropping()); - ConvolutionLayer l8 = (ConvolutionLayer) net.getLayer(8).conf().getLayer(); + ConvolutionLayer l8 = (ConvolutionLayer) net.getLayer(8).getLayerConfiguration(); assertEquals(4, l8.getNOut()); - assertEquals(new WeightInitXavier(), l8.getWeightInitFn()); + assertEquals(new WeightInitXavier(), l8.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l8)); assertEquals(new Adam(0.005), l8.getIUpdater()); assertArrayEquals(new int[]{4, 4}, l8.getKernelSize()); @@ -306,8 +306,8 @@ public class RegressionTest100b6 extends BaseDL4JTest { assertArrayEquals(new int[]{1, 1}, l8.getDilation()); assertArrayEquals(new int[]{0, 0}, l8.getPadding()); - CnnLossLayer l9 = (CnnLossLayer) net.getLayer(9).conf().getLayer(); - assertEquals(new WeightInitXavier(), l9.getWeightInitFn()); + CnnLossLayer l9 = (CnnLossLayer) net.getLayer(9).getLayerConfiguration(); + assertEquals(new WeightInitXavier(), l9.getWeightInit()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l9)); assertEquals(new Adam(0.005), l9.getIUpdater()); assertEquals(new LossMAE(), l9.getLossFn()); @@ -341,7 +341,7 @@ public class RegressionTest100b6 extends BaseDL4JTest { File f = Resources.asFile("regression_testing/100b6/SyntheticBidirectionalRNNGraph_100b6.bin"); ComputationGraph net = ComputationGraph.load(f, true); - Bidirectional l0 = (Bidirectional) net.getLayer("rnn1").conf().getLayer(); + Bidirectional l0 = (Bidirectional) net.getLayer("rnn1").getLayerConfiguration(); LSTM l1 = (LSTM) l0.getFwd(); assertEquals(16, l1.getNOut()); @@ -353,7 +353,7 @@ public class RegressionTest100b6 extends BaseDL4JTest { assertEquals(new ActivationReLU(), l2.getActivationFn()); assertEquals(new L2Regularization(0.0001), TestUtils.getL2Reg(l2)); - Bidirectional l3 = (Bidirectional) net.getLayer("rnn2").conf().getLayer(); + Bidirectional l3 = (Bidirectional) net.getLayer("rnn2").getLayerConfiguration(); SimpleRnn l4 = (SimpleRnn) l3.getFwd(); assertEquals(16, l4.getNOut()); @@ -367,12 +367,12 @@ public class RegressionTest100b6 extends BaseDL4JTest { MergeVertex mv = (MergeVertex) net.getVertex("concat"); - GlobalPoolingLayer gpl = (GlobalPoolingLayer) net.getLayer("pooling").conf().getLayer(); + GlobalPoolingLayer gpl = (GlobalPoolingLayer) 
net.getLayer("pooling").getLayerConfiguration(); assertEquals(PoolingType.MAX, gpl.getPoolingType()); assertArrayEquals(new int[]{2}, gpl.getPoolingDimensions()); assertTrue(gpl.isCollapseDimensions()); - OutputLayer outl = (OutputLayer) net.getLayer("out").conf().getLayer(); + OutputLayer outl = (OutputLayer) net.getLayer("out").getLayerConfiguration(); assertEquals(3, outl.getNOut()); assertEquals(new LossMCXENT(), outl.getLossFn()); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayer.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayer.java index 00a2b6242..b20ad6f00 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayer.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayer.java @@ -28,6 +28,7 @@ import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.memory.MemoryReport; import org.deeplearning4j.nn.params.DefaultParamInitializer; @@ -68,12 +69,14 @@ public class CustomLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection iterationListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(0); //The instantiate method is how we go from the configuration class (i.e., this class) to the implementation class // (i.e., a CustomLayerImpl instance) //For the most part, it's the same for each type of layer - CustomLayerImpl myCustomLayer = new CustomLayerImpl(conf, networkDataType); - myCustomLayer.setListeners(iterationListeners); //Set the iteration listeners, if any + CustomLayerImpl myCustomLayer = new CustomLayerImpl(lconf, networkDataType); + myCustomLayer.addTrainingListeners(iterationListeners); //Set the iteration listeners, if any myCustomLayer.setIndex(layerIndex); //Integer index of the layer //Parameter view array: In Deeplearning4j, the network parameters for the entire network (all layers) are @@ -85,16 +88,16 @@ public class CustomLayer extends FeedForwardLayer { //Initialize the layer parameters. For example, // Note that the entries in paramTable (2 entries here: a weight array of shape [nIn,nOut] and biases of shape [1,nOut] // are in turn a view of the 'layerParamsView' array. 
- Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); myCustomLayer.setParamTable(paramTable); - myCustomLayer.setConf(conf); + myCustomLayer.setLayerConfiguration(lconf); return myCustomLayer; } @Override public ParamInitializer initializer() { //This method returns the parameter initializer for this type of layer - //In this case, we can use the DefaultParamInitializer, which is the same one used for DenseLayer + //In this case, we can use the DefaultParamInitializer, which is the same one used for DenseLayerConfiguration //For more complex layers, you may need to implement a custom parameter initializer //See the various parameter initializers here: //https://github.com/deeplearning4j/deeplearning4j/tree/master/deeplearning4j-core/src/main/java/org/deeplearning4j/nn/params @@ -108,7 +111,7 @@ public class CustomLayer extends FeedForwardLayer { //If you don't need this functionality for your custom layer, you can return a LayerMemoryReport // with all 0s, or - //This implementation: based on DenseLayer implementation + //This implementation: based on DenseLayerConfiguration implementation InputType outputType = getOutputType(-1, inputType); val numParams = initializer().numParams(this); @@ -131,7 +134,7 @@ public class CustomLayer extends FeedForwardLayer { .workingMemory(0, 0, trainSizeFixed, trainSizeVariable) //No additional memory (beyond activations) for inference .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, - MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayer + MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayerConfiguration .build(); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayerImpl.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayerImpl.java index 18c0ab8e0..14e13634b 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayerImpl.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/regressiontest/customlayer100a/CustomLayerImpl.java @@ -20,7 +20,7 @@ package org.deeplearning4j.regressiontest.customlayer100a; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.BaseLayer; @@ -35,7 +35,7 @@ import org.nd4j.common.primitives.Pair; public class CustomLayerImpl extends BaseLayer { //Generic parameter here: the configuration class type - public CustomLayerImpl(NeuralNetConfiguration conf, DataType dataType) { + public CustomLayerImpl(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -55,8 +55,8 @@ public class CustomLayerImpl extends BaseLayer { //Generic paramete INDArray firstHalf = output.get(NDArrayIndex.all(), NDArrayIndex.interval(0, columns / 2)); INDArray secondHalf = output.get(NDArrayIndex.all(), NDArrayIndex.interval(columns / 2, columns)); - IActivation activation1 = layerConf().getActivationFn(); - IActivation activation2 = ((CustomLayer) conf.getLayer()).getSecondActivationFunction(); + IActivation activation1 = getTypedLayerConfiguration().getActivationFn(); + IActivation activation2 = ((CustomLayer) getLayerConfiguration()).getSecondActivationFunction(); //IActivation function instances modify the activation functions 
in-place activation1.getActivation(firstHalf, training); @@ -74,7 +74,7 @@ public class CustomLayerImpl extends BaseLayer { //Generic paramete @Override public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { /* - The baockprop gradient method here is very similar to the BaseLayer backprop gradient implementation + The baockprop gradient method here is very similar to the BaseLayerConfiguration backprop gradient implementation The only major difference is the two activation functions we have added in this example. Note that epsilon is dL/da - i.e., the derivative of the loss function with respect to the activations. @@ -104,14 +104,14 @@ public class CustomLayerImpl extends BaseLayer { //Generic paramete INDArray epsilonFirstHalf = epsilon.get(NDArrayIndex.all(), NDArrayIndex.interval(0, columns / 2)); INDArray epsilonSecondHalf = epsilon.get(NDArrayIndex.all(), NDArrayIndex.interval(columns / 2, columns)); - IActivation activation1 = layerConf().getActivationFn(); - IActivation activation2 = ((CustomLayer) conf.getLayer()).getSecondActivationFunction(); + IActivation activation1 = getTypedLayerConfiguration().getActivationFn(); + IActivation activation2 = ((CustomLayer) getLayerConfiguration()).getSecondActivationFunction(); //IActivation backprop method modifies the 'firstHalf' and 'secondHalf' arrays in-place, to contain dL/dz activation1.backprop(firstHalf, epsilonFirstHalf); activation2.backprop(secondHalf, epsilonSecondHalf); - //The remaining code for this method: just copy & pasted from BaseLayer.backpropGradient + //The remaining code for this method: just copy & pasted from BaseLayerConfiguration.backpropGradient // INDArray delta = epsilon.muli(activationDerivative); if (maskArray != null) { activationDerivative.muliColumnVector(maskArray); @@ -127,7 +127,7 @@ public class CustomLayerImpl extends BaseLayer { //Generic paramete ret.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGrad); ret.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGrad); - INDArray epsilonNext = params.get(DefaultParamInitializer.WEIGHT_KEY).mmul(activationDerivative.transpose()).transpose(); + INDArray epsilonNext = getParamTable().get(DefaultParamInitializer.WEIGHT_KEY).mmul(activationDerivative.transpose()).transpose(); return new Pair<>(ret, epsilonNext); } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/samediff/CompareTrainingImplementations.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/samediff/CompareTrainingImplementations.java index 73610f45e..b4edb0ba8 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/samediff/CompareTrainingImplementations.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/samediff/CompareTrainingImplementations.java @@ -23,7 +23,6 @@ package org.deeplearning4j.samediff; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -151,7 +150,7 @@ public class CompareTrainingImplementations extends BaseDL4JTest { //Create equivalent DL4J net - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) 
.weightInit(WeightInit.XAVIER).seed(12345) .l1(l1Val).l2(l2Val) @@ -165,7 +164,7 @@ public class CompareTrainingImplementations extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(mlc); net.init(); - Map oldParams = net.paramTable(); + Map oldParams = net.getParamTable(); //Assign parameters so we have identical models at the start: w0.getArr().assign(net.getParam("0_W")); @@ -191,7 +190,7 @@ public class CompareTrainingImplementations extends BaseDL4JTest { //Check score - double scoreDl4j = net.score(); + double scoreDl4j = net.getScore(); double scoreSd = map.get(lossMse.name()).getDouble(0) + sd.calcRegularizationScore(); assertEquals(scoreDl4j, scoreSd, 1e-6, testName); @@ -215,7 +214,7 @@ public class CompareTrainingImplementations extends BaseDL4JTest { //Check training with updater - mlc = new NeuralNetConfiguration.Builder() + mlc = NeuralNetConfiguration.builder() .dataType(DataType.DOUBLE) .weightInit(WeightInit.XAVIER).seed(12345) .l1(l1Val).l2(l2Val) diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/CrashReportingUtilTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/CrashReportingUtilTest.java index 4da9883b8..e0eeef88d 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/CrashReportingUtilTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/CrashReportingUtilTest.java @@ -24,7 +24,6 @@ import org.apache.commons.io.FileUtils; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.EarlyTerminationDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -80,8 +79,8 @@ public class CrashReportingUtilTest extends BaseDL4JTest { int width = 28; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new NoOp()) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new NoOp()) .dist(new NormalDistribution(0, 1)) .list().layer(0, @@ -99,13 +98,13 @@ public class CrashReportingUtilTest extends BaseDL4JTest { .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) .nOut(10).build()) - .setInputType(InputType.convolutionalFlat(height, width, + .inputType(InputType.convolutionalFlat(height, width, inputDepth)) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.addListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); //Test net that hasn't been trained yet Exception e = new Exception(); @@ -117,7 +116,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { String str = FileUtils.readFileToString(list[0]); // System.out.println(str); assertTrue(str.contains("Network Information")); - assertTrue(str.contains("Layer Helpers")); + assertTrue(str.contains("ILayer Helpers")); assertTrue(str.contains("JavaCPP")); assertTrue(str.contains("ScoreIterationListener")); @@ -134,7 +133,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { assertEquals(1, list.length); str = FileUtils.readFileToString(list[0]); assertTrue(str.contains("Network Information")); - assertTrue(str.contains("Layer Helpers")); + assertTrue(str.contains("ILayer Helpers")); assertTrue(str.contains("JavaCPP")); 
assertTrue(str.contains("ScoreIterationListener(1)")); @@ -150,7 +149,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { // System.out.println("///////////////////////////////////////////////////////////"); assertTrue(mlnMemoryInfo.contains("Network Information")); - assertTrue(mlnMemoryInfo.contains("Layer Helpers")); + assertTrue(mlnMemoryInfo.contains("ILayer Helpers")); assertTrue(mlnMemoryInfo.contains("JavaCPP")); assertTrue(mlnMemoryInfo.contains("ScoreIterationListener(1)")); @@ -162,7 +161,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { CrashReportingUtil.crashDumpOutputDirectory(dir); ComputationGraph cg = net.toComputationGraph(); - cg.setListeners(new ScoreIterationListener(1)); + cg.addTrainingListeners(new ScoreIterationListener(1)); //Test net that hasn't been trained yet CrashReportingUtil.writeMemoryCrashDump(cg, e); @@ -172,7 +171,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { assertEquals(1, list.length); str = FileUtils.readFileToString(list[0]); assertTrue(str.contains("Network Information")); - assertTrue(str.contains("Layer Helpers")); + assertTrue(str.contains("ILayer Helpers")); assertTrue(str.contains("JavaCPP")); assertTrue(str.contains("ScoreIterationListener(1)")); @@ -187,7 +186,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { assertEquals(1, list.length); str = FileUtils.readFileToString(list[0]); assertTrue(str.contains("Network Information")); - assertTrue(str.contains("Layer Helpers")); + assertTrue(str.contains("ILayer Helpers")); assertTrue(str.contains("JavaCPP")); assertTrue(str.contains("ScoreIterationListener(1)")); @@ -203,7 +202,7 @@ public class CrashReportingUtilTest extends BaseDL4JTest { // System.out.println("///////////////////////////////////////////////////////////"); assertTrue(cgMemoryInfo.contains("Network Information")); - assertTrue(cgMemoryInfo.contains("Layer Helpers")); + assertTrue(cgMemoryInfo.contains("ILayer Helpers")); assertTrue(cgMemoryInfo.contains("JavaCPP")); assertTrue(cgMemoryInfo.contains("ScoreIterationListener(1)")); diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelGuesserTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelGuesserTest.java index 2ff1c481d..e941c75ee 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelGuesserTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelGuesserTest.java @@ -20,12 +20,11 @@ package org.deeplearning4j.util; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.compress.utils.IOUtils; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.core.util.ModelGuesser; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -60,11 +59,11 @@ public class ModelGuesserTest extends BaseDL4JTest { public void testModelGuessFile() throws Exception { File f = Resources.asFile("modelimport/keras/examples/mnist_mlp/mnist_mlp_tf_keras_1_model.h5"); assertTrue(f.exists()); - Model guess1 = ModelGuesser.loadModelGuess(f.getAbsolutePath()); + IModel guess1 = ModelGuesser.loadModelGuess(f.getAbsolutePath()); Assertions.assertNotNull(guess1); f = Resources.asFile("modelimport/keras/examples/mnist_cnn/mnist_cnn_tf_keras_1_model.h5"); 
assertTrue(f.exists()); - Model guess2 = ModelGuesser.loadModelGuess(f.getAbsolutePath()); + IModel guess2 = ModelGuesser.loadModelGuess(f.getAbsolutePath()); Assertions.assertNotNull(guess2); } @@ -75,7 +74,7 @@ public class ModelGuesserTest extends BaseDL4JTest { assertTrue(f.exists()); try (InputStream inputStream = new FileInputStream(f)) { - Model guess1 = ModelGuesser.loadModelGuess(inputStream); + IModel guess1 = ModelGuesser.loadModelGuess(inputStream); Assertions.assertNotNull(guess1); } @@ -83,7 +82,7 @@ public class ModelGuesserTest extends BaseDL4JTest { assertTrue(f.exists()); try (InputStream inputStream = new FileInputStream(f)) { - Model guess1 = ModelGuesser.loadModelGuess(inputStream); + IModel guess1 = ModelGuesser.loadModelGuess(inputStream); Assertions.assertNotNull(guess1); } } @@ -101,7 +100,7 @@ public class ModelGuesserTest extends BaseDL4JTest { NormalizerMinMaxScaler normalizer = new NormalizerMinMaxScaler(0, 1); normalizer.fit(new DataSet(Nd4j.rand(2, 2), Nd4j.rand(2, 2))); ModelSerializer.addNormalizerToModel(tempFile, normalizer); - Model model = ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); + IModel model = ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); Normalizer normalizer1 = ModelGuesser.loadNormalizer(tempFile.getAbsolutePath()); assertEquals(model, net); assertEquals(normalizer, normalizer1); @@ -119,7 +118,7 @@ public class ModelGuesserTest extends BaseDL4JTest { normalizer.fit(new DataSet(Nd4j.rand(2, 2), Nd4j.rand(2, 2))); ModelSerializer.writeModel(net, tempFile, true,normalizer); - Model model = ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); + IModel model = ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); Normalizer normalizer1 = ModelGuesser.loadNormalizer(tempFile.getAbsolutePath()); assertEquals(model, net); assertEquals(normalizer, normalizer1); @@ -137,7 +136,7 @@ public class ModelGuesserTest extends BaseDL4JTest { NormalizerMinMaxScaler normalizer = new NormalizerMinMaxScaler(0, 1); normalizer.fit(new DataSet(Nd4j.rand(2, 2), Nd4j.rand(2, 2))); ModelSerializer.addNormalizerToModel(tempFile, normalizer); - Model model = ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); + IModel model = ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); try (InputStream inputStream = new FileInputStream(tempFile)) { Normalizer normalizer1 = ModelGuesser.loadNormalizer(inputStream); assertEquals(model, net); @@ -156,8 +155,8 @@ public class ModelGuesserTest extends BaseDL4JTest { ModelSerializer.writeModel(net, tempFile, true); MultiLayerNetwork network = (MultiLayerNetwork) ModelGuesser.loadModelGuess(tempFile.getAbsolutePath()); - assertEquals(network.getLayerWiseConfigurations().toJson(), net.getLayerWiseConfigurations().toJson()); - assertEquals(net.params(), network.params()); + assertEquals(network.getNetConfiguration().toJson(), net.getNetConfiguration().toJson()); + assertEquals(net.getModelParams(), network.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray()); } @@ -173,8 +172,8 @@ public class ModelGuesserTest extends BaseDL4JTest { try (InputStream inputStream = new FileInputStream(tempFile)) { MultiLayerNetwork network = (MultiLayerNetwork) ModelGuesser.loadModelGuess(inputStream); Assertions.assertNotNull(network); - assertEquals(network.getLayerWiseConfigurations().toJson(), net.getLayerWiseConfigurations().toJson()); - assertEquals(net.params(), network.params()); + assertEquals(network.getNetConfiguration().toJson(), 
net.getNetConfiguration().toJson()); + assertEquals(net.getModelParams(), network.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray()); } } @@ -187,7 +186,7 @@ public class ModelGuesserTest extends BaseDL4JTest { File f = getTempFile(resource); String configFilename = f.getAbsolutePath(); Object conf = ModelGuesser.loadConfigGuess(configFilename); - assertTrue(conf instanceof MultiLayerConfiguration); + assertTrue(conf instanceof NeuralNetConfiguration); ClassPathResource sequenceResource = new ClassPathResource("/keras/simple/mlp_fapi_multiloss_config.json"); File f2 = getTempFile(sequenceResource); @@ -212,7 +211,7 @@ public class ModelGuesserTest extends BaseDL4JTest { try (InputStream inputStream = new FileInputStream(f)) { Object conf = ModelGuesser.loadConfigGuess(inputStream); - assertTrue(conf instanceof MultiLayerConfiguration); + assertTrue(conf instanceof NeuralNetConfiguration); } ClassPathResource sequenceResource = new ClassPathResource("/keras/simple/mlp_fapi_multiloss_config.json"); @@ -249,7 +248,7 @@ public class ModelGuesserTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01).l2(0.01) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01).l2(0.01) .updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder() diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelSerializerTest.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelSerializerTest.java index 610cb0961..495b403d5 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelSerializerTest.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelSerializerTest.java @@ -26,7 +26,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -65,7 +64,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder() @@ -81,8 +80,8 @@ public class ModelSerializerTest extends BaseDL4JTest { MultiLayerNetwork network = ModelSerializer.restoreMultiLayerNetwork(tempFile); - assertEquals(network.getLayerWiseConfigurations().toJson(), net.getLayerWiseConfigurations().toJson()); - assertEquals(net.params(), network.params()); + assertEquals(network.getNetConfiguration().toJson(), net.getNetConfiguration().toJson()); + assertEquals(net.getModelParams(), network.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), 
network.getUpdater().getStateViewArray()); } @@ -91,7 +90,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder() @@ -125,15 +124,15 @@ public class ModelSerializerTest extends BaseDL4JTest { MultiLayerNetwork network = ModelSerializer.restoreMultiLayerNetwork(fis); - assertEquals(network.getLayerWiseConfigurations().toJson(), net.getLayerWiseConfigurations().toJson()); - assertEquals(net.params(), network.params()); + assertEquals(network.getNetConfiguration().toJson(), net.getNetConfiguration().toJson()); + assertEquals(net.getModelParams(), network.getModelParams()); assertEquals(net.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray()); } @Test public void testWriteCGModel() throws Exception { - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1)) .graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out", @@ -151,14 +150,14 @@ public class ModelSerializerTest extends BaseDL4JTest { ComputationGraph network = ModelSerializer.restoreComputationGraph(tempFile); - assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson()); - assertEquals(cg.params(), network.params()); + assertEquals(network.getComputationGraphConfiguration().toJson(), cg.getComputationGraphConfiguration().toJson()); + assertEquals(cg.getModelParams(), network.getModelParams()); assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray()); } @Test public void testWriteCGModelInputStream() throws Exception { - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1)) .graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out", @@ -177,8 +176,8 @@ public class ModelSerializerTest extends BaseDL4JTest { ComputationGraph network = ModelSerializer.restoreComputationGraph(fis); - assertEquals(network.getConfiguration().toJson(), cg.getConfiguration().toJson()); - assertEquals(cg.params(), network.params()); + assertEquals(network.getComputationGraphConfiguration().toJson(), cg.getComputationGraphConfiguration().toJson()); + assertEquals(cg.getModelParams(), network.getModelParams()); assertEquals(cg.getUpdater().getStateViewArray(), network.getUpdater().getStateViewArray()); } @@ -189,7 +188,7 @@ public class ModelSerializerTest extends BaseDL4JTest { } private ComputationGraph simpleComputationGraph() { - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).updater(new Sgd(0.1)) .graphBuilder().addInputs("in") .addLayer("dense", new 
DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out", @@ -253,7 +252,7 @@ public class ModelSerializerTest extends BaseDL4JTest { @Test public void testInvalidLoading1() throws Exception { - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in") .addLayer("out",new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) @@ -282,7 +281,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .l2(0.01).updater(new Sgd(0.1)).activation(Activation.TANH).weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(20).build()) .layer(1, new DenseLayer.Builder().nIn(20).nOut(30).build()).layer(2, new OutputLayer.Builder() @@ -310,7 +309,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .list() .layer(new OutputLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.SOFTMAX).build()) .build(); @@ -347,7 +346,7 @@ public class ModelSerializerTest extends BaseDL4JTest { //Also test reading both model and normalizer from stream (correctly) Pair pair = ModelSerializer.restoreMultiLayerNetworkAndNormalizer(new FileInputStream(tempFile), true); - assertEquals(net.params(), pair.getFirst().params()); + assertEquals(net.getModelParams(), pair.getFirst().getModelParams()); assertNotNull(pair.getSecond()); } @@ -357,7 +356,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .graphBuilder() .addInputs("in") .layer("0", new OutputLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.SOFTMAX).build(), "in") @@ -396,7 +395,7 @@ public class ModelSerializerTest extends BaseDL4JTest { //Also test reading both model and normalizer from stream (correctly) Pair pair = ModelSerializer.restoreComputationGraphAndNormalizer(new FileInputStream(tempFile), true); - assertEquals(net.params(), pair.getFirst().params()); + assertEquals(net.getModelParams(), pair.getFirst().getModelParams()); assertNotNull(pair.getSecond()); } @@ -406,7 +405,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .graphBuilder() .addInputs("in") .layer("0", new OutputLayer.Builder().nIn(nIn).nOut(nOut).build(), "in") @@ -433,7 +432,7 @@ public class ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .list() .layer(0, new OutputLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.SOFTMAX).build()) .build(); @@ -458,7 +457,7 @@ public class 
ModelSerializerTest extends BaseDL4JTest { int nIn = 5; int nOut = 6; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).l1(0.01) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).l1(0.01) .graphBuilder() .addInputs("in") .layer("0", new OutputLayer.Builder().nIn(nIn).nOut(nOut).activation(Activation.SOFTMAX).build(), "in") @@ -497,6 +496,6 @@ public class ModelSerializerTest extends BaseDL4JTest { assertTrue(entries.contains("otherData.bin")); ComputationGraph restoredNet = ModelSerializer.restoreComputationGraph(tempFile); - assertEquals(net.params(), restoredNet.params()); + assertEquals(net.getModelParams(), restoredNet.getModelParams()); } } diff --git a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelValidatorTests.java b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelValidatorTests.java index 9d6a27183..eef3472d2 100644 --- a/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelValidatorTests.java +++ b/cavis-dnn/cavis-dnn-core/src/test/java/org/deeplearning4j/util/ModelValidatorTests.java @@ -23,7 +23,6 @@ package org.deeplearning4j.util; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -167,7 +166,7 @@ public class ModelValidatorTests extends BaseDL4JTest { assertFalse(vr6.isValid()); s = vr6.getIssues().get(0); assertEquals(1, vr6.getIssues().size()); - assertTrue(s.contains("JSON") && s.contains("valid") && s.contains("MultiLayerConfiguration"), s); + assertTrue(s.contains("JSON") && s.contains("valid") && s.contains("NeuralNetConfiguration"), s); assertEquals("MultiLayerNetwork", vr6.getFormatType()); assertEquals(MultiLayerNetwork.class, vr6.getFormatClass()); assertNotNull(vr6.getException()); @@ -296,7 +295,7 @@ public class ModelValidatorTests extends BaseDL4JTest { public static MultiLayerNetwork getSimpleNet(){ - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345) .updater(new Adam(0.01)) .list() diff --git a/cavis-dnn/cavis-dnn-cudnn/src/main/java/org/deeplearning4j/cuda/recurrent/CudnnLSTMHelper.java b/cavis-dnn/cavis-dnn-cudnn/src/main/java/org/deeplearning4j/cuda/recurrent/CudnnLSTMHelper.java index 120078d07..2b71d920a 100644 --- a/cavis-dnn/cavis-dnn-cudnn/src/main/java/org/deeplearning4j/cuda/recurrent/CudnnLSTMHelper.java +++ b/cavis-dnn/cavis-dnn-cudnn/src/main/java/org/deeplearning4j/cuda/recurrent/CudnnLSTMHelper.java @@ -198,7 +198,7 @@ public class CudnnLSTMHelper extends BaseCudnnHelper implements LSTMHelper { } if (!(activationFn instanceof ActivationTanH)) { supported = false; - log.warn("Not supported: Layer activation functions != ActivationTanH"); + log.warn("Not supported: ILayer activation functions != ActivationTanH"); } if (hasPeepholeConnections) { supported = false; diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasLayer.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasLayer.java index 5c8c829c4..810dbce85 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasLayer.java +++ 
b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasLayer.java @@ -27,13 +27,12 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.samediff.SameDiffLambdaLayer; import org.deeplearning4j.nn.modelimport.keras.config.KerasLayerConfiguration; import org.deeplearning4j.nn.modelimport.keras.config.KerasLayerConfigurationFactory; import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; import org.deeplearning4j.nn.modelimport.keras.exceptions.UnsupportedKerasConfigurationException; -import org.deeplearning4j.nn.modelimport.keras.layers.convolutional.KerasConvolutionUtils; import org.deeplearning4j.nn.modelimport.keras.utils.KerasLayerUtils; import org.deeplearning4j.nn.modelimport.keras.utils.KerasRegularizerUtils; import org.nd4j.common.util.ArrayUtil; @@ -57,7 +56,7 @@ public class KerasLayer { protected DimOrder dimOrder; // Keras layer backend dimension order protected List inboundLayerNames; // List of inbound layers protected List outboundLayerNames; //List of outbound layers - protected Layer layer; // Resulting DL4J layer + protected LayerConfiguration layer; // Resulting DL4J layer protected GraphVertex vertex; // Resulting DL4J vertex protected Map weights; // Weights protected double weightL1Regularization = 0.0; // L1 regularization @@ -295,14 +294,14 @@ public class KerasLayer { } /** - * Copy Keras layer weights to DL4J Layer. + * Copy Keras layer weights to DL4J ILayer. * * @param layer DL4J layer * @throws InvalidKerasConfigurationException Invalid Keras configuration */ public void copyWeightsToLayer(org.deeplearning4j.nn.api.Layer layer) throws InvalidKerasConfigurationException { if (this.getNumParams() > 0) { - String dl4jLayerName = layer.conf().getLayer().getLayerName(); + String dl4jLayerName = layer.getLayerConfiguration().getLayerName(); String kerasLayerName = this.getLayerName(); String msg = "Error when attempting to copy weights from Keras layer " + kerasLayerName + " to DL4J layer " + dl4jLayerName; @@ -310,7 +309,7 @@ public class KerasLayer { if (getWeights() == null) throw new InvalidKerasConfigurationException(msg + "(weights is null)"); - Set paramsInLayer = new HashSet<>(layer.paramTable().keySet()); + Set paramsInLayer = new HashSet<>(layer.getParamTable().keySet()); Set paramsInKerasLayer = new HashSet<>(this.weights.keySet()); /* Check for parameters in layer for which we don't have weights. */ @@ -322,7 +321,7 @@ public class KerasLayer { } /* Check for parameters NOT in layer for which we DO have weights. */ - paramsInKerasLayer.removeAll(layer.paramTable().keySet()); + paramsInKerasLayer.removeAll(layer.getParamTable().keySet()); if (!paramsInKerasLayer.isEmpty()) { String joinedParamsInKerasLayer = StringUtils.join(paramsInKerasLayer, ", "); throw new InvalidKerasConfigurationException( @@ -330,9 +329,9 @@ public class KerasLayer { } /* Copy weights. 
*/ - for (String paramName : layer.paramTable().keySet()) { + for (String paramName : layer.getParamTable().keySet()) { try { - long[] dl4jWeights = layer.paramTable().get(paramName).shape(); + long[] dl4jWeights = layer.getParamTable().get(paramName).shape(); long[] kerasWeights = weights.get(paramName).shape(); INDArray variable = this.weights.get(paramName); if(!Arrays.equals(dl4jWeights,kerasWeights) && @@ -348,7 +347,7 @@ public class KerasLayer { log.error(e.getMessage()); throw new InvalidKerasConfigurationException(e.getMessage() + "\nTried to set weights for layer with name " + this.getLayerName() - + ", of " + layer.conf().getLayer().getClass() + ".\n" + + ", of " + layer.getLayerConfiguration().getClass() + ".\n" + "Failed to set weights for parameter " + paramName + "\n" + "Expected shape for this parameter: " + layer.getParam(paramName).shapeInfoToString() + ", \ngot: " + this.weights.get(paramName).shapeInfoToString()); @@ -358,7 +357,7 @@ public class KerasLayer { } /** - * Whether this Keras layer maps to a DL4J Layer. + * Whether this Keras layer maps to a DL4J ILayer. * * @return true or false */ @@ -367,16 +366,16 @@ public class KerasLayer { } /** - * Gets corresponding DL4J Layer, if any. + * Gets corresponding DL4J ILayer, if any. * - * @return DL4J Layer + * @return DL4J ILayer * @see org.deeplearning4j.nn.api.Layer */ - public Layer getLayer() { + public LayerConfiguration getLayer() { return this.layer; } - public void setLayer(Layer layer){ + public void setLayer(LayerConfiguration layer){ this.layer = layer; } diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModel.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModel.java index d4bf6ba92..4ce518eac 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModel.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModel.java @@ -22,11 +22,10 @@ package org.deeplearning4j.nn.modelimport.keras; import lombok.Data; import lombok.extern.slf4j.Slf4j; -import org.apache.commons.collections4.set.ListOrderedSet; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.graph.PreprocessorVertex; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.samediff.SameDiffLambdaLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.modelimport.keras.config.KerasLayerConfiguration; @@ -44,13 +43,10 @@ import org.deeplearning4j.nn.modelimport.keras.utils.KerasModelBuilder; import org.deeplearning4j.nn.modelimport.keras.utils.KerasModelUtils; import org.deeplearning4j.nn.modelimport.keras.utils.KerasOptimizerUtils; import org.deeplearning4j.util.ConvolutionUtils; -import org.nd4j.autodiff.samediff.internal.DependencyList; -import org.nd4j.autodiff.samediff.internal.DependencyTracker; import org.nd4j.common.primitives.Counter; import org.nd4j.common.primitives.Pair; import org.nd4j.linalg.learning.config.IUpdater; import com.google.common.collect.Lists; -import org.tensorflow.framework.NodeDef; import java.io.IOException; import java.util.*; @@ -444,7 +440,7 @@ public class KerasModel { } KerasInput kerasInput = (KerasInput) layer; - Layer layer1 = layersOrdered.get(kerasLayerIdx + 1).layer; + LayerConfiguration layer1 = 
layersOrdered.get(kerasLayerIdx + 1).layer; //no dim order, try to pull it from the next layer if there is one if(ConvolutionUtils.layerHasConvolutionLayout(layer1)) { CNN2DFormat formatForLayer = ConvolutionUtils.getFormatForLayer(layer1); @@ -491,7 +487,7 @@ public class KerasModel { && !this.className.equals(config.getFieldNameClassFunctional())) throw new InvalidKerasConfigurationException( "Keras model class name " + this.className + " incompatible with ComputationGraph"); - NeuralNetConfiguration.Builder modelBuilder = new NeuralNetConfiguration.Builder(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder modelBuilder = NeuralNetConfiguration.builder(); if (optimizer != null) { modelBuilder.updater(optimizer); @@ -583,8 +579,8 @@ public class KerasModel { graphBuilder.addVertex(layer.getLayerName(), layer.getVertex(), inboundLayerNamesArray); } else if (layer.isInputPreProcessor()) { if (preprocessor == null) - throw new UnsupportedKerasConfigurationException("Layer " + layer.getLayerName() - + " could not be mapped to Layer, Vertex, or InputPreProcessor"); + throw new UnsupportedKerasConfigurationException("ILayer " + layer.getLayerName() + + " could not be mapped to ILayer, Vertex, or InputPreProcessor"); graphBuilder.addVertex(layer.getLayerName(), new PreprocessorVertex(preprocessor), inboundLayerNamesArray); } @@ -597,8 +593,8 @@ public class KerasModel { /* Whether to use standard backprop (or BPTT) or truncated BPTT. */ if (this.useTruncatedBPTT && this.truncatedBPTT > 0) - graphBuilder.backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(truncatedBPTT) - .tBPTTBackwardLength(truncatedBPTT); + graphBuilder.backpropType(BackpropType.TruncatedBPTT).tbpttFwdLength(truncatedBPTT) + .tbpttBackLength(truncatedBPTT); else graphBuilder.backpropType(BackpropType.Standard); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModelImport.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModelImport.java index c9f3d15a0..850cdd7ad 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModelImport.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasModelImport.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.modelimport.keras; import lombok.extern.slf4j.Slf4j; import org.apache.commons.io.IOUtils; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; import org.deeplearning4j.nn.modelimport.keras.exceptions.UnsupportedKerasConfigurationException; @@ -341,12 +341,12 @@ public class KerasModelImport { * @throws IOException IO exception * @see MultiLayerNetwork */ - public static MultiLayerConfiguration importKerasSequentialConfiguration(String modelJsonFilename, + public static NeuralNetConfiguration importKerasSequentialConfiguration(String modelJsonFilename, boolean enforceTrainingConfig) throws IOException, InvalidKerasConfigurationException, UnsupportedKerasConfigurationException { KerasSequentialModel kerasModel = new KerasSequentialModel().modelBuilder().modelJsonFilename(modelJsonFilename) .enforceTrainingConfig(enforceTrainingConfig).buildSequential(); - return kerasModel.getMultiLayerConfiguration(); 
+ return kerasModel.getNeuralNetConfiguration(); } /** @@ -358,11 +358,11 @@ public class KerasModelImport { * @throws IOException IO exception * @see MultiLayerNetwork */ - public static MultiLayerConfiguration importKerasSequentialConfiguration(String modelJsonFilename) + public static NeuralNetConfiguration importKerasSequentialConfiguration(String modelJsonFilename) throws IOException, InvalidKerasConfigurationException, UnsupportedKerasConfigurationException { KerasSequentialModel kerasModel = new KerasSequentialModel().modelBuilder().modelJsonFilename(modelJsonFilename) .enforceTrainingConfig(false).buildSequential(); - return kerasModel.getMultiLayerConfiguration(); + return kerasModel.getNeuralNetConfiguration(); } private static File toTempFile(InputStream is) throws IOException { diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasSequentialModel.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasSequentialModel.java index 696dc3df9..2a99d0c34 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasSequentialModel.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/KerasSequentialModel.java @@ -23,7 +23,6 @@ package org.deeplearning4j.nn.modelimport.keras; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.conf.BackpropType; import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; @@ -159,11 +158,11 @@ public class KerasSequentialModel extends KerasModel { } /** - * Configure a MultiLayerConfiguration from this Keras Sequential model configuration. + * Configure a NeuralNetConfiguration from this Keras Sequential model configuration. * - * @return MultiLayerConfiguration + * @return NeuralNetConfiguration */ - public MultiLayerConfiguration getMultiLayerConfiguration() + public NeuralNetConfiguration getNeuralNetConfiguration() throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException { if (!this.className.equals(config.getFieldClassNameSequential())) throw new InvalidKerasConfigurationException( @@ -175,15 +174,15 @@ public class KerasSequentialModel extends KerasModel { throw new InvalidKerasConfigurationException( "MultiLayerNetwork expects only 1 output (found " + this.outputLayerNames.size() + ")"); - NeuralNetConfiguration.Builder modelBuilder = new NeuralNetConfiguration.Builder(); + NeuralNetConfiguration.NeuralNetConfigurationBuilder modelBuilder = NeuralNetConfiguration.builder(); if (optimizer != null) { modelBuilder.updater(optimizer); } - NeuralNetConfiguration.ListBuilder listBuilder = modelBuilder.list(); - //don't forcibly over ride for keras import - listBuilder.overrideNinUponBuild(false); + + //don't forcibly override for keras import + modelBuilder.overrideNinUponBuild(false); /* Add layers one at a time. 
*/ KerasLayer prevLayer = null; int layerIndex = 0; @@ -192,7 +191,7 @@ public class KerasSequentialModel extends KerasModel { int nbInbound = layer.getInboundLayerNames().size(); if (nbInbound != 1) throw new InvalidKerasConfigurationException( - "Layers in MultiLayerConfiguration must have exactly one inbound layer (found " + "Layers in NeuralNetConfiguration must have exactly one inbound layer (found " + nbInbound + " for layer " + layer.getLayerName() + ")"); if (prevLayer != null) { InputType[] inputTypes = new InputType[1]; @@ -201,39 +200,40 @@ public class KerasSequentialModel extends KerasModel { inputTypes[0] = this.outputTypes.get(prevLayer.getInboundLayerNames().get(0)); preprocessor = prevLayer.getInputPreprocessor(inputTypes); InputType outputType = preprocessor.getOutputType(inputTypes[0]); - layer.getLayer().setNIn(outputType,listBuilder.isOverrideNinUponBuild()); + layer.getLayer().setNIn(outputType,modelBuilder.isOverrideNinUponBuild()); } else { inputTypes[0] = this.outputTypes.get(prevLayer.getLayerName()); preprocessor = layer.getInputPreprocessor(inputTypes); if(preprocessor != null) { InputType outputType = preprocessor.getOutputType(inputTypes[0]); - layer.getLayer().setNIn(outputType,listBuilder.isOverrideNinUponBuild()); + layer.getLayer().setNIn(outputType,modelBuilder.isOverrideNinUponBuild()); } else - layer.getLayer().setNIn(inputTypes[0],listBuilder.isOverrideNinUponBuild()); + layer.getLayer().setNIn(inputTypes[0],modelBuilder.isOverrideNinUponBuild()); } if (preprocessor != null) - listBuilder.inputPreProcessor(layerIndex, preprocessor); + modelBuilder.inputPreProcessor(layerIndex, preprocessor); } - listBuilder.layer(layerIndex++, layer.getLayer()); + modelBuilder.layer(layerIndex++, layer.getLayer()); } else if (layer.getVertex() != null) - throw new InvalidKerasConfigurationException("Cannot add vertex to MultiLayerConfiguration (class name " + throw new InvalidKerasConfigurationException("Cannot add vertex to NeuralNetConfiguration (class name " + layer.getClassName() + ", layer name " + layer.getLayerName() + ")"); prevLayer = layer; } /* Whether to use standard backprop (or BPTT) or truncated BPTT. 
*/ if (this.useTruncatedBPTT && this.truncatedBPTT > 0) - listBuilder.backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(truncatedBPTT) - .tBPTTBackwardLength(truncatedBPTT); + modelBuilder.backpropType(BackpropType.TruncatedBPTT) + .tbpttFwdLength(truncatedBPTT) + .tbpttBackLength(truncatedBPTT); else - listBuilder.backpropType(BackpropType.Standard); + modelBuilder.backpropType(BackpropType.Standard); - MultiLayerConfiguration build = listBuilder.build(); + NeuralNetConfiguration build = modelBuilder.build(); return build; @@ -256,7 +256,7 @@ public class KerasSequentialModel extends KerasModel { */ public MultiLayerNetwork getMultiLayerNetwork(boolean importWeights) throws InvalidKerasConfigurationException, UnsupportedKerasConfigurationException { - MultiLayerNetwork model = new MultiLayerNetwork(getMultiLayerConfiguration()); + MultiLayerNetwork model = new MultiLayerNetwork(getNeuralNetConfiguration()); model.init(); if (importWeights) model = (MultiLayerNetwork) KerasModelUtils.copyWeightsToModel(model, this.layers); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/config/KerasLayerConfiguration.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/config/KerasLayerConfiguration.java index a0082f4f1..d454d1e97 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/config/KerasLayerConfiguration.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/config/KerasLayerConfiguration.java @@ -246,7 +246,7 @@ public class KerasLayerConfiguration { private final String LAYER_FIELD_RATE = "rate"; private final String LAYER_FIELD_GAUSSIAN_VARIANCE = ""; // 1: sigma, 2: stddev - /* Layer wrappers */ + /* ILayer wrappers */ // Missing: TimeDistributed diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayer.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayer.java index 8e30f72f2..86ebdf3ea 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayer.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayer.java @@ -21,14 +21,12 @@ package org.deeplearning4j.nn.modelimport.keras.layers; import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.GradientNormalization; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; -import org.deeplearning4j.nn.modelimport.keras.layers.TFOpLayerImpl; import org.deeplearning4j.nn.params.EmptyParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; @@ -41,7 +39,7 @@ import java.util.List; import java.util.Map; -public class TFOpLayer extends Layer { +public class TFOpLayer extends LayerConfiguration { private final Map nodeDef; private final Map constants; @@ -81,24 +79,18 @@ public class TFOpLayer extends Layer { public void setNIn(InputType inputType, boolean override){} - @Override - public GradientNormalization getGradientNormalization(){return 
null;} - - @Override public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - TFOpLayerImpl tfOpLayerImpl = new TFOpLayerImpl(nodeDef, constants, conf, networkDataType); - tfOpLayerImpl.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + TFOpLayerImpl tfOpLayerImpl = new TFOpLayerImpl(nodeDef, constants, lconf, networkDataType); + tfOpLayerImpl.addTrainingListeners(trainingListeners); tfOpLayerImpl.setIndex(layerIndex); return tfOpLayerImpl; } - @Override - public double getGradientNormalizationThreshold(){return 0.;} - @Override public List getRegularizationByParam(String paramName){return null;} diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayerImpl.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayerImpl.java index ba2b98db4..43ce8e985 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayerImpl.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/TFOpLayerImpl.java @@ -26,6 +26,7 @@ import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.ArrayUtils; import org.deeplearning4j.common.config.DL4JClassLoading; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; @@ -56,7 +57,7 @@ public class TFOpLayerImpl extends AbstractLayer { private List inputNames; TFGraphRunnerService graphRunnerService; - public TFOpLayerImpl(Map nodeDef, Map constants, NeuralNetConfiguration conf, DataType dtype){ + public TFOpLayerImpl(Map nodeDef, Map constants, LayerConfiguration conf, DataType dtype){ super(conf, dtype); this.nodeDef = nodeDef; this.constants = constants; diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDense.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDense.java index 9eae1f08e..f49599ccf 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDense.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDense.java @@ -115,9 +115,9 @@ public class KerasDense extends KerasLayer { } /** - * Get DL4J DenseLayer. + * Get DL4J DenseLayerConfiguration. 
* - * @return DenseLayer + * @return DenseLayerConfiguration */ public DenseLayer getDenseLayer() { return (DenseLayer) this.layer; diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTM.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTM.java index 4e35a6867..b2e5a15a2 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTM.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTM.java @@ -31,7 +31,7 @@ import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; import org.deeplearning4j.nn.conf.layers.util.MaskZeroLayer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.modelimport.keras.KerasLayer; import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; import org.deeplearning4j.nn.modelimport.keras.exceptions.UnsupportedKerasConfigurationException; @@ -211,12 +211,12 @@ public class KerasLSTM extends KerasLayer { } /** - * Get DL4J Layer. If returnSequences is true, this can be casted to an "LSTM" layer, otherwise it can be casted + * Get DL4J ILayer. If returnSequences is true, this can be casted to an "LSTM" layer, otherwise it can be casted * to a "LastTimeStep" layer. * - * @return LSTM Layer + * @return LSTM ILayer */ - public Layer getLSTMLayer() { + public LayerConfiguration getLSTMLayer() { return layer; } @@ -448,8 +448,8 @@ public class KerasLSTM extends KerasLayer { FeedForwardLayer ffl; - if(this.layer instanceof BaseWrapperLayer){ - BaseWrapperLayer bwl = (BaseWrapperLayer)this.layer; + if(this.layer instanceof BaseWrapperLayerConfiguration){ + BaseWrapperLayerConfiguration bwl = (BaseWrapperLayerConfiguration)this.layer; ffl = (FeedForwardLayer)bwl.getUnderlying(); } else { ffl = (FeedForwardLayer) this.layer; diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnn.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnn.java index ac2d4c234..3c850ecfa 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnn.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnn.java @@ -29,11 +29,11 @@ import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; import org.deeplearning4j.nn.conf.layers.InputTypeUtil; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; import org.deeplearning4j.nn.conf.layers.util.MaskZeroLayer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.modelimport.keras.KerasLayer; import 
org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; import org.deeplearning4j.nn.modelimport.keras.exceptions.UnsupportedKerasConfigurationException; @@ -184,9 +184,9 @@ public class KerasSimpleRnn extends KerasLayer { /** * Get DL4J SimpleRnn layer. * - * @return SimpleRnn Layer + * @return SimpleRnn ILayer */ - public Layer getSimpleRnnLayer() { + public LayerConfiguration getSimpleRnnLayer() { return this.layer; } @@ -296,8 +296,8 @@ public class KerasSimpleRnn extends KerasLayer { } FeedForwardLayer ffl; - if(this.layer instanceof BaseWrapperLayer){ - BaseWrapperLayer bwl = (BaseWrapperLayer)this.layer; + if(this.layer instanceof BaseWrapperLayerConfiguration){ + BaseWrapperLayerConfiguration bwl = (BaseWrapperLayerConfiguration)this.layer; ffl = (FeedForwardLayer)bwl.getUnderlying(); } else { ffl = (FeedForwardLayer) this.layer; diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/wrappers/KerasBidirectional.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/wrappers/KerasBidirectional.java index fa5f5b508..3da1a4642 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/wrappers/KerasBidirectional.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/layers/wrappers/KerasBidirectional.java @@ -24,10 +24,9 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.InputTypeUtil; import org.deeplearning4j.nn.conf.layers.LSTM; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.Bidirectional; import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; -import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; import org.deeplearning4j.nn.modelimport.keras.KerasLayer; import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; import org.deeplearning4j.nn.modelimport.keras.exceptions.UnsupportedKerasConfigurationException; @@ -146,7 +145,7 @@ public class KerasBidirectional extends KerasLayer { break; case "SimpleRNN": kerasRnnlayer = new KerasSimpleRnn(innerRnnConfig, enforceTrainingConfig, previousLayers); - Layer rnnLayer = ((KerasSimpleRnn) kerasRnnlayer).getSimpleRnnLayer(); + LayerConfiguration rnnLayer = ((KerasSimpleRnn) kerasRnnlayer).getSimpleRnnLayer(); this.layer = new Bidirectional(mode, rnnLayer); layer.setLayerName(layerName); break; @@ -160,16 +159,16 @@ public class KerasBidirectional extends KerasLayer { /** * Return the underlying recurrent layer of this bidirectional layer * - * @return Layer, recurrent layer + * @return ILayer, recurrent layer */ - public Layer getUnderlyingRecurrentLayer() { + public LayerConfiguration getUnderlyingRecurrentLayer() { return kerasRnnlayer.getLayer(); } /** * Get DL4J Bidirectional layer. 
* - * @return Bidirectional Layer + * @return Bidirectional ILayer */ public Bidirectional getBidirectionalLayer() { return (Bidirectional) this.layer; @@ -240,7 +239,7 @@ public class KerasBidirectional extends KerasLayer { } - private Map getUnderlyingWeights(Layer l, Map weights, String direction) + private Map getUnderlyingWeights(LayerConfiguration l, Map weights, String direction) throws InvalidKerasConfigurationException { int keras1SubstringLength; if (kerasRnnlayer instanceof KerasLSTM) @@ -269,7 +268,7 @@ public class KerasBidirectional extends KerasLayer { weights = newWeights; } - Layer layerBefore = kerasRnnlayer.getLayer(); + LayerConfiguration layerBefore = kerasRnnlayer.getLayer(); kerasRnnlayer.setLayer(l); kerasRnnlayer.setWeights(weights); Map ret = kerasRnnlayer.getWeights(); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasLayerUtils.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasLayerUtils.java index 536afb915..883ff4dd7 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasLayerUtils.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasLayerUtils.java @@ -22,7 +22,7 @@ package org.deeplearning4j.nn.modelimport.keras.utils; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.conf.graph.ElementWiseVertex; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.samediff.SameDiffLambdaLayer; import org.deeplearning4j.nn.modelimport.keras.KerasLayer; import org.deeplearning4j.nn.modelimport.keras.config.Keras2LayerConfiguration; @@ -34,7 +34,6 @@ import org.deeplearning4j.nn.modelimport.keras.layers.KerasTFOpLayer; import org.deeplearning4j.nn.modelimport.keras.layers.advanced.activations.*; import org.deeplearning4j.nn.modelimport.keras.layers.convolutional.*; import org.deeplearning4j.nn.modelimport.keras.layers.core.*; -import org.deeplearning4j.nn.modelimport.keras.layers.embeddings.Keras2DEmbedding; import org.deeplearning4j.nn.modelimport.keras.layers.embeddings.KerasEmbedding; import org.deeplearning4j.nn.modelimport.keras.layers.local.KerasLocallyConnected1D; import org.deeplearning4j.nn.modelimport.keras.layers.noise.KerasAlphaDropout; @@ -48,7 +47,6 @@ import org.deeplearning4j.nn.modelimport.keras.layers.pooling.KerasPooling3D; import org.deeplearning4j.nn.modelimport.keras.layers.recurrent.KerasLSTM; import org.deeplearning4j.nn.modelimport.keras.layers.recurrent.KerasSimpleRnn; import org.deeplearning4j.nn.modelimport.keras.layers.wrappers.KerasBidirectional; -import org.nd4j.common.primitives.Counter; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.common.primitives.Pair; @@ -154,7 +152,7 @@ public class KerasLayerUtils { * * @param layerConfig map containing Keras layer properties * @return KerasLayer - * @see Layer + * @see LayerConfiguration */ public static KerasLayer getKerasLayerFromConfig(Map layerConfig, KerasLayerConfiguration conf, @@ -174,7 +172,7 @@ public class KerasLayerUtils { * @param layerConfig map containing Keras layer properties * @param enforceTrainingConfig whether to enforce training-only configurations * @return KerasLayer - * @see Layer + * @see LayerConfiguration */ public static KerasLayer getKerasLayerFromConfig(Map layerConfig, boolean enforceTrainingConfig, diff --git 
a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasModelUtils.java b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasModelUtils.java index 43f3b244f..969626676 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasModelUtils.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/main/java/org/deeplearning4j/nn/modelimport/keras/utils/KerasModelUtils.java @@ -24,7 +24,7 @@ package org.deeplearning4j.nn.modelimport.keras.utils; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.modelimport.keras.Hdf5Archive; import org.deeplearning4j.nn.modelimport.keras.KerasLayer; @@ -55,7 +55,7 @@ public class KerasModelUtils { * @return DL4J Model interface * @throws InvalidKerasConfigurationException Invalid Keras config */ - public static Model copyWeightsToModel(Model model, Map kerasLayers) + public static IModel copyWeightsToModel(IModel model, Map kerasLayers) throws InvalidKerasConfigurationException { /* Get list if layers from model. */ Layer[] layersFromModel; @@ -67,7 +67,7 @@ public class KerasModelUtils { /* Iterate over layers in model, setting weights when relevant. */ Set layerNames = new HashSet<>(kerasLayers.keySet()); for (org.deeplearning4j.nn.api.Layer layer : layersFromModel) { - String layerName = layer.conf().getLayer().getLayerName(); + String layerName = layer.getLayerConfiguration().getLayerName(); if (!kerasLayers.containsKey(layerName)) throw new InvalidKerasConfigurationException( "No weights found for layer in model (named " + layerName + ")"); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/KerasTestUtils.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/KerasTestUtils.java index 12a00d4f7..0ee7ce776 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/KerasTestUtils.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/KerasTestUtils.java @@ -20,7 +20,7 @@ package org.deeplearning4j.nn.modelimport.keras; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.samediff.AbstractSameDiffLayer; import org.nd4j.linalg.learning.regularization.L1Regularization; import org.nd4j.linalg.learning.regularization.L2Regularization; @@ -34,7 +34,7 @@ public class KerasTestUtils { private KerasTestUtils(){ } - public static double getL1(BaseLayer layer) { + public static double getL1(BaseLayerConfiguration layer) { List l = layer.getRegularization(); return getL1(l); } @@ -49,7 +49,7 @@ public class KerasTestUtils { return l1Reg.getL1().valueAt(0,0); } - public static double getL2(BaseLayer layer) { + public static double getL2(BaseLayerConfiguration layer) { List l = layer.getRegularization(); return getL2(l); } diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/FullModelComparisons.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/FullModelComparisons.java index 1120dfbb8..db0fc466b 100644 --- 
a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/FullModelComparisons.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/FullModelComparisons.java @@ -48,7 +48,6 @@ import org.nd4j.common.resources.Resources; import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; @@ -85,10 +84,10 @@ public class FullModelComparisons extends BaseDL4JTest { System.out.println(model.summary()); - // 1. Layer + // 1. ILayer LSTM firstLstm = (LSTM) model.getLayer(0); org.deeplearning4j.nn.conf.layers.LSTM firstConf = - (org.deeplearning4j.nn.conf.layers.LSTM) firstLstm.conf().getLayer(); + (org.deeplearning4j.nn.conf.layers.LSTM) firstLstm.getLayerConfiguration(); // "unit_forget_bias": true assertEquals(1.0, firstConf.getForgetGateBiasInit()); @@ -123,10 +122,10 @@ public class FullModelComparisons extends BaseDL4JTest { Assertions.assertEquals(b.getDouble(0, 192), -0.13569744, 1e-7); // Keras O Assertions.assertEquals(b.getDouble(0, 0), -0.2587392, 1e-7); // Keras C - // 2. Layer + // 2. ILayer LSTM secondLstm = (LSTM) ((LastTimeStepLayer) model.getLayer(1)).getUnderlying(); org.deeplearning4j.nn.conf.layers.LSTM secondConf = - (org.deeplearning4j.nn.conf.layers.LSTM) secondLstm.conf().getLayer(); + (org.deeplearning4j.nn.conf.layers.LSTM) secondLstm.getLayerConfiguration(); // "unit_forget_bias": true assertEquals(1.0, secondConf.getForgetGateBiasInit()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras1ModelConfigurationTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras1ModelConfigurationTest.java index fc48183e2..0a1bcb4a9 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras1ModelConfigurationTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras1ModelConfigurationTest.java @@ -22,7 +22,7 @@ package org.deeplearning4j.nn.modelimport.keras.configurations; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.modelimport.keras.KerasModel; @@ -140,9 +140,9 @@ public class Keras1ModelConfigurationTest extends BaseDL4JTest { private void runSequentialConfigTest(String path, boolean training) throws Exception { try(InputStream is = Resources.asStream(path)) { - MultiLayerConfiguration config = + NeuralNetConfiguration config = new KerasModel().modelBuilder().modelJsonInputStream(is) - .enforceTrainingConfig(training).buildSequential().getMultiLayerConfiguration(); + .enforceTrainingConfig(training).buildSequential().getNeuralNetConfiguration(); MultiLayerNetwork model = new MultiLayerNetwork(config); model.init(); } diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras2ModelConfigurationTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras2ModelConfigurationTest.java index 05f6162f3..9bb3e5b4a 100644 --- 
a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras2ModelConfigurationTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/Keras2ModelConfigurationTest.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.modelimport.keras.configurations; import lombok.extern.slf4j.Slf4j; import lombok.val; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.modelimport.keras.KerasLayer; @@ -42,7 +42,6 @@ import org.nd4j.common.resources.Resources; import java.io.File; import java.io.IOException; import java.io.InputStream; -import java.util.Arrays; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -260,9 +259,9 @@ public class Keras2ModelConfigurationTest extends BaseDL4JTest { @Test public void oneLstmLayerTest() throws Exception { try(InputStream is = Resources.asStream("/modelimport/keras/configs/keras2/one_lstm_no_sequences_tf_keras_2.json")) { - MultiLayerConfiguration config = + NeuralNetConfiguration config = new KerasModel().modelBuilder().modelJsonInputStream(is) - .enforceTrainingConfig(false).buildSequential().getMultiLayerConfiguration(); + .enforceTrainingConfig(false).buildSequential().getNeuralNetConfiguration(); MultiLayerNetwork model = new MultiLayerNetwork(config); model.init(); INDArray input = Nd4j.create(DataType.FLOAT, 50, 1500, 500); //NWC format - [Minibatch, seqLength, channels] @@ -287,9 +286,9 @@ public class Keras2ModelConfigurationTest extends BaseDL4JTest { private void runSequentialConfigTest(String path) throws Exception { try(InputStream is = Resources.asStream(path)) { - MultiLayerConfiguration config = + NeuralNetConfiguration config = new KerasModel().modelBuilder().modelJsonInputStream(is) - .enforceTrainingConfig(false).buildSequential().getMultiLayerConfiguration(); + .enforceTrainingConfig(false).buildSequential().getNeuralNetConfiguration(); MultiLayerNetwork model = new MultiLayerNetwork(config); model.init(); } diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasInitilizationTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasInitilizationTest.java index e97a1685e..eec8658cc 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasInitilizationTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasInitilizationTest.java @@ -167,7 +167,7 @@ public class KerasInitilizationTest extends BaseDL4JTest { layerConfig.put(conf.getLAYER_FIELD_KERAS_VERSION(), kerasVersion); DenseLayer layer = new KerasDense(layerConfig, false).getDenseLayer(); - assertEquals(dl4jInitializer, layer.getWeightInitFn()); + assertEquals(dl4jInitializer, layer.getWeightInit()); } } diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasModelImportTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasModelImportTest.java index c45b3c52b..02c478093 100644 --- 
a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasModelImportTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/configurations/KerasModelImportTest.java @@ -20,12 +20,12 @@ package org.deeplearning4j.nn.modelimport.keras.configurations; +import java.util.List; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; import org.deeplearning4j.nn.modelimport.keras.KerasModelImport; import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; @@ -34,7 +34,6 @@ import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.nd4j.common.resources.Resources; -import org.nd4j.linalg.convolution.Convolution; import org.nd4j.linalg.factory.Nd4j; import java.io.IOException; @@ -57,12 +56,12 @@ public class KerasModelImportTest extends BaseDL4JTest { @Test public void testNCHWNWHCChangeImport() { MultiLayerNetwork model = loadModel("modelimport/keras/weights/conv2dnchw/simpleconv2d.hdf5"); - MultiLayerConfiguration multiLayerConfiguration = model.getLayerWiseConfigurations(); - ConvolutionLayer convolutionLayer = (ConvolutionLayer) multiLayerConfiguration.getConf(0).getLayer(); + List layerConfigs = model.getNetConfiguration().getFlattenedLayerConfigurations(); + ConvolutionLayer convolutionLayer = (ConvolutionLayer) layerConfigs.get(0); assertEquals(CNN2DFormat.NCHW,convolutionLayer.getCnn2dDataFormat()); - SubsamplingLayer subsamplingLayer = (SubsamplingLayer) multiLayerConfiguration.getConf(1).getLayer(); + SubsamplingLayer subsamplingLayer = (SubsamplingLayer) layerConfigs.get(1); assertEquals(CNN2DFormat.NHWC,subsamplingLayer.getCnn2dDataFormat()); - ConvolutionLayer convolutionLayer1 = (ConvolutionLayer) multiLayerConfiguration.getConf(2).getLayer(); + ConvolutionLayer convolutionLayer1 = (ConvolutionLayer) layerConfigs.get(2); assertEquals(CNN2DFormat.NHWC,convolutionLayer1.getCnn2dDataFormat()); model.output(Nd4j.zeros(1,1,28,28)); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasCustomLayerTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasCustomLayerTest.java index 67caf1e3b..f5b7584d3 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasCustomLayerTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasCustomLayerTest.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.modelimport.keras.e2e; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; import org.deeplearning4j.common.resources.DL4JResources; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -61,7 +62,7 @@ public class KerasCustomLayerTest extends BaseDL4JTest { cachedKerasFile.deleteOnExit(); } - org.deeplearning4j.nn.api.Model importedModel = + IModel importedModel = KerasModelImport.importKerasModelAndWeights(cachedKerasFile.getAbsolutePath()); 
ModelSerializer.writeModel(importedModel, outputPath, false); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasModelEndToEndTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasModelEndToEndTest.java index 9b6797c06..1dad7c549 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasModelEndToEndTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/e2e/KerasModelEndToEndTest.java @@ -31,6 +31,7 @@ import org.deeplearning4j.nn.api.layers.IOutputLayer; import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.layers.Convolution1DLayer; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.LossLayer; import org.deeplearning4j.nn.conf.layers.RnnOutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -38,7 +39,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.nn.modelimport.keras.Hdf5Archive; import org.deeplearning4j.nn.modelimport.keras.KerasModel; import org.deeplearning4j.nn.modelimport.keras.KerasSequentialModel; -import org.deeplearning4j.nn.modelimport.keras.exceptions.InvalidKerasConfigurationException; import org.deeplearning4j.nn.modelimport.keras.utils.KerasModelBuilder; import org.deeplearning4j.nn.modelimport.keras.utils.KerasModelUtils; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -286,7 +286,7 @@ public class KerasModelEndToEndTest extends BaseDL4JTest { MultiLayerNetwork net = importEndModelTest(modelPath, inputsOutputPath, true, true, true, true); Layer outLayer = net.getOutputLayer(); assertTrue(outLayer instanceof org.deeplearning4j.nn.layers.LossLayer); - LossLayer llConf = (LossLayer) outLayer.getConfig(); + LossLayer llConf = (LossLayer) outLayer.getTrainingConfig(); assertEquals(new LossSparseMCXENT(), llConf.getLossFn()); } @@ -656,7 +656,7 @@ public class KerasModelEndToEndTest extends BaseDL4JTest { MultiLayerNetwork net = importEndModelTest(modelPath, inputsOutputPath, true, true, true, true, false, null, null); Layer l = net.getLayer(0); - Convolution1DLayer c1d = (Convolution1DLayer) l.getConfig(); + Convolution1DLayer c1d = (Convolution1DLayer) l.getTrainingConfig(); assertEquals(ConvolutionMode.Causal, c1d.getConvolutionMode()); } } @@ -971,7 +971,7 @@ public class KerasModelEndToEndTest extends BaseDL4JTest { if (net.getOutputLayer() instanceof IOutputLayer) { netToTest = net; } else { - org.deeplearning4j.nn.conf.layers.Layer l; + LayerConfiguration l; if (labels.rank() == 2) { l = new LossLayer.Builder() .lossFunction(LossFunctions.LossFunction.MSE) @@ -1000,11 +1000,11 @@ public class KerasModelEndToEndTest extends BaseDL4JTest { for (Layer l : netToTest.getLayers()) { // Remove any dropout manually - until this is fixed: // https://github.com/eclipse/deeplearning4j/issues/4368 - l.conf().getLayer().setIDropout(null); + l.getLayerConfiguration().setIDropout(null); //Also swap out activation functions... this is a bit of a hack, but should make the net gradient checkable... 
- if (l.conf().getLayer() instanceof FeedForwardLayer) { - FeedForwardLayer ffl = (FeedForwardLayer) l.conf().getLayer(); + if (l.getLayerConfiguration() instanceof FeedForwardLayer) { + FeedForwardLayer ffl = (FeedForwardLayer) l.getLayerConfiguration(); IActivation activation = ffl.getActivationFn(); if (activation instanceof ActivationReLU || activation instanceof ActivationLReLU) { ffl.setActivationFn(new ActivationSoftPlus()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/advanced/activation/KerasPReLUTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/advanced/activation/KerasPReLUTest.java index 202e06426..053eb1fab 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/advanced/activation/KerasPReLUTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/advanced/activation/KerasPReLUTest.java @@ -79,7 +79,7 @@ public class KerasPReLUTest extends BaseDL4JTest { PReLULayer layer = kerasPReLU.getPReLULayer(); assertArrayEquals(layer.getInputShape(), new long[] {3, 5, 4}); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(layerName, layer.getLayerName()); } diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution1DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution1DTest.java index f5e25ea9f..10330113c 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution1DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution1DTest.java @@ -100,7 +100,7 @@ public class KerasAtrousConvolution1DTest extends BaseDL4JTest { Convolution1DLayer layer = new KerasAtrousConvolution1D(layerConfig).getAtrousConvolution1D(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution2DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution2DTest.java index f2eebb8f2..7f1d65b3b 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution2DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasAtrousConvolution2DTest.java @@ -114,7 +114,7 @@ public class KerasAtrousConvolution2DTest extends BaseDL4JTest { ConvolutionLayer layer = new KerasAtrousConvolution2D(layerConfig).getAtrousConvolution2D(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + 
assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution1DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution1DTest.java index 994d3affe..b8629573f 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution1DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution1DTest.java @@ -122,7 +122,7 @@ public class KerasConvolution1DTest extends BaseDL4JTest { Convolution1DLayer layer = new KerasConvolution1D(layerConfig).getConvolution1DLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution2DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution2DTest.java index b92ab0432..4ba12c10f 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution2DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution2DTest.java @@ -123,7 +123,7 @@ public class KerasConvolution2DTest extends BaseDL4JTest { ConvolutionLayer layer = new KerasConvolution2D(layerConfig).getConvolution2DLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution3DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution3DTest.java index c36b0351d..f52939947 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution3DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasConvolution3DTest.java @@ -119,7 +119,7 @@ public class KerasConvolution3DTest extends BaseDL4JTest { ConvolutionLayer layer = new KerasConvolution3D(layerConfig).getConvolution3DLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, 
layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDeconvolution2DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDeconvolution2DTest.java index c0db1c47b..9fecab86c 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDeconvolution2DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDeconvolution2DTest.java @@ -123,7 +123,7 @@ public class KerasDeconvolution2DTest extends BaseDL4JTest { Deconvolution2D layer = new KerasDeconvolution2D(layerConfig).getDeconvolution2DLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDepthwiseConvolution2DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDepthwiseConvolution2DTest.java index 4dc4856c0..eef103f98 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDepthwiseConvolution2DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasDepthwiseConvolution2DTest.java @@ -128,7 +128,7 @@ public class KerasDepthwiseConvolution2DTest extends BaseDL4JTest { DepthwiseConvolution2D layer = kerasLayer.getDepthwiseConvolution2DLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(DEPTH_MULTIPLIER, layer.getDepthMultiplier()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasSeparableConvolution2DTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasSeparableConvolution2DTest.java index 54f50a478..9745ff5ed 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasSeparableConvolution2DTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/convolution/KerasSeparableConvolution2DTest.java @@ -130,7 +130,7 @@ public class KerasSeparableConvolution2DTest extends BaseDL4JTest { SeparableConvolution2D layer = new KerasSeparableConvolution2D(layerConfig).getSeparableConvolution2DLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - 
assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(DEPTH_MULTIPLIER, layer.getDepthMultiplier()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDenseTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDenseTest.java index c9c70e5ff..637ce5915 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDenseTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/core/KerasDenseTest.java @@ -89,7 +89,7 @@ public class KerasDenseTest extends BaseDL4JTest { DenseLayer layer = new KerasDense(layerConfig, false).getDenseLayer(); assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTMTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTMTest.java index 7ce6bf0b3..1bfc3a4ce 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTMTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasLSTMTest.java @@ -38,7 +38,6 @@ import org.deeplearning4j.nn.weights.WeightInitXavier; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -131,7 +130,7 @@ public class KerasLSTMTest extends BaseDL4JTest { } assertEquals(ACTIVATION_DL4J, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnnTest.java b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnnTest.java index c8e8287fb..1b143a706 100644 --- a/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnnTest.java +++ b/cavis-dnn/cavis-dnn-modelimport/src/test/java/org/deeplearning4j/nn/modelimport/keras/layers/recurrent/KerasSimpleRnnTest.java @@ -101,7 +101,7 @@ public class KerasSimpleRnnTest extends BaseDL4JTest { (SimpleRnn) ((LastTimeStep) new KerasSimpleRnn(layerConfig).getSimpleRnnLayer()).getUnderlying(); assertEquals(ACTIVATION, layer.getActivationFn().toString()); assertEquals(LAYER_NAME, layer.getLayerName()); - assertEquals(INIT_DL4J, 
layer.getWeightInitFn()); + assertEquals(INIT_DL4J, layer.getWeightInit()); assertEquals(L1_REGULARIZATION, KerasTestUtils.getL1(layer), 0.0); assertEquals(L2_REGULARIZATION, KerasTestUtils.getL2(layer), 0.0); assertEquals(new Dropout(DROPOUT_DL4J), layer.getIDropout()); diff --git a/cavis-dnn/cavis-dnn-nlp/src/test/java/org/deeplearning4j/models/word2vec/Word2VecTestsSmall.java b/cavis-dnn/cavis-dnn-nlp/src/test/java/org/deeplearning4j/models/word2vec/Word2VecTestsSmall.java index 19681185c..bd89639b3 100644 --- a/cavis-dnn/cavis-dnn-nlp/src/test/java/org/deeplearning4j/models/word2vec/Word2VecTestsSmall.java +++ b/cavis-dnn/cavis-dnn-nlp/src/test/java/org/deeplearning4j/models/word2vec/Word2VecTestsSmall.java @@ -24,7 +24,6 @@ import lombok.extern.slf4j.Slf4j; import lombok.val; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.models.paragraphvectors.ParagraphVectorsTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; @@ -53,7 +52,6 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.util.Collection; -import java.util.concurrent.Callable; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -189,7 +187,7 @@ public class Word2VecTestsSmall extends BaseDL4JTest { INDArray w = vec.lookupTable().getWeights(); System.out.println(w); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .seed(12345).list() .layer(new EmbeddingLayer.Builder().weightInit(vec).build()) .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(w.size(1)).nOut(3).build()) @@ -210,7 +208,7 @@ public class Word2VecTestsSmall extends BaseDL4JTest { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); final MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(bais, true); - assertEquals(net.getLayerWiseConfigurations(), restored.getLayerWiseConfigurations()); - assertTrue(net.params().equalsWithEps(restored.params(), 2e-3)); + assertEquals(net.getNetConfiguration(), restored.getNetConfiguration()); + assertTrue(net.getModelParams().equalsWithEps(restored.getModelParams(), 2e-3)); } } diff --git a/cavis-dnn/cavis-dnn-nn-api/build.gradle b/cavis-dnn/cavis-dnn-nn-api/build.gradle new file mode 100644 index 000000000..e41b96b8d --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn-api/build.gradle @@ -0,0 +1,27 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. 
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ +//apply from: "${project.rootProject.projectDir}/createTestBackends.gradle" + +dependencies { + implementation platform(projects.cavisCommonPlatform) + implementation projects.cavisDnn.cavisDnnApi + implementation projects.cavisDnn.cavisDnnNn +} \ No newline at end of file diff --git a/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/Layer.java b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/Layer.java new file mode 100644 index 000000000..c4c81f8ad --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/Layer.java @@ -0,0 +1,40 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +/** + * This is an "executable" Layer that is based on a {@link LayerConfiguration}. + */ +public interface Layer { + + /** + * Get the underlying configuration for this Layer + * @return configuration + */ + LayerConfiguration getLayerConfiguration(); + + /** + * Set the underlying layer configuration + * @param conf The new configuration + */ + void setLayerConfiguration(LayerConfiguration conf); +} diff --git a/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/LayerConfiguration.java b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/LayerConfiguration.java new file mode 100644 index 000000000..6b395a5b2 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/LayerConfiguration.java @@ -0,0 +1,51 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +public interface LayerConfiguration { + + /** + * Create and return an instance of a Layer from this LayerConfiguration.
+ * + * @param network the "holding" network for the instance + * @return the new layer instance + */ + Layer instantiate(NeuralNetwork network); + + + /** + * Defines the valid input type for this Layer + * + * @return InputType + */ + org.deeplearning4j.nn.conf.inputs.InputType.Type getInputType(); + + + /** + * Defines the valid output type for this Layer + * + * @return InputType + */ + org.deeplearning4j.nn.conf.inputs.InputType.Type getOutputType(); + + +} diff --git a/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetwork.java b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetwork.java new file mode 100644 index 000000000..93ef1263d --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetwork.java @@ -0,0 +1,69 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import org.nd4j.linalg.dataset.api.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; + +/** + * A Neural Network is an instance of a {@link NeuralNetworkConfiguration} that can be trained, + * evaluated, saved, exported, etc. Its configuration state is defined with the + * {@link #setConfiguration(NeuralNetworkConfiguration)} and {@link #getConfiguration()} methods.
+ * + */ +public interface NeuralNetwork { + + /** + * Set the configuration that defines this Neural Network + * + * @param conf the configuration to use for this network + */ + void setConfiguration(NeuralNetworkConfiguration conf); + NeuralNetworkConfiguration getConfiguration(); + + /** + * This method fits the model with a given DataSet + * + * @param dataSet the dataset to use for training + */ + void fit(DataSet dataSet); + + /** + * This method fits the model with a given MultiDataSet + * + * @param dataSet the multi dataset to use for training + */ + void fit(MultiDataSet dataSet); + + /** + * The name of the Neural Network + * @return the name + */ + String getName(); + + /** + * Set the name for this Neural Network + * @param name the name + */ + void setName(String name); + +} diff --git a/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetworkConfiguration.java b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetworkConfiguration.java new file mode 100644 index 000000000..f29dd4916 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn-api/src/main/java/net/brutex/ai/dnn/api/NeuralNetworkConfiguration.java @@ -0,0 +1,43 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import java.util.List; + +public interface NeuralNetworkConfiguration { + + /** + * Provides a flat list of all embedded layer configurations. This + * can only be called after the network is initialized or + * {@link #calculateInnerLayerConfigurations()} has been called.
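+ * <p>A short sketch of the intended call order ({@code conf} stands for a fully built, hypothetical
+ * instance of this interface; illustrative only):
+ * <pre>{@code
+ * conf.calculateInnerLayerConfigurations(); // flatten stacked building blocks first
+ * List layers = conf.getLayerConfigurations();
+ * }</pre>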
+ * + * @return unstacked layer configurations + */ + List getLayerConfigurations(); + + + /** + * This unstacks any stacked layer configurations within building blocks like + * {@link BuildingBlockLayer}. + */ + void calculateInnerLayerConfigurations(); +} diff --git a/cavis-dnn/cavis-dnn-nn/build.gradle b/cavis-dnn/cavis-dnn-nn/build.gradle index e38b43f1d..59ff712ab 100644 --- a/cavis-dnn/cavis-dnn-nn/build.gradle +++ b/cavis-dnn/cavis-dnn-nn/build.gradle @@ -22,7 +22,7 @@ apply from: "${project.rootProject.projectDir}/createTestBackends.gradle" dependencies { + implementation platform(projects.cavisCommonPlatform) - +// implementation projects.cavisDnn.cavisDnnNnApi implementation projects.cavisDnn.cavisDnnData.cavisDnnDataUtilityIterators implementation 'org.lucee:oswego-concurrent:1.3.4' implementation projects.cavisDnn.cavisDnnCommon @@ -57,4 +57,5 @@ dependencies { // define any required OkHttp artifacts without version implementation "com.squareup.okhttp3:okhttp" implementation "com.squareup.okhttp3:logging-interceptor" -} \ No newline at end of file +} + diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/Animal.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/Animal.java new file mode 100644 index 000000000..2b5ac714c --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/Animal.java @@ -0,0 +1,68 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License.
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +public class Animal { + + private String animalString; + + protected Animal(AnimalBuilder b) { + this.animalString = b.animalString; + } + + public static AnimalBuilder builder() { + return new AnimalBuilderImpl(); + } + + public static abstract class AnimalBuilder> { + + private String animalString; + + public B animalString(String animalString) { + this.animalString = animalString; + return self(); + } + + protected abstract B self(); + + public abstract C build(); + + public String toString() { + return "Animal.AnimalBuilder(animalString=" + this.animalString + ")"; + } + } + + private static final class AnimalBuilderImpl extends + AnimalBuilder { + + private AnimalBuilderImpl() { + } + + protected AnimalBuilderImpl self() { + return this; + } + + public Animal build() { + return new Animal(this); + } + } +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IActivationFunction.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IActivationFunction.java new file mode 100644 index 000000000..18794b8fe --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IActivationFunction.java @@ -0,0 +1,57 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +/** + * Activation Function An activation function takes in weighted data (matrix multiplication between + * input data and weights) and outputs a non-linear transformation of the data. For example, output + * = max(0,weighted_data) is the rectified linear activation function (essentially set all negative + * values to zero). The difference between units and activation functions is that units can be more + * complex, that is, a unit can have multiple activation functions (for example LSTM units) or a + * slightly more complex structure (for example maxout units). + *
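+ * <p>As a plain-Java illustration of the rectified linear example above (a sketch only, not part of
+ * this interface):
+ * <pre>{@code
+ * // ReLU: clamp all negative weighted inputs to zero
+ * static double relu(double weightedInput) {
+ *   return Math.max(0.0, weightedInput);
+ * }
+ * }</pre>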

+ * The difference between linear and non-linear activation functions can be shown with the + * relationship of some weighted values: Imagine four points A1, A2, B1 and B2. The pairs A1 / + * A2 and B1 / B2 each lie close together, but A1 is distant from both B1 and B2, and the same + * holds for A2. + *

+ * With a linear transformation the distances between the points might change. For example, A1 and A2 + * might move far apart, but this implies that B1 and B2 also move far apart. The distance between the + * pairs might shrink, but if it does, then both B1 and B2 will be close to A1 and A2 at the same + * time. We can apply many linear transformations, but the relationship between A1 / A2 and B1 / B2 + * will always remain similar. + *

+ * In contrast, with a non-linear activation function we can increase the distance between A1 and A2 + * while we decrease the distance between B1 and B2. We can make B1 close to A1, but B2 distant from + * A1. By applying non-linear functions, we create new relationships between the points. With every + * new non-linear transformation we can increase the complexity of the relationships. In deep + * learning, using non-linear activation functions creates increasingly complex features with every + * layer. + *

+ * In contrast, the features of 1000 layers of pure linear transformations can be reproduced by a + * single layer (because a chain of matrix multiplication can always be represented by a single + * matrix multiplication). This is why non-linear activation functions are so important in deep + * learning. + */ +public interface IActivationFunction { + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayer.java new file mode 100644 index 000000000..a43b94265 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayer.java @@ -0,0 +1,46 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +/** + * This is an "executable" ILayer, that is based on a {@link ILayerConfiguration} + */ +public interface ILayer { + + /** + * Get the underlying configuration for this ILayer + * @return configuration + */ + ILayerConfiguration getLayerConfiguration(); + + /** + * Set the underlying layer configuration + * @param conf The new configuration + */ + void setLayerConfiguration(ILayerConfiguration conf); + + /** + * An implementation should provide a method to validate the network + * @return true if no errors found; false otherwise + */ + boolean isValid(); +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayerConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayerConfiguration.java new file mode 100644 index 000000000..1462661bb --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/ILayerConfiguration.java @@ -0,0 +1,27 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. 
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +public interface ILayerConfiguration { + + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IModel.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IModel.java new file mode 100644 index 000000000..3f84a7004 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IModel.java @@ -0,0 +1,301 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import java.util.Collection; +import java.util.Map; +import lombok.NonNull; +import org.deeplearning4j.nn.api.ITrainableLayer; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.gradient.Gradient; +import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; +import org.deeplearning4j.optimize.api.ConvexOptimizer; +import org.deeplearning4j.optimize.api.TrainingListener; +import org.nd4j.common.primitives.Pair; +import org.nd4j.evaluation.IEvaluation; +import org.nd4j.linalg.api.ndarray.INDArray; +import org.nd4j.linalg.dataset.api.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; +import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; + +/** + * A Neural Network is an instance of a {@link INeuralNetworkConfiguration} that can be trained, + * evaluated, saved, exported, etc. Its configuration state is defined with the + * {@link #setNetConfiguration(NeuralNetConfiguration)} and + * {@link #getNetConfiguration()} methods. + **/ + +public interface IModel extends ITrainableLayer { + + /** + * The full param table for the model. Each layer holds a subset of these parameters.
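+ * <p>A small sketch of inspecting the table (assumes an already initialized {@code model} that
+ * implements this interface; keys are parameter names such as the weights and biases of the
+ * individual layers):
+ * <pre>{@code
+ * Map paramTable = model.getParamTable();
+ * for (Object key : paramTable.keySet()) {
+ *   INDArray params = (INDArray) paramTable.get(key);
+ *   System.out.println(key + " -> " + params.length() + " values");
+ * }
+ * }</pre>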
+ * + * @return full table of parameters + */ + Map getParamTable(); + + Map getParamTable(boolean backpropOnly); + void setParamTable(Map paramTable); + + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + INDArray updaterState(); + + /** + * This method returns Optimizer used for training + * + * @return + */ + ConvexOptimizer getOptimizer(); + + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + void fit(DataSet dataSet); + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + void fit(MultiDataSet dataSet); + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + void fit(DataSetIterator iterator); + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + void fit(MultiDataSetIterator iterator); + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + */ + T[] doEvaluation(DataSetIterator iterator, T... evaluations); + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + */ + T[] doEvaluation(MultiDataSetIterator iterator, T... evaluations); + + /** + * Get the configuration of this model. + * + * @return the neural net configuration + */ + NeuralNetConfiguration getNetConfiguration(); + + void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration); + + /** + * Init the model + */ + void init(); + + /** + * Get the number of parameters in this model + * + * @return number of parameters + */ + long numParams(); + + /** + * All models have a fit method + */ + @Deprecated + void fit(); + + /** + * Update layer weights and biases with gradient change + */ + void update(Gradient gradient); + + /** + * Perform one update applying the gradient + * + * @param gradient the gradient to apply + */ + void update(INDArray gradient, String paramType); + + + /** + * The score for the model. No calculation occurs, this simply returns the score calculated before + * by the {@link #computeGradientAndScore(LayerWorkspaceMgr)} method. + * + * @return the score for the model + */ + double getScore(); + + + /** + * Update the score + */ + void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr); + + /** + * Parameters of the model (if any) + * + * @return the parameters of the model + */ + INDArray getModelParams(); + + + /** + * the number of parameters for the model + * + * @return the number of parameters for the model + */ + long numParams(boolean backwards); + + /** + * Set the parameters for this model. This expects a linear ndarray which then be unpacked + * internally relative to the expected ordering of the model + * + * @param params the parameters for the model + */ + void setParams(INDArray params); + + /** + * Set the initial parameters array as a view of the full (backprop) network parameters NOTE: this + * is intended to be used internally in MultiLayerNetwork and ComputationGraph, not by users. + * + * @param params a 1 x nParams row vector that is a view of the larger (MLN/CG) parameters array + */ + void setParamsViewArray(INDArray params); + + + INDArray getGradientsViewArray(); + + /** + * Set the gradients array as a view of the full (backprop) network parameters NOTE: this is + * intended to be used internally in MultiLayerNetwork and ComputationGraph, not by users. 
+ * + * @param gradients a 1 x nParams row vector that is a view of the larger (MLN/CG) gradients + * array + */ + void setBackpropGradientsViewArray(INDArray gradients); + + /** + * Fit the model to the given data + * + * @param data the data to fit the model to + */ + void fit(INDArray data, LayerWorkspaceMgr workspaceMgr); + + + /** + * Get the gradient. Note that this method will not calculate the gradient, it will rather return + * the gradient that has been computed before. For calculating the gradient, see + * {@link IModel#computeGradientAndScore(LayerWorkspaceMgr)} } . + * + * @return the gradient for this model, as calculated before + */ + Gradient gradient(); + + /** + * Get the gradient and score + * + * @return the gradient and score + */ + Pair gradientAndScore(); + + /** + * The current inputs batch size + * + * @return the current inputs batch size + */ + int batchSize(); + + /** + * The input/feature matrix for the model + * + * @return the input/feature matrix for the model + */ + INDArray input(); + + /** + * Get a parameter array for a given parameter type key + * + * @param param the key of the parameter + * @return ndarray of parameters + */ + INDArray getParam(String param); + + + /** + * Set the parameters for a given parameter type. + * + * @param key the param type key to set + * @param val the new parameters ndarray + */ + void setParam(String key, INDArray val); + + /** + * Clear input + */ + void clear(); + + + /** + * Apply any constraints to the model + */ + void applyConstraints(int iteration, int epoch); + + + void close(); + + /** + * Get the TrainingListeners + * + * @return training listener + */ + Collection getTrainingListeners(); + + /** + * Replace the TrainingListeners for this model + * + * @param listeners new listeners + */ + void addTrainingListeners(TrainingListener... listeners); + + void addTrainingListeners(Collection listeners); + + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/INeuralNetworkConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/INeuralNetworkConfiguration.java new file mode 100644 index 000000000..3c679267c --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/INeuralNetworkConfiguration.java @@ -0,0 +1,39 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. 
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import java.io.Serializable; +import java.util.List; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; + +public interface INeuralNetworkConfiguration extends Serializable, Cloneable { + + INeuralNetworkConfiguration clone(); + + void init(); + + /** + * The model (if initiated) + * @return + */ + IModel getNet(); + } \ No newline at end of file diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IUnit.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IUnit.java new file mode 100644 index 000000000..dd9643c6b --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/IUnit.java @@ -0,0 +1,47 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +/** + * Unit A unit often refers to the activation function in a layer by which the inputs are + * transformed via a nonlinear activation function (for example by the logistic sigmoid function). + * Usually, a unit has several incoming connections and several outgoing connections. However, units + * can also be more complex, like long short-term memory (LSTM) units, which have multiple + * activation functions with a distinct layout of connections to the nonlinear activation functions, + * or maxout units, which compute the final output over an array of nonlinearly transformed input + * values. Pooling, convolution, and other input transforming functions are usually not referred to + * as units. + *

+ * Artificial Neuron The term artificial neuron—or most often just neuron—is an equivalent term to + * unit, but implies a close connection to neurobiology and the human brain while deep learning has + * very little to do with the brain (for example, it is now thought that biological neurons are more + * similar to entire multilayer perceptrons rather than a single unit in a neural network). The term + * neuron was encouraged after the last AI winter to differentiate the more successful neural + * network from the failing and abandoned perceptron. However, since the wild successes of deep + * learning after 2012, the media often picked up on the term “neuron” and sought to explain deep + * learning as mimicry of the human brain, which is very misleading and potentially dangerous for + * the perception of the field of deep learning. Now the term neuron is discouraged and the more + * descriptive term unit should be used instead. + */ +public interface IUnit { + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/LayerType.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/LayerType.java new file mode 100644 index 000000000..ba432d132 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/LayerType.java @@ -0,0 +1,52 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. 
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import lombok.Getter; +import org.deeplearning4j.nn.conf.layers.ActivationLayer; +import org.deeplearning4j.nn.conf.layers.BatchNormalization; +import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.DropoutLayer; +import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.NoParamLayer; + +public enum LayerType { + CONV("CONV", "Convolutional", ConvolutionLayer.class), + ACT("ACT", "Activation", ActivationLayer.class), + POOL( "POOL", "Pooling/ Subsampling", NoParamLayer.class), + FC( "FC", "Fully Connected", FeedForwardLayer.class), + BN("BN", "Batch Normalization", BatchNormalization.class), + DO("DO", "Dropout", DropoutLayer.class), + UNKNOWN("UNKNOWN", "Type not specified", LayerConfiguration.class); + +@Getter + String description; + @Getter String name; + @Getter Class clazz; + + LayerType(String name, String description, Class clazz) { + this.name = name; + this.description = description; + this.clazz = clazz; + } +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/NN.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/NN.java new file mode 100644 index 000000000..8d6f778d0 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/api/NN.java @@ -0,0 +1,43 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. 
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; +import org.deeplearning4j.nn.conf.layers.DenseLayer; + +/** + * A fluent API to configure and create artificial neural networks + */ +public class NN { + + + public static NeuralNetConfigurationBuilder net() { + return NeuralNetConfiguration.builder(); + } + + void test() { + Dog.DogBuilder builder = Dog.builder() + .animalString("") + .dogString(""); + } +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/conf/layer/Layer_Descriptions.md b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/conf/layer/Layer_Descriptions.md new file mode 100644 index 000000000..74343c891 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/conf/layer/Layer_Descriptions.md @@ -0,0 +1,31 @@ +# Layer Descriptions # + +## abstract LayerConfiguration and Interface TrainingConfig ## + +Every layer configuration is inherited from LayerConfiguration (and some also from TrainableLayerConfiguration) + + +### NoParamLayer ### + +The following are examples of No ParamLayers. No parameter layers are not inheriting from BaseConfigurationLayer, +but directly from LayerConfiguration. + +* ActivationLayer +* SubsamplingLayer +* ZeroPadding1DLayer +* MaskLayer +* CroppingLayer +* GlobalPoolingLayer + +### SameDiffLayer ### + +### BaseWrapperLayer ### + +### FrozenLayer ### + +### LocalResponseNormalization ### + +### Bidirectional ### + +### TFOpLayer ### + diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/networks/ArtificialNeuralNetwork.java b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/networks/ArtificialNeuralNetwork.java new file mode 100644 index 000000000..aa0465659 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/net/brutex/ai/dnn/networks/ArtificialNeuralNetwork.java @@ -0,0 +1,153 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. 
+ * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.networks; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import lombok.Getter; +import lombok.NonNull; +import lombok.Setter; +import net.brutex.ai.dnn.api.IModel; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.gradient.Gradient; +import org.nd4j.linalg.api.ndarray.INDArray; + + +/** + * Artificial Neural Network An artificial neural network (1) takes some input data, and (2) + * transforms this input data by calculating a weighted sum over the inputs and (3) applies a + * non-linear function to this transformation to calculate an intermediate state. The three steps + * above constitute what is known as a layer, and the transformative function is often referred to + * as a unit. The intermediate states—often termed features—are used as the input into another + * layer. + *

+ * Through repetition of these steps, the artificial neural network learns multiple layers of + * non-linear features, which it then combines in a final layer to create a prediction. + *

+ * The neural network learns by generating an error signal that measures the difference between the + * predictions of the network and the desired values and then using this error signal to change the + * weights (or parameters) so that predictions get more accurate. + */ +public abstract class ArtificialNeuralNetwork implements IModel { + + /** + * A neural network is created from a configuration. + * + * @param conf The (new net.brutex.ai) configuration for the network + */ + @Getter + @Setter + @NonNull + private NeuralNetConfiguration netConfiguration; + + @Getter + @Setter + private Map paramTable; + + /** + * Table of parameters by key, for backprop. For many models (dense layers, etc) - all parameters + * are backprop parameters + * + * @param backpropParamsOnly If true, return backprop params only. If false: return all params + * (equivalent to paramsTable()) + */ + @Override + public Map getParamTable(boolean backpropParamsOnly) { + return paramTable; + } + + + /** + * Set the parameters of the network. Note that the parameter keys must match the format as + * described in {@link #getParam(String)} and {@link #getParamTable()}. Note that the values of the + * parameters used as an argument to this method are copied - i.e., it is safe to later + * modify/reuse the values in the provided paramTable without this impacting the network. + * + * @param paramTable Parameters to set + */ + @Override + public void setParamTable(Map paramTable) { + Map currParamTable = getParamTable(); + if(currParamTable == null) { + currParamTable = paramTable; + } else if (!currParamTable.keySet().equals(paramTable.keySet())) { + throw new IllegalArgumentException( + "Cannot set param table: parameter keys do not match.\n" + "Current: " + + currParamTable.keySet() + "\nTo set: " + paramTable.keySet()); + } + + for (String s : paramTable.keySet()) { + INDArray curr = currParamTable.get(s); + INDArray toSet = paramTable.get(s); + if (!Arrays.equals(curr.shape(), toSet.shape())) { + throw new IllegalArgumentException( + "Cannot set parameter table: parameter \"" + s + "\" shapes " + + "do not match. Current = " + Arrays.toString(curr.shape()) + ", to set = " + + Arrays.toString(toSet.shape())); + } + } + + //Now that we've checked ALL params (to avoid leaving net in half-modified state) + for (String s : paramTable.keySet()) { + INDArray curr = currParamTable.get(s); + INDArray toSet = paramTable.get(s); + curr.assign(toSet); + } + } + + + + /** + * Create a new network from configuration + * + * @param conf the configuration + */ + public ArtificialNeuralNetwork(NeuralNetConfiguration conf) { + this.netConfiguration = conf; + } + + /** + * Update all parameters (for all parameter types) with the given gradient. + * + * @param gradient the gradients to add + */ + public void update(Gradient gradient) { + for (String paramType : gradient.gradientForVariable().keySet()) { + update(gradient.getGradientFor(paramType), paramType); + } + } + + /** + * Update the parameters of a given type with a given gradient. 
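+ * <p>The update is purely additive. Conceptually, for a hypothetical parameter key {@code "W"}
+ * and its gradient {@code gradientForW}, it is equivalent to:
+ * <pre>{@code
+ * INDArray current = getParam("W");
+ * current.addi(gradientForW);   // in-place addition
+ * setParam("W", current);
+ * }</pre>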
+ * + * @param gradient the gradient to apply + * @param paramType + */ + public void update(INDArray gradient, String paramType) { + setParam(paramType, getParam(paramType).addi(gradient)); + } + + + + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingConfiguration.java index 8f55745ed..d95c5aab6 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingConfiguration.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingConfiguration.java @@ -22,12 +22,12 @@ package org.deeplearning4j.earlystopping; import lombok.Data; import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.saver.InMemoryModelSaver; import org.deeplearning4j.earlystopping.scorecalc.ScoreCalculator; import org.deeplearning4j.earlystopping.termination.EpochTerminationCondition; import org.deeplearning4j.earlystopping.termination.IterationTerminationCondition; import org.deeplearning4j.exception.DL4JInvalidConfigException; -import org.deeplearning4j.nn.api.Model; import org.nd4j.common.function.Supplier; import java.io.Serializable; @@ -37,7 +37,7 @@ import java.util.List; @Data @NoArgsConstructor -public class EarlyStoppingConfiguration implements Serializable { +public class EarlyStoppingConfiguration implements Serializable { private EarlyStoppingModelSaver modelSaver; private List epochTerminationConditions; @@ -89,7 +89,7 @@ public class EarlyStoppingConfiguration implements Serializable } - public static class Builder { + public static class Builder { private EarlyStoppingModelSaver modelSaver = new InMemoryModelSaver<>(); private List epochTerminationConditions = new ArrayList<>(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingModelSaver.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingModelSaver.java index a9793175a..9037e0792 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingModelSaver.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingModelSaver.java @@ -20,10 +20,10 @@ package org.deeplearning4j.earlystopping; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.saver.InMemoryModelSaver; import org.deeplearning4j.earlystopping.saver.LocalFileGraphSaver; import org.deeplearning4j.earlystopping.saver.LocalFileModelSaver; -import org.deeplearning4j.nn.api.Model; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; @@ -38,7 +38,7 @@ import java.io.Serializable; }) @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@class") -public interface EarlyStoppingModelSaver extends Serializable { +public interface EarlyStoppingModelSaver extends Serializable { /** Save the best model (so far) learned during early stopping training */ void saveBestModel(T net, double score) throws IOException; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingResult.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingResult.java index 6f44c7fdb..817f4c7db 100644 --- 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingResult.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/EarlyStoppingResult.java @@ -21,13 +21,13 @@ package org.deeplearning4j.earlystopping; import lombok.Data; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import java.io.Serializable; import java.util.Map; @Data -public class EarlyStoppingResult implements Serializable { +public class EarlyStoppingResult implements Serializable { public enum TerminationReason { Error, IterationTerminationCondition, EpochTerminationCondition } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/listener/EarlyStoppingListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/listener/EarlyStoppingListener.java index 191870de3..016b31881 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/listener/EarlyStoppingListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/listener/EarlyStoppingListener.java @@ -20,11 +20,11 @@ package org.deeplearning4j.earlystopping.listener; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.EarlyStoppingConfiguration; import org.deeplearning4j.earlystopping.EarlyStoppingResult; -import org.deeplearning4j.nn.api.Model; -public interface EarlyStoppingListener { +public interface EarlyStoppingListener { /**Method to be called when early stopping training is first started */ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/saver/InMemoryModelSaver.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/saver/InMemoryModelSaver.java index 4e63ef0c5..b24b47651 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/saver/InMemoryModelSaver.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/saver/InMemoryModelSaver.java @@ -21,11 +21,11 @@ package org.deeplearning4j.earlystopping.saver; import org.deeplearning4j.earlystopping.EarlyStoppingModelSaver; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import java.io.IOException; -public class InMemoryModelSaver implements EarlyStoppingModelSaver { +public class InMemoryModelSaver implements EarlyStoppingModelSaver { private transient T bestModel; private transient T latestModel; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/AutoencoderScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/AutoencoderScoreCalculator.java index 0c70667dd..69f1785e4 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/AutoencoderScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/AutoencoderScoreCalculator.java @@ -20,9 +20,9 @@ package org.deeplearning4j.earlystopping.scorecalc; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.scorecalc.base.BaseScoreCalculator; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.feedforward.autoencoder.AutoEncoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -32,7 +32,7 @@ import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import 
org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -public class AutoencoderScoreCalculator extends BaseScoreCalculator { +public class AutoencoderScoreCalculator extends BaseScoreCalculator { protected final Metric metric; protected RegressionEvaluation evaluation; @@ -48,7 +48,7 @@ public class AutoencoderScoreCalculator extends BaseScoreCalculator { } @Override - protected INDArray output(Model net, INDArray input, INDArray fMask, INDArray lMask) { + protected INDArray output(IModel net, INDArray input, INDArray fMask, INDArray lMask) { Layer l; if(net instanceof MultiLayerNetwork) { @@ -71,19 +71,19 @@ public class AutoencoderScoreCalculator extends BaseScoreCalculator { } @Override - protected INDArray[] output(Model network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { + protected INDArray[] output(IModel network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { return new INDArray[]{output(network, get0(input), get0(fMask), get0(lMask))}; } @Override - protected double scoreMinibatch(Model network, INDArray features, INDArray labels, INDArray fMask, + protected double scoreMinibatch(IModel network, INDArray features, INDArray labels, INDArray fMask, INDArray lMask, INDArray output) { evaluation.eval(features, output); return 0.0; //Not used } @Override - protected double scoreMinibatch(Model network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { + protected double scoreMinibatch(IModel network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { return scoreMinibatch(network, get0(features), get0(labels), get0(fMask), get0(lMask), get0(output)); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ClassificationScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ClassificationScoreCalculator.java index ae13edc79..b9884f68f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ClassificationScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ClassificationScoreCalculator.java @@ -20,13 +20,13 @@ package org.deeplearning4j.earlystopping.scorecalc; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.scorecalc.base.BaseIEvaluationScoreCalculator; -import org.deeplearning4j.nn.api.Model; import org.nd4j.evaluation.classification.Evaluation; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; -public class ClassificationScoreCalculator extends BaseIEvaluationScoreCalculator { +public class ClassificationScoreCalculator extends BaseIEvaluationScoreCalculator { protected final Evaluation.Metric metric; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/DataSetLossCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/DataSetLossCalculator.java index e8d403a7f..2f6199449 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/DataSetLossCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/DataSetLossCalculator.java @@ -21,7 +21,7 @@ package org.deeplearning4j.earlystopping.scorecalc; import org.deeplearning4j.earlystopping.scorecalc.base.BaseScoreCalculator; -import org.deeplearning4j.nn.api.Model; +import 
net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; @@ -31,7 +31,7 @@ import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; import com.fasterxml.jackson.annotation.JsonProperty; -public class DataSetLossCalculator extends BaseScoreCalculator { +public class DataSetLossCalculator extends BaseScoreCalculator { @JsonProperty private boolean average; @@ -70,12 +70,12 @@ public class DataSetLossCalculator extends BaseScoreCalculator { } @Override - protected INDArray output(Model network, INDArray input, INDArray fMask, INDArray lMask) { + protected INDArray output(IModel network, INDArray input, INDArray fMask, INDArray lMask) { return output(network, arr(input), arr(fMask), arr(lMask))[0]; } @Override - protected INDArray[] output(Model network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { + protected INDArray[] output(IModel network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { if(network instanceof MultiLayerNetwork){ INDArray out = ((MultiLayerNetwork) network).output(input[0], false, get0(fMask), get0(lMask)); return new INDArray[]{out}; @@ -87,7 +87,7 @@ public class DataSetLossCalculator extends BaseScoreCalculator { } @Override - protected double scoreMinibatch(Model network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { + protected double scoreMinibatch(IModel network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { if(network instanceof MultiLayerNetwork){ return ((MultiLayerNetwork) network).score(new DataSet(get0(features), get0(labels), get0(fMask), get0(lMask)), false) * features[0].size(0); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ROCScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ROCScoreCalculator.java index 27fdbd8aa..ca3e5ab1c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ROCScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ROCScoreCalculator.java @@ -20,8 +20,8 @@ package org.deeplearning4j.earlystopping.scorecalc; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.scorecalc.base.BaseIEvaluationScoreCalculator; -import org.deeplearning4j.nn.api.Model; import org.nd4j.evaluation.IEvaluation; import org.nd4j.evaluation.classification.ROC; import org.nd4j.evaluation.classification.ROCBinary; @@ -29,7 +29,7 @@ import org.nd4j.evaluation.classification.ROCMultiClass; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; -public class ROCScoreCalculator extends BaseIEvaluationScoreCalculator { +public class ROCScoreCalculator extends BaseIEvaluationScoreCalculator { public enum ROCType {ROC, BINARY, MULTICLASS} public enum Metric {AUC, AUPRC} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/RegressionScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/RegressionScoreCalculator.java index 5dab31e29..3ffd58a6a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/RegressionScoreCalculator.java +++ 
b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/RegressionScoreCalculator.java @@ -20,13 +20,13 @@ package org.deeplearning4j.earlystopping.scorecalc; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.scorecalc.base.BaseIEvaluationScoreCalculator; -import org.deeplearning4j.nn.api.Model; import org.nd4j.evaluation.regression.RegressionEvaluation; import org.nd4j.evaluation.regression.RegressionEvaluation.Metric; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; -public class RegressionScoreCalculator extends BaseIEvaluationScoreCalculator { +public class RegressionScoreCalculator extends BaseIEvaluationScoreCalculator { protected final Metric metric; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ScoreCalculator.java index 8e994a678..a9568d2d9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/ScoreCalculator.java @@ -20,7 +20,7 @@ package org.deeplearning4j.earlystopping.scorecalc; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonSubTypes; import com.fasterxml.jackson.annotation.JsonTypeInfo; @@ -34,7 +34,7 @@ import java.io.Serializable; @JsonSubTypes.Type(value = DataSetLossCalculatorCG.class, name = "MaxEpochsTerminationCondition"), }) -public interface ScoreCalculator extends Serializable { +public interface ScoreCalculator extends Serializable { /** Calculate the score for the given MultiLayerNetwork */ double calculateScore(T network); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconErrorScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconErrorScoreCalculator.java index 687eb9969..4b2f1eb9f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconErrorScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconErrorScoreCalculator.java @@ -20,9 +20,9 @@ package org.deeplearning4j.earlystopping.scorecalc; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.scorecalc.base.BaseScoreCalculator; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -32,7 +32,7 @@ import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -public class VAEReconErrorScoreCalculator extends BaseScoreCalculator { +public class VAEReconErrorScoreCalculator extends BaseScoreCalculator { protected final Metric metric; protected RegressionEvaluation evaluation; @@ -54,7 +54,7 @@ public class VAEReconErrorScoreCalculator extends BaseScoreCalculator { } @Override - protected INDArray output(Model net, INDArray input, INDArray fMask, INDArray lMask) { + protected INDArray output(IModel net, INDArray input, INDArray fMask, INDArray lMask) { Layer l; if(net instanceof 
MultiLayerNetwork) { MultiLayerNetwork network = (MultiLayerNetwork)net; @@ -74,19 +74,19 @@ public class VAEReconErrorScoreCalculator extends BaseScoreCalculator { } @Override - protected INDArray[] output(Model network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { + protected INDArray[] output(IModel network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { return new INDArray[]{output(network, get0(input), get0(fMask), get0(lMask))}; } @Override - protected double scoreMinibatch(Model network, INDArray features, INDArray labels, INDArray fMask, + protected double scoreMinibatch(IModel network, INDArray features, INDArray labels, INDArray fMask, INDArray lMask, INDArray output) { evaluation.eval(features, output); return 0.0; //Not used } @Override - protected double scoreMinibatch(Model network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { + protected double scoreMinibatch(IModel network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { return scoreMinibatch(network, get0(features), get0(labels), get0(fMask), get0(lMask), get0(output)); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconProbScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconProbScoreCalculator.java index 0ed2aef4b..0328d7e66 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconProbScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/VAEReconProbScoreCalculator.java @@ -20,16 +20,16 @@ package org.deeplearning4j.earlystopping.scorecalc; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.scorecalc.base.BaseScoreCalculator; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; -public class VAEReconProbScoreCalculator extends BaseScoreCalculator { +public class VAEReconProbScoreCalculator extends BaseScoreCalculator { protected final int reconstructionProbNumSamples; protected final boolean logProb; @@ -73,17 +73,17 @@ public class VAEReconProbScoreCalculator extends BaseScoreCalculator { } @Override - protected INDArray output(Model network, INDArray input, INDArray fMask, INDArray lMask) { + protected INDArray output(IModel network, INDArray input, INDArray fMask, INDArray lMask) { return null; //Not used } @Override - protected INDArray[] output(Model network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { + protected INDArray[] output(IModel network, INDArray[] input, INDArray[] fMask, INDArray[] lMask) { return null; //Not used } @Override - protected double scoreMinibatch(Model net, INDArray features, INDArray labels, INDArray fMask, + protected double scoreMinibatch(IModel net, INDArray features, INDArray labels, INDArray fMask, INDArray lMask, INDArray output) { Layer l; if(net instanceof MultiLayerNetwork) { @@ -108,7 +108,7 @@ public class VAEReconProbScoreCalculator extends BaseScoreCalculator { } @Override - protected double scoreMinibatch(Model network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] 
output) { + protected double scoreMinibatch(IModel network, INDArray[] features, INDArray[] labels, INDArray[] fMask, INDArray[] lMask, INDArray[] output) { return 0; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseIEvaluationScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseIEvaluationScoreCalculator.java index 89dd780dc..7a064c151 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseIEvaluationScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseIEvaluationScoreCalculator.java @@ -22,7 +22,7 @@ package org.deeplearning4j.earlystopping.scorecalc.base; import org.deeplearning4j.datasets.iterator.MultiDataSetWrapperIterator; import org.deeplearning4j.earlystopping.scorecalc.ScoreCalculator; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.evaluation.IEvaluation; @@ -30,7 +30,7 @@ import org.nd4j.linalg.dataset.adapter.MultiDataSetIteratorAdapter; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; -public abstract class BaseIEvaluationScoreCalculator implements ScoreCalculator { +public abstract class BaseIEvaluationScoreCalculator implements ScoreCalculator { protected MultiDataSetIterator iterator; protected DataSetIterator iter; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseScoreCalculator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseScoreCalculator.java index d0407b2e9..ce01ebfcd 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseScoreCalculator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/scorecalc/base/BaseScoreCalculator.java @@ -22,14 +22,14 @@ package org.deeplearning4j.earlystopping.scorecalc.base; import lombok.NonNull; import org.deeplearning4j.earlystopping.scorecalc.ScoreCalculator; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.MultiDataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; -public abstract class BaseScoreCalculator implements ScoreCalculator { +public abstract class BaseScoreCalculator implements ScoreCalculator { protected MultiDataSetIterator mdsIterator; protected DataSetIterator iterator; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/BaseEarlyStoppingTrainer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/BaseEarlyStoppingTrainer.java index a39a08d97..770512e4d 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/BaseEarlyStoppingTrainer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/BaseEarlyStoppingTrainer.java @@ -20,13 +20,13 @@ package org.deeplearning4j.earlystopping.trainer; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.EarlyStoppingConfiguration; import 
org.deeplearning4j.earlystopping.EarlyStoppingResult; import org.deeplearning4j.earlystopping.listener.EarlyStoppingListener; import org.deeplearning4j.earlystopping.scorecalc.ScoreCalculator; import org.deeplearning4j.earlystopping.termination.EpochTerminationCondition; import org.deeplearning4j.earlystopping.termination.IterationTerminationCondition; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.TrainingListener; @@ -47,7 +47,7 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Map; -public abstract class BaseEarlyStoppingTrainer implements IEarlyStoppingTrainer { +public abstract class BaseEarlyStoppingTrainer implements IEarlyStoppingTrainer { private static final Logger log = LoggerFactory.getLogger(BaseEarlyStoppingTrainer.class); @@ -168,12 +168,12 @@ public abstract class BaseEarlyStoppingTrainer implements IEarl if(pretrain){ //TODO support for non-first-layer pretraining if(model instanceof MultiLayerNetwork){ - lastScore = (((MultiLayerNetwork) model).getLayer(0)).score(); + lastScore = (((MultiLayerNetwork) model).getLayer(0)).getScore(); } else { - lastScore = (((ComputationGraph) model).getLayer(0)).score(); + lastScore = (((ComputationGraph) model).getLayer(0)).getScore(); } } else { - lastScore = model.score(); + lastScore = model.getScore(); } for (IterationTerminationCondition c : esConfig.getIterationTerminationConditions()) { if (c.terminate(lastScore)) { @@ -337,16 +337,16 @@ public abstract class BaseEarlyStoppingTrainer implements IEarl } //Trigger epoch listener methods manually - these won't be triggered due to not calling fit(DataSetIterator) etc - protected void triggerEpochListeners(boolean epochStart, Model model, int epochNum){ + protected void triggerEpochListeners(boolean epochStart, IModel model, int epochNum){ Collection listeners; if(model instanceof MultiLayerNetwork){ MultiLayerNetwork n = ((MultiLayerNetwork) model); - listeners = n.getListeners(); + listeners = n.getTrainingListeners(); n.setEpochCount(epochNum); } else if(model instanceof ComputationGraph){ ComputationGraph cg = ((ComputationGraph) model); - listeners = cg.getListeners(); - cg.getConfiguration().setEpochCount(epochNum); + listeners = cg.getTrainingListeners(); + cg.getComputationGraphConfiguration().setEpochCount(epochNum); } else { return; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/EarlyStoppingTrainer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/EarlyStoppingTrainer.java index f4df7a3d4..8c36c07d2 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/EarlyStoppingTrainer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/EarlyStoppingTrainer.java @@ -25,8 +25,7 @@ import org.deeplearning4j.datasets.iterator.impl.SingletonDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.SingletonMultiDataSetIterator; import org.deeplearning4j.earlystopping.EarlyStoppingConfiguration; import org.deeplearning4j.earlystopping.listener.EarlyStoppingListener; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; -import org.deeplearning4j.nn.graph.ComputationGraph; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.dataset.DataSet; import 
org.nd4j.linalg.dataset.api.MultiDataSet; @@ -39,7 +38,7 @@ public class EarlyStoppingTrainer extends BaseEarlyStoppingTrainer earlyStoppingConfiguration, - MultiLayerConfiguration configuration, DataSetIterator train) { + NeuralNetConfiguration configuration, DataSetIterator train) { this(earlyStoppingConfiguration, new MultiLayerNetwork(configuration), train); net.init(); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/IEarlyStoppingTrainer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/IEarlyStoppingTrainer.java index fd86168c6..718e10d0d 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/IEarlyStoppingTrainer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/earlystopping/trainer/IEarlyStoppingTrainer.java @@ -20,11 +20,11 @@ package org.deeplearning4j.earlystopping.trainer; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.EarlyStoppingResult; import org.deeplearning4j.earlystopping.listener.EarlyStoppingListener; -import org.deeplearning4j.nn.api.Model; -public interface IEarlyStoppingTrainer { +public interface IEarlyStoppingTrainer { /** Conduct early stopping training */ EarlyStoppingResult fit(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/gradientcheck/GradientCheckUtil.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/gradientcheck/GradientCheckUtil.java index 121102214..0cccc2a4f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/gradientcheck/GradientCheckUtil.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/gradientcheck/GradientCheckUtil.java @@ -23,6 +23,8 @@ package org.deeplearning4j.gradientcheck; import lombok.*; import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.exception.ND4JArraySizeException; import org.nd4j.common.function.Consumer; @@ -31,10 +33,8 @@ import org.nd4j.common.primitives.Pair; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.api.layers.IOutputLayer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; -import org.deeplearning4j.nn.conf.layers.BaseLayer; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.BaseOutputLayer; @@ -82,12 +82,12 @@ public class GradientCheckUtil { IActivation afn = null; if(outputLayer instanceof BaseOutputLayer){ BaseOutputLayer o = (BaseOutputLayer)outputLayer; - lfn = ((org.deeplearning4j.nn.conf.layers.BaseOutputLayer)o.layerConf()).getLossFn(); - afn = o.layerConf().getActivationFn(); + lfn = ((org.deeplearning4j.nn.conf.layers.BaseOutputLayer)o.getTypedLayerConfiguration()).getLossFn(); + afn = o.getLayerConfiguration().getActivationFn(); } else if(outputLayer instanceof LossLayer){ LossLayer o = (LossLayer) outputLayer; - lfn = o.layerConf().getLossFn(); - afn = o.layerConf().getActivationFn(); + lfn = o.getTypedLayerConfiguration().getLossFn(); + afn = o.getTypedLayerConfiguration().getActivationFn(); } if (lfn instanceof LossMCXENT && afn instanceof ActivationSoftmax && ((LossMCXENT) lfn).getSoftmaxClipEps() != 
0) { @@ -204,23 +204,23 @@ public class GradientCheckUtil { + "DataTypeUtil.setDTypeForContext(DataType.DOUBLE); before using GradientCheckUtil"); } - DataType netDataType = c.net.getLayerWiseConfigurations().getDataType(); + DataType netDataType = c.net.getNetConfiguration().getDataType(); if (netDataType != DataType.DOUBLE) { throw new IllegalStateException("Cannot perform gradient check: Network datatype is not set to double precision (" + "is: " + netDataType + "). Double precision must be used for gradient checks. Create network with .dataType(DataType.DOUBLE) before using GradientCheckUtil"); } - if(netDataType != c.net.params().dataType()){ + if(netDataType != c.net.getModelParams().dataType()){ throw new IllegalStateException("Parameters datatype does not match network configuration datatype (" - + "is: " + c.net.params().dataType() + "). If network datatype is set to DOUBLE, parameters must also be DOUBLE."); + + "is: " + c.net.getModelParams().dataType() + "). If network datatype is set to DOUBLE, parameters must also be DOUBLE."); } //Check network configuration: int layerCount = 0; - for (NeuralNetConfiguration n : c.net.getLayerWiseConfigurations().getConfs()) { - if (n.getLayer() instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) n.getLayer(); + for (LayerConfiguration n : c.net.getNetConfiguration().getFlattenedLayerConfigurations()) { + if (n instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) n; IUpdater u = bl.getIUpdater(); if (u instanceof Sgd) { //Must have LR of 1.0 @@ -228,7 +228,7 @@ public class GradientCheckUtil { if (lr != 1.0) { throw new IllegalStateException("When using SGD updater, must also use lr=1.0 for layer " + layerCount + "; got " + u + " with lr=" + lr + " for layer \"" - + n.getLayer().getLayerName() + "\""); + + n.getLayerName() + "\""); } } else if (!(u instanceof NoOp)) { throw new IllegalStateException( @@ -238,7 +238,7 @@ public class GradientCheckUtil { IActivation activation = bl.getActivationFn(); if (activation != null) { if (!VALID_ACTIVATION_FUNCTIONS.contains(activation.getClass())) { - log.warn("Layer " + layerCount + " is possibly using an unsuitable activation function: " + log.warn("LayerConfiguration " + layerCount + " is possibly using an unsuitable activation function: " + activation.getClass() + ". 
Activation functions for gradient checks must be smooth (like sigmoid, tanh, softmax) and not " + "contain discontinuities like ReLU or LeakyReLU (these may cause spurious failures)"); @@ -246,10 +246,10 @@ public class GradientCheckUtil { } } - if (n.getLayer().getIDropout() != null && c.callEachIter == null) { + if (n.getIDropout() != null && c.callEachIter == null) { throw new IllegalStateException("When gradient checking dropout, need to reset RNG seed each iter, or no" + " dropout should be present during gradient checks - got dropout = " - + n.getLayer().getIDropout() + " for layer " + layerCount); + + n.getIDropout() + " for layer " + layerCount); } } @@ -273,11 +273,11 @@ public class GradientCheckUtil { updater.update(c.net, gradAndScore.getFirst(), 0, 0, c.net.batchSize(), LayerWorkspaceMgr.noWorkspaces()); INDArray gradientToCheck = gradAndScore.getFirst().gradient().dup(); //need dup: gradients are a *view* of the full gradient array (which will change every time backprop is done) - INDArray originalParams = c.net.params().dup(); //need dup: params are a *view* of full parameters + INDArray originalParams = c.net.getModelParams().dup(); //need dup: params are a *view* of full parameters val nParams = originalParams.length(); - Map paramTable = c.net.paramTable(); + Map paramTable = c.net.getParamTable(); List paramNames = new ArrayList<>(paramTable.keySet()); val paramEnds = new long[paramNames.size()]; paramEnds[0] = paramTable.get(paramNames.get(0)).length(); @@ -306,8 +306,8 @@ public class GradientCheckUtil { if(c.print == PrintMode.ALL) { int i=0; for (Layer l : c.net.getLayers()) { - Set s = l.paramTable().keySet(); - log.info("Layer " + i + ": " + l.getClass().getSimpleName() + " - params " + s); + Set s = l.getParamTable().keySet(); + log.info("LayerConfiguration " + i + ": " + l.getClass().getSimpleName() + " - params " + s); i++; } } @@ -322,7 +322,7 @@ public class GradientCheckUtil { log.info("NOTE: parameters will be skipped due to config: {}", c.excludeParams); } - INDArray params = c.net.params(); //Assumption here: params is a view that we can modify in-place + INDArray params = c.net.getModelParams(); //Assumption here: params is a view that we can modify in-place for (long i = 0; i < nParams; ) { //Get param name if (i >= paramEnds[currParamNameIdx]) { @@ -431,27 +431,27 @@ public class GradientCheckUtil { + "DataTypeUtil.setDTypeForContext(DataType.DOUBLE); before using GradientCheckUtil"); } - DataType netDataType = c.net.getConfiguration().getDataType(); + DataType netDataType = c.net.getComputationGraphConfiguration().getDataType(); if (netDataType != DataType.DOUBLE) { throw new IllegalStateException("Cannot perform gradient check: Network datatype is not set to double precision (" + "is: " + netDataType + "). Double precision must be used for gradient checks. Create network with .dataType(DataType.DOUBLE) before using GradientCheckUtil"); } - if(netDataType != c.net.params().dataType()){ + if(netDataType != c.net.getModelParams().dataType()){ throw new IllegalStateException("Parameters datatype does not match network configuration datatype (" - + "is: " + c.net.params().dataType() + "). If network datatype is set to DOUBLE, parameters must also be DOUBLE."); + + "is: " + c.net.getModelParams().dataType() + "). 
If network datatype is set to DOUBLE, parameters must also be DOUBLE."); } //Check configuration int layerCount = 0; - for (String vertexName : c.net.getConfiguration().getVertices().keySet()) { - GraphVertex gv = c.net.getConfiguration().getVertices().get(vertexName); + for (String vertexName : c.net.getComputationGraphConfiguration().getVertices().keySet()) { + GraphVertex gv = c.net.getComputationGraphConfiguration().getVertices().get(vertexName); if (!(gv instanceof LayerVertex)) continue; LayerVertex lv = (LayerVertex) gv; - if (lv.getLayerConf().getLayer() instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) lv.getLayerConf().getLayer(); + if (lv.getLayerConfiguration() instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) lv.getLayerConfiguration(); IUpdater u = bl.getIUpdater(); if (u instanceof Sgd) { //Must have LR of 1.0 @@ -459,7 +459,7 @@ public class GradientCheckUtil { if (lr != 1.0) { throw new IllegalStateException("When using SGD updater, must also use lr=1.0 for layer " + layerCount + "; got " + u + " with lr=" + lr + " for layer \"" - + lv.getLayerConf().getLayer().getLayerName() + "\""); + + lv.getLayerConfiguration().getLayerName() + "\""); } } else if (!(u instanceof NoOp)) { throw new IllegalStateException( @@ -469,7 +469,7 @@ public class GradientCheckUtil { IActivation activation = bl.getActivationFn(); if (activation != null) { if (!VALID_ACTIVATION_FUNCTIONS.contains(activation.getClass())) { - log.warn("Layer \"" + vertexName + "\" is possibly using an unsuitable activation function: " + log.warn("LayerConfiguration \"" + vertexName + "\" is possibly using an unsuitable activation function: " + activation.getClass() + ". Activation functions for gradient checks must be smooth (like sigmoid, tanh, softmax) and not " + "contain discontinuities like ReLU or LeakyReLU (these may cause spurious failures)"); @@ -477,10 +477,10 @@ public class GradientCheckUtil { } } - if (lv.getLayerConf().getLayer().getIDropout() != null && c.callEachIter == null) { + if (lv.getLayerConfiguration().getIDropout() != null && c.callEachIter == null) { throw new IllegalStateException("When gradient checking dropout, rng seed must be reset each iteration, or no" + " dropout should be present during gradient checks - got dropout = " - + lv.getLayerConf().getLayer().getIDropout() + " for layer " + layerCount); + + lv.getLayerConfiguration().getIDropout() + " for layer " + layerCount); } } @@ -509,11 +509,11 @@ public class GradientCheckUtil { updater.update(gradAndScore.getFirst(), 0, 0, c.net.batchSize(), LayerWorkspaceMgr.noWorkspaces()); INDArray gradientToCheck = gradAndScore.getFirst().gradient().dup(); //need dup: gradients are a *view* of the full gradient array (which will change every time backprop is done) - INDArray originalParams = c.net.params().dup(); //need dup: params are a *view* of full parameters + INDArray originalParams = c.net.getModelParams().dup(); //need dup: params are a *view* of full parameters val nParams = originalParams.length(); - Map paramTable = c.net.paramTable(); + Map paramTable = c.net.getParamTable(); List paramNames = new ArrayList<>(paramTable.keySet()); val paramEnds = new long[paramNames.size()]; paramEnds[0] = paramTable.get(paramNames.get(0)).length(); @@ -529,7 +529,7 @@ public class GradientCheckUtil { int totalNFailures = 0; double maxError = 0.0; MultiDataSet mds = new MultiDataSet(c.inputs, c.labels, c.inputMask, c.labelMask); - INDArray params = c.net.params(); //Assumption here: params is a view that 
we can modify in-place + INDArray params = c.net.getModelParams(); //Assumption here: params is a view that we can modify in-place for (long i = 0; i < nParams; i++) { //Get param name if (i >= paramEnds[currParamNameIdx]) { @@ -642,11 +642,11 @@ public class GradientCheckUtil { updater.update(layer, gradAndScore.getFirst(), 0, 0, layer.batchSize(), LayerWorkspaceMgr.noWorkspaces()); INDArray gradientToCheck = gradAndScore.getFirst().gradient().dup(); //need dup: gradients are a *view* of the full gradient array (which will change every time backprop is done) - INDArray originalParams = layer.params().dup(); //need dup: params are a *view* of full parameters + INDArray originalParams = layer.getParams().dup(); //need dup: params are a *view* of full parameters val nParams = originalParams.length(); - Map paramTable = layer.paramTable(); + Map paramTable = layer.getParamTable(); List paramNames = new ArrayList<>(paramTable.keySet()); val paramEnds = new long[paramNames.size()]; paramEnds[0] = paramTable.get(paramNames.get(0)).length(); @@ -659,7 +659,7 @@ public class GradientCheckUtil { double maxError = 0.0; int currParamNameIdx = 0; - INDArray params = layer.params(); //Assumption here: params is a view that we can modify in-place + INDArray params = layer.getParams(); //Assumption here: params is a view that we can modify in-place for (int i = 0; i < nParams; i++) { //Get param name if (i >= paramEnds[currParamNameIdx]) { @@ -674,13 +674,13 @@ public class GradientCheckUtil { //TODO add a 'score' method that doesn't calculate gradients... Nd4j.getRandom().setSeed(rngSeed); layer.computeGradientAndScore(mgr); - double scorePlus = layer.score(); + double scorePlus = layer.getScore(); //(w-epsilon): Do forward pass and score params.putScalar(i, origValue - epsilon); Nd4j.getRandom().setSeed(rngSeed); layer.computeGradientAndScore(mgr); - double scoreMinus = layer.score(); + double scoreMinus = layer.getScore(); //Reset original param value params.putScalar(i, origValue); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/adapters/YoloModelAdapter.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/adapters/YoloModelAdapter.java index 57ec18aa1..ea435af20 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/adapters/YoloModelAdapter.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/adapters/YoloModelAdapter.java @@ -24,7 +24,7 @@ import lombok.AllArgsConstructor; import lombok.Builder; import lombok.NoArgsConstructor; import lombok.val; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.ModelAdapter; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.objdetect.DetectedObject; @@ -43,7 +43,7 @@ public class YoloModelAdapter implements ModelAdapter> { @Builder.Default private double detectionThreshold = 0.5; @Override - public List apply(Model model, INDArray[] inputs, INDArray[] masks, INDArray[] labelsMasks) { + public List apply(IModel model, INDArray[] inputs, INDArray[] masks, INDArray[] labelsMasks) { if (model instanceof ComputationGraph) { val blindLayer = ((ComputationGraph) model).getOutputLayer(outputLayerIndex); if (blindLayer instanceof Yolo2OutputLayer) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/AbstractParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/AbstractParamInitializer.java new file mode 100644 index 000000000..d93c96448 --- 
/dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/AbstractParamInitializer.java @@ -0,0 +1,39 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package org.deeplearning4j.nn.api; + +import lombok.Getter; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; + +public abstract class AbstractParamInitializer implements ParamInitializer { + + @Deprecated + public long numParams(NeuralNetConfiguration conf) { + long res = 0; + for(LayerConfiguration lc : conf.getFlattenedLayerConfigurations()) { + res += lc.initializer().numParams(lc); + } + return res; + } + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Classifier.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Classifier.java index 3643297d3..631f1bed4 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Classifier.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Classifier.java @@ -20,6 +20,7 @@ package org.deeplearning4j.nn.api; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; @@ -27,7 +28,7 @@ import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import java.util.List; -public interface Classifier extends Model { +public interface Classifier extends IModel { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Trainable.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ITrainableLayer.java similarity index 70% rename from cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Trainable.java rename to cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ITrainableLayer.java index f93e1c5ee..d9c85d1f3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Trainable.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ITrainableLayer.java @@ -20,16 +20,21 @@ package org.deeplearning4j.nn.api; +import java.util.Map; import org.nd4j.linalg.api.ndarray.INDArray; -import java.util.Map; +public interface ITrainableLayer { -public interface Trainable { + + Map getParamTable(); + Map getParamTable(boolean isBackprop); + + void setParamTable(Map paramTable); /** * @return Training configuration */ - TrainingConfig getConfig(); + ITraininableLayerConfiguration getTrainingConfig(); /** * @return Number of parameters @@ -39,13 +44,32 @@ public interface Trainable { /** * @return 1d parameter vector */ - INDArray params(); + INDArray getParams(); /** - * @param backpropOnly If 
true: return only parameters that are not exclusively used for layerwise pretraining - * @return Parameter table + * The param table + * + * @return + + Map getParamTable(); */ - Map paramTable(boolean backpropOnly); + + /** + * Table of parameters by key, for backprop. For many models (dense layers, etc) - all parameters + * are backprop parameters + * + * @param backpropParamsOnly If true, return backprop params only. If false: return all params + * (equivalent to paramsTable()) + + Map getParamTable(boolean backpropParamsOnly); +*/ + /** + * Setter for the param table + * + * @param paramTable + void setParamTable(Map paramTable); +*/ + /** * DL4J layers typically produce the sum of the gradients during the backward pass for each layer, and if required diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/TrainingConfig.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ITraininableLayerConfiguration.java similarity index 91% rename from cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/TrainingConfig.java rename to cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ITraininableLayerConfiguration.java index ae7601a6f..40a3170b4 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/TrainingConfig.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ITraininableLayerConfiguration.java @@ -27,7 +27,7 @@ import org.nd4j.linalg.learning.regularization.Regularization; import java.util.List; -public interface TrainingConfig { +public interface ITraininableLayerConfiguration { /** * @return Name of the layer @@ -47,7 +47,7 @@ public interface TrainingConfig { * Is the specified parameter a layerwise pretraining only parameter?
* For example, visible bias params in an autoencoder (or, decoder params in a variational autoencoder) aren't * used during supervised backprop.
- * Layers (like DenseLayer, etc) with no pretrainable parameters will return false for all (valid) inputs. + * Layers (like DenseLayerConfiguration, etc) with no pretrainable parameters will return false for all (valid) inputs. * * @param paramName Parameter name/key * @return True if the parameter is for layerwise pretraining only, false otherwise @@ -55,7 +55,7 @@ public interface TrainingConfig { boolean isPretrainParam(String paramName); /** - * Get the updater for the given parameter. Typically the same updater will be used for all updaters, but this + * Get the updater for the given parameter. Typically the same updater will be used for all parameters, but this * is not necessarily the case * * @param paramName Parameter name @@ -74,5 +74,4 @@ public interface TrainingConfig { double getGradientNormalizationThreshold(); void setDataType(DataType dataType); - } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Layer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Layer.java index 60780ab99..7ff694e99 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Layer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Layer.java @@ -21,205 +21,228 @@ package org.deeplearning4j.nn.api; +import java.util.Map; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.CacheMode; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.LayerHelper; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.common.primitives.Pair; import java.io.Serializable; -import java.util.Collection; -public interface Layer extends Serializable, Cloneable, Model, Trainable { +/** + * A layer is the highest-level building block in deep learning. A layer is a container that usually + * receives weighted input, transforms it with a set of mostly non-linear functions and then passes + * these values as output to the next layer. A layer is usually uniform, that is it only contains + * one type of activation function, pooling, convolution etc. so that it can be easily compared to + * other parts of the network. The first and last layers in a network are called input and output + * layers, respectively, and all layers in between are called hidden layers. + * + * @see NVIDIA Deep Learning In A Nutshell + */ +public interface Layer extends Serializable, Cloneable, IModel { //IModel - enum Type { - FEED_FORWARD, RECURRENT, CONVOLUTIONAL, CONVOLUTIONAL3D, - SUBSAMPLING, UPSAMPLING, RECURSIVE, MULTILAYER, NORMALIZATION - } + /** + * Return the configuration of this layer + * @return the configuration + */ + LayerConfiguration getLayerConfiguration(); - enum TrainingMode { - TRAIN, TEST - } + /** + * Set a new layer configuration, new init() needs to be called afterwards. 
+ * @param lconf layer configuration + */ + void setLayerConfiguration(LayerConfiguration lconf); + /** + * Convenient method to get the network configuration + * @return the configuration of the network this layer is part of + * + */ + NeuralNetConfiguration getNetConfiguration(); - /** - * This method sets given CacheMode for current layer - * - * @param mode - */ - void setCacheMode(CacheMode mode); + /** + * This method sets given CacheMode for current layer + * + * @param mode + */ + void setCacheMode(CacheMode mode); - /** - * Calculate the regularization component of the score, for the parameters in this layer
- * For example, the L1, L2 and/or weight decay components of the loss function
- * - * @param backpropOnlyParams If true: calculate regularization score based on backprop params only. If false: calculate - * based on all params (including pretrain params, if any) - * @return the regularization score of - */ - double calcRegularizationScore(boolean backpropOnlyParams); + /** + * Calculate the regularization component of the score, for the parameters in this layer
For + * example, the L1, L2 and/or weight decay components of the loss function
+ * + * @param backpropOnlyParams If true: calculate regularization score based on backprop params + * only. If false: calculate based on all params (including pretrain + * params, if any) + * @return the regularization score of + */ + double calcRegularizationScore(boolean backpropOnlyParams); - /** - * Returns the layer type - * - * @return - */ - Type type(); + /** + * Returns the layer type + * + * @return + */ + Type type(); + + /** + * Calculate the gradient relative to the error in the next layer + * + * @param epsilon w^(L+1)*delta^(L+1). Or, equiv: dC/da, i.e., (dC/dz)*(dz/da) = dC/da, where + * C is cost function a=sigma(z) is activation. + * @param workspaceMgr Workspace manager + * @return Pair where Gradient is gradient for this layer, INDArray is + * epsilon (activation gradient) needed by next layer, but before element-wise multiply by + * sigmaPrime(z). So for standard feed-forward layer, if this layer is L, then return.getSecond() + * == dL/dIn = (w^(L)*(delta^(L))^T)^T. Note that the returned array should be placed in the + * {@link org.deeplearning4j.nn.workspace.ArrayType#ACTIVATION_GRAD} workspace via the workspace + * manager + */ + Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr); + + /** + * Perform forward pass and return the activations array with the last set input + * + * @param training training or test mode + * @param workspaceMgr Workspace manager + * @return the activation (layer output) of the last specified input. Note that the returned array + * should be placed in the {@link org.deeplearning4j.nn.workspace.ArrayType#ACTIVATIONS} workspace + * via the workspace manager + */ + INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr); + + /** + * Perform forward pass and return the activations array with the specified input + * + * @param input the input to use + * @param training train or test mode + * @param mgr Workspace manager. + * @return Activations array. Note that the returned array should be placed in the + * {@link org.deeplearning4j.nn.workspace.ArrayType#ACTIVATIONS} workspace via the workspace + * manager + */ + INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr mgr); + + /** + * Get the layer index. + */ + int getIndex(); + + /** + * Set the layer index. + */ + void setIndex(int index); + + /** + * @return The current iteration count (number of parameter updates) for the layer/network + */ + int getIterationCount(); + + /** + * Set the current iteration count (number of parameter updates) for the layer/network + */ + void setIterationCount(int iterationCount); + + /** + * @return The current epoch count (number of training epochs passed) for the layer/network + */ + int getEpochCount(); + + /** + * Set the current epoch count (number of epochs passed ) for the layer/network + */ + void setEpochCount(int epochCount); + + /** + * Set the layer input. + */ + void setInput(INDArray input, LayerWorkspaceMgr workspaceMgr); + + /** + * Get current/last input mini-batch size, as set by setInputMiniBatchSize(int) + * + * @see Layer#setInputMiniBatchSize(int) + */ + int getInputMiniBatchSize(); + + /** + * Set current/last input mini-batch size.
Used for score and gradient calculations. Mini + * batch size may be different from getInput().size(0) due to reshaping operations - for example, + * when using RNNs with DenseLayerConfiguration and OutputLayer. Called automatically during + * forward pass. + */ + void setInputMiniBatchSize(int size); + + INDArray getMaskArray(); + + /** + * Set the mask array. Note: In general, {@link #feedForwardMaskArray(INDArray, MaskState, int)} + * should be used in preference to this. + * + * @param maskArray Mask array to set + */ + void setMaskArray(INDArray maskArray); + + /** + * Returns true if the layer can be trained in an unsupervised/pretrain manner (AE, VAE, etc) + * + * @return true if the layer can be pretrained (using fit(INDArray), false otherwise + */ + boolean isPretrainLayer(); + + void clearNoiseWeightParams(); + + /** + * A performance optimization: mark whether the layer is allowed to modify its input array + * in-place. In many cases, this is totally safe - in others, the input array will be shared by + * multiple layers, and hence it's not safe to modify the input array. This is usually used by ops + * such as dropout. + * + * @param allow If true: the input array is safe to modify. If false: the input array should be + * copied before it is modified (i.e., in-place modifications are un-safe) + */ + void allowInputModification(boolean allow); + + /** + * Feed forward the input mask array, setting in the layer as appropriate. This allows different + * layers to handle masks differently - for example, bidirectional RNNs and normal RNNs operate + * differently with masks (the former sets activations to 0 outside of the data present region + * (and keeps the mask active for future layers like dense layers), whereas normal RNNs don't zero + * out the activations/errors )instead relying on backpropagated error arrays to handle the + * variable length case.
This is also used for example for networks that contain global + * pooling layers, arbitrary preprocessors, etc. + * + * @param maskArray Mask array to set + * @param currentMaskState Current state of the mask - see {@link MaskState} + * @param minibatchSize Current minibatch size. Needs to be known as it cannot always be + * inferred from the activations array due to reshaping (such as a + * DenseLayerConfiguration within a recurrent neural network) + * @return New mask array after this layer, along with the new mask state. + */ + Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, + int minibatchSize); + + /** + * @return Get the layer helper, if any + */ + LayerHelper getHelper(); - /** - * Calculate the gradient relative to the error in the next layer - * - * @param epsilon w^(L+1)*delta^(L+1). Or, equiv: dC/da, i.e., (dC/dz)*(dz/da) = dC/da, where C - * is cost function a=sigma(z) is activation. - * @param workspaceMgr Workspace manager - * @return Pair where Gradient is gradient for this layer, INDArray is epsilon (activation gradient) - * needed by next layer, but before element-wise multiply by sigmaPrime(z). So for standard feed-forward layer, if this layer is - * L, then return.getSecond() == dL/dIn = (w^(L)*(delta^(L))^T)^T. Note that the returned array should be placed in the - * {@link org.deeplearning4j.nn.workspace.ArrayType#ACTIVATION_GRAD} workspace via the workspace manager - */ - Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr); + /** + * Get a reference to the network this layer is part of. + * @return + */ + IModel getNet(); + enum Type { + FEED_FORWARD, RECURRENT, CONVOLUTIONAL, CONVOLUTIONAL3D, + SUBSAMPLING, UPSAMPLING, RECURSIVE, MULTILAYER, NORMALIZATION + } - - /** - * Perform forward pass and return the activations array with the last set input - * - * @param training training or test mode - * @param workspaceMgr Workspace manager - * @return the activation (layer output) of the last specified input. Note that the returned array should be placed - * in the {@link org.deeplearning4j.nn.workspace.ArrayType#ACTIVATIONS} workspace via the workspace manager - */ - INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr); - - /** - * Perform forward pass and return the activations array with the specified input - * - * @param input the input to use - * @param training train or test mode - * @param mgr Workspace manager. - * @return Activations array. Note that the returned array should be placed in the - * {@link org.deeplearning4j.nn.workspace.ArrayType#ACTIVATIONS} workspace via the workspace manager - */ - INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr mgr); - - /** - * Get the iteration listeners for this layer. - */ - Collection getListeners(); - - /** - * Set the {@link TrainingListener}s for this model. If any listeners have previously been set, they will be - * replaced by this method - */ - void setListeners(TrainingListener... listeners); - - /** - * Set the {@link TrainingListener}s for this model. If any listeners have previously been set, they will be - * replaced by this method - */ - void setListeners(Collection listeners); - - /** - * Set the layer index. - */ - void setIndex(int index); - - /** - * Get the layer index. 
- */ - int getIndex(); - - /** - * @return The current iteration count (number of parameter updates) for the layer/network - */ - int getIterationCount(); - - /** - * @return The current epoch count (number of training epochs passed) for the layer/network - */ - int getEpochCount(); - - /** - * Set the current iteration count (number of parameter updates) for the layer/network - */ - void setIterationCount(int iterationCount); - - /** - * Set the current epoch count (number of epochs passed ) for the layer/network - */ - void setEpochCount(int epochCount); - - /** - * Set the layer input. - */ - void setInput(INDArray input, LayerWorkspaceMgr workspaceMgr); - - /** - * Set current/last input mini-batch size.
- * Used for score and gradient calculations. Mini batch size may be different from - * getInput().size(0) due to reshaping operations - for example, when using RNNs with - * DenseLayer and OutputLayer. Called automatically during forward pass. - */ - void setInputMiniBatchSize(int size); - - /** - * Get current/last input mini-batch size, as set by setInputMiniBatchSize(int) - * - * @see Layer#setInputMiniBatchSize(int) - */ - int getInputMiniBatchSize(); - - /** - * Set the mask array. Note: In general, {@link #feedForwardMaskArray(INDArray, MaskState, int)} should be used in - * preference to this. - * - * @param maskArray Mask array to set - */ - void setMaskArray(INDArray maskArray); - - - INDArray getMaskArray(); - - /** - * Returns true if the layer can be trained in an unsupervised/pretrain manner (AE, VAE, etc) - * - * @return true if the layer can be pretrained (using fit(INDArray), false otherwise - */ - boolean isPretrainLayer(); - - - void clearNoiseWeightParams(); - - /** - * A performance optimization: mark whether the layer is allowed to modify its input array in-place. In many cases, - * this is totally safe - in others, the input array will be shared by multiple layers, and hence it's not safe to - * modify the input array. - * This is usually used by ops such as dropout. - * @param allow If true: the input array is safe to modify. If false: the input array should be copied before it - * is modified (i.e., in-place modifications are un-safe) - */ - void allowInputModification(boolean allow); - - - /** - * Feed forward the input mask array, setting in the layer as appropriate. This allows different layers to - * handle masks differently - for example, bidirectional RNNs and normal RNNs operate differently with masks (the - * former sets activations to 0 outside of the data present region (and keeps the mask active for future layers like - * dense layers), whereas normal RNNs don't zero out the activations/errors )instead relying on backpropagated error - * arrays to handle the variable length case.
- * This is also used for example for networks that contain global pooling layers, arbitrary preprocessors, etc. - * - * @param maskArray Mask array to set - * @param currentMaskState Current state of the mask - see {@link MaskState} - * @param minibatchSize Current minibatch size. Needs to be known as it cannot always be inferred from the activations - * array due to reshaping (such as a DenseLayer within a recurrent neural network) - * @return New mask array after this layer, along with the new mask state. - */ - Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize); - - /** - * @return Get the layer helper, if any - */ - LayerHelper getHelper(); + enum TrainingMode { + TRAIN, TEST + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Model.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Model.java deleted file mode 100644 index 53107fdc5..000000000 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Model.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * ****************************************************************************** - * * - * * - * * This program and the accompanying materials are made available under the - * * terms of the Apache License, Version 2.0 which is available at - * * https://www.apache.org/licenses/LICENSE-2.0. - * * - * * See the NOTICE file distributed with this work for additional - * * information regarding copyright ownership. - * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * * License for the specific language governing permissions and limitations - * * under the License. - * * - * * SPDX-License-Identifier: Apache-2.0 - * ***************************************************************************** - */ - -package org.deeplearning4j.nn.api; - -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.gradient.Gradient; -import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; -import org.deeplearning4j.optimize.api.ConvexOptimizer; -import org.deeplearning4j.optimize.api.TrainingListener; -import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.common.primitives.Pair; - -import java.util.Collection; -import java.util.Map; - -public interface Model { - - /** - * Init the model - */ - void init(); - - - /** - * Set the trainingListeners for the ComputationGraph (and all layers in the network) - */ - void setListeners(Collection listeners); - - - /** - * Set the trainingListeners for the ComputationGraph (and all layers in the network) - */ - void setListeners(TrainingListener... listeners); - - /** - * This method ADDS additional TrainingListener to existing listeners - * - * @param listener - */ - void addListeners(TrainingListener... 
listener); - - - /** - * All models have a fit method - */ - @Deprecated - void fit(); - - /** - * Update layer weights and biases with gradient change - */ - void update(Gradient gradient); - - /** - * Perform one update applying the gradient - * @param gradient the gradient to apply - */ - void update(INDArray gradient, String paramType); - - - /** - * The score for the model - * @return the score for the model - */ - double score(); - - - /** - * Update the score - */ - void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr); - - /** - * Parameters of the model (if any) - * @return the parameters of the model - */ - INDArray params(); - - /** - * the number of parameters for the model - * @return the number of parameters for the model - * - */ - long numParams(); - - - /** - * the number of parameters for the model - * @return the number of parameters for the model - * - */ - long numParams(boolean backwards); - - /** - * Set the parameters for this model. - * This expects a linear ndarray which then be unpacked internally - * relative to the expected ordering of the model - * @param params the parameters for the model - */ - void setParams(INDArray params); - - /** - * Set the initial parameters array as a view of the full (backprop) network parameters - * NOTE: this is intended to be used internally in MultiLayerNetwork and ComputationGraph, not by users. - * @param params a 1 x nParams row vector that is a view of the larger (MLN/CG) parameters array - */ - void setParamsViewArray(INDArray params); - - - INDArray getGradientsViewArray(); - - /** - * Set the gradients array as a view of the full (backprop) network parameters - * NOTE: this is intended to be used internally in MultiLayerNetwork and ComputationGraph, not by users. - * @param gradients a 1 x nParams row vector that is a view of the larger (MLN/CG) gradients array - */ - void setBackpropGradientsViewArray(INDArray gradients); - - /** - * Fit the model to the given data - * @param data the data to fit the model to - */ - void fit(INDArray data, LayerWorkspaceMgr workspaceMgr); - - - /** - * Get the gradient. Note that this method will not calculate the gradient, it will rather return the gradient - * that has been computed before. - * For calculating the gradient, see {@link Model#computeGradientAndScore(LayerWorkspaceMgr)} } . 
- * @return the gradient for this model, as calculated before - */ - Gradient gradient(); - - /** - * Get the gradient and score - * @return the gradient and score - */ - Pair gradientAndScore(); - - /** - * The current inputs batch size - * @return the current inputs batch size - */ - int batchSize(); - - - /** - * The configuration for the neural network - * @return the configuration for the neural network - */ - NeuralNetConfiguration conf(); - - /** - * Setter for the configuration - * @param conf - */ - void setConf(NeuralNetConfiguration conf); - - /** - * The input/feature matrix for the model - * @return the input/feature matrix for the model - */ - INDArray input(); - - /** - * Returns this models optimizer - * @return this models optimizer - */ - ConvexOptimizer getOptimizer(); - - /** - * Get the parameter - * @param param the key of the parameter - * @return the parameter vector/matrix with that particular key - */ - INDArray getParam(String param); - - /** - * The param table - * @return - */ - Map paramTable(); - - /** - * Table of parameters by key, for backprop - * For many models (dense layers, etc) - all parameters are backprop parameters - * @param backpropParamsOnly If true, return backprop params only. If false: return all params (equivalent to - * paramsTable()) - */ - Map paramTable(boolean backpropParamsOnly); - - /** - * Setter for the param table - * @param paramTable - */ - void setParamTable(Map paramTable); - - - /** - * Set the parameter with a new ndarray - * @param key the key to se t - * @param val the new ndarray - */ - void setParam(String key, INDArray val); - - /** - * Clear input - */ - void clear(); - - - /** - * Apply any constraints to the model - */ - void applyConstraints(int iteration, int epoch); - - - void close(); -} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ModelAdapter.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ModelAdapter.java index 8b7d816d6..1f87ea69b 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ModelAdapter.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ModelAdapter.java @@ -20,13 +20,14 @@ package org.deeplearning4j.nn.api; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.adapters.OutputAdapter; import org.nd4j.linalg.api.ndarray.INDArray; public interface ModelAdapter extends OutputAdapter { /** - * This method invokes model internally, and does convertion to T + * This method invokes model internally, and does conversion to T * @return */ - T apply(Model model, INDArray[] inputs, INDArray[] inputMasks, INDArray[] labelsMasks); + T apply(IModel model, INDArray[] inputs, INDArray[] inputMasks, INDArray[] labelsMasks); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/NeuralNetwork.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/NeuralNetwork.java deleted file mode 100644 index 30215e916..000000000 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/NeuralNetwork.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * ****************************************************************************** - * * - * * - * * This program and the accompanying materials are made available under the - * * terms of the Apache License, Version 2.0 which is available at - * * https://www.apache.org/licenses/LICENSE-2.0. - * * - * * See the NOTICE file distributed with this work for additional - * * information regarding copyright ownership. 
- * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * * License for the specific language governing permissions and limitations - * * under the License. - * * - * * SPDX-License-Identifier: Apache-2.0 - * ***************************************************************************** - */ - -package org.deeplearning4j.nn.api; - -import org.deeplearning4j.optimize.api.ConvexOptimizer; -import org.nd4j.evaluation.IEvaluation; -import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.linalg.dataset.api.DataSet; -import org.nd4j.linalg.dataset.api.MultiDataSet; -import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; -import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; - -/** - * @author raver119 - */ -public interface NeuralNetwork { - - /** - * This method does initialization of model - * - * PLEASE NOTE: All implementations should track own state, to avoid double spending - */ - void init(); - - /** - * This method returns model parameters as single INDArray - * - * @return - */ - INDArray params(); - - /** - * This method returns updater state (if applicable), null otherwise - * @return - */ - INDArray updaterState(); - - /** - * This method returns Optimizer used for training - * - * @return - */ - ConvexOptimizer getOptimizer(); - - /** - * This method fits model with a given DataSet - * - * @param dataSet - */ - void fit(DataSet dataSet); - - /** - * This method fits model with a given MultiDataSet - * - * @param dataSet - */ - void fit(MultiDataSet dataSet); - - /** - * This method fits model with a given DataSetIterator - * - * @param iterator - */ - void fit(DataSetIterator iterator); - - /** - * This method fits model with a given MultiDataSetIterator - * - * @param iterator - */ - void fit(MultiDataSetIterator iterator); - - /** - * This method executes evaluation of the model against given iterator and evaluation implementations - * - * @param iterator - */ - T[] doEvaluation(DataSetIterator iterator, T... evaluations); - - /** - * This method executes evaluation of the model against given iterator and evaluation implementations - * - * @param iterator - */ - T[] doEvaluation(MultiDataSetIterator iterator, T... 
evaluations); -} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ParamInitializer.java index 7170953e9..2505e05f8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/ParamInitializer.java @@ -21,7 +21,7 @@ package org.deeplearning4j.nn.api; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import java.util.List; @@ -34,62 +34,62 @@ import java.util.Map; */ public interface ParamInitializer { - long numParams(NeuralNetConfiguration conf); - - long numParams(org.deeplearning4j.nn.conf.layers.Layer layer); + long numParams(LayerConfiguration layer); + @Deprecated + long numParams(NeuralNetConfiguration netConfiguration); /** * Get a list of all parameter keys given the layer configuration * - * @param layer Layer + * @param layer ILayer * @return All parameter keys */ - List paramKeys(org.deeplearning4j.nn.conf.layers.Layer layer); + List paramKeys(LayerConfiguration layer); /** * Weight parameter keys given the layer configuration * - * @param layer Layer + * @param layer ILayer * @return Weight parameter keys */ - List weightKeys(org.deeplearning4j.nn.conf.layers.Layer layer); + List weightKeys(LayerConfiguration layer); /** * Bias parameter keys given the layer configuration * - * @param layer Layer + * @param layer ILayer * @return Bias parameter keys */ - List biasKeys(org.deeplearning4j.nn.conf.layers.Layer layer); + List biasKeys(LayerConfiguration layer); /** * Is the specified parameter a weight? * - * @param layer Layer + * @param layer ILayer * @param key Key to check * @return True if parameter is a weight */ - boolean isWeightParam(Layer layer, String key); + boolean isWeightParam(LayerConfiguration layer, String key); /** * Is the specified parameter a bias? * - * @param layer Layer + * @param layer ILayer * @param key Key to check * @return True if parameter is a bias */ - boolean isBiasParam(Layer layer, String key); + boolean isBiasParam(LayerConfiguration layer, String key); /** * Initialize the parameters * - * @param conf the configuration + * @param conf the configuration of the layer * @param paramsView a view of the full network (backprop) parameters * @param initializeParams if true: initialize the parameters according to the configuration. If false: don't modify the * values in the paramsView array (but do select out the appropriate subset, reshape etc as required) * @return Map of parameters keyed by type (view of the 'paramsView' array) */ - Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams); + Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams); /** * Return a map of gradients (in their standard non-flattened representation), taken from the flattened (row vector) gradientView array. 
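A minimal sketch (not part of this patch) of how the LayerConfiguration-based ParamInitializer signatures above might be exercised. It assumes a LayerConfiguration instance is already at hand (for example taken from a built configuration), and the choice of FLOAT for the flat view is arbitrary for the illustration; the initializer() accessor and the numParams(LayerConfiguration) / init(LayerConfiguration, INDArray, boolean) signatures are the ones shown in this diff.

import java.util.Map;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public final class ParamInitSketch {

  /** Allocates a flat parameter view for one layer and initializes it (illustrative only). */
  static Map<String, INDArray> initLayerParams(LayerConfiguration lc) {
    // numParams now takes the LayerConfiguration directly
    long nParams = lc.initializer().numParams(lc);
    // Flat row-vector view of this layer's parameters (dtype chosen arbitrarily here)
    INDArray paramsView = Nd4j.create(DataType.FLOAT, 1, nParams);
    // init(...) returns sub-views of paramsView keyed by parameter name (e.g. "W", "b")
    return lc.initializer().init(lc, paramsView, true);
  }
}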
@@ -100,6 +100,6 @@ public interface ParamInitializer { * @param gradientView The flattened gradients array, as a view of the larger array * @return A map containing an array by parameter type, that is a view of the full network gradients array */ - Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView); + Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Updater.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Updater.java index d63b57bb8..ae301a40c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Updater.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/Updater.java @@ -36,11 +36,11 @@ public interface Updater extends Serializable { /** * Set the internal (historical) state view array for this updater * - * @param layer Layer that this updater belongs to + * @param layer ILayer that this updater belongs to * @param viewArray View array * @param initialize Whether to initialize the array or not */ - void setStateViewArray(Trainable layer, INDArray viewArray, boolean initialize); + void setStateViewArray(ITrainableLayer layer, INDArray viewArray, boolean initialize); /** * @return the view array for this updater @@ -54,5 +54,5 @@ public interface Updater extends Serializable { * @param gradient * @param iteration */ - void update(Trainable layer, Gradient gradient, int iteration, int epoch, int miniBatchSize, LayerWorkspaceMgr workspaceMgr); + void update(ITrainableLayer layer, Gradient gradient, int iteration, int epoch, int miniBatchSize, LayerWorkspaceMgr workspaceMgr); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/LayerConstraint.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/LayerConstraint.java index fff8bd77d..cfa82b050 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/LayerConstraint.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/LayerConstraint.java @@ -33,7 +33,7 @@ public interface LayerConstraint extends Cloneable, Serializable { * Apply a given constraint to a layer at each iteration * in the provided epoch, after parameters have been updated. * - * @param layer org.deeplearning4j.nn.api.Layer + * @param layer org.deeplearning4j.nn.api.ILayer * @param iteration given iteration as integer * @param epoch current epoch as integer */ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/RecurrentLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/RecurrentLayer.java index 62050b88e..61c50b161 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/RecurrentLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/api/layers/RecurrentLayer.java @@ -66,10 +66,10 @@ public interface RecurrentLayer extends Layer { * (a) result in the same output
* (b) leave the state maps (both stateMap and tBpttStateMap) in an identical state * - * @param input Layer input + * @param input ILayer input * @param training if true: training. Otherwise: test * @param storeLastForTBPTT If true: store the final state in tBpttStateMap for use in truncated BPTT training - * @return Layer activations + * @return ILayer activations */ INDArray rnnActivateUsingStoredState(INDArray input, boolean training, boolean storeLastForTBPTT, LayerWorkspaceMgr workspaceMg); @@ -92,12 +92,10 @@ public interface RecurrentLayer extends Layer { void rnnSetTBPTTState(Map state); /** - * Truncated BPTT equivalent of Layer.backpropGradient(). + * Truncated BPTT equivalent of ILayer.backpropGradient(). * Primary difference here is that forward pass in the context of BPTT is that we do * forward pass using stored state for truncated BPTT vs. from zero initialization * for standard BPTT. */ Pair tbpttBackpropGradient(INDArray epsilon, int tbpttBackLength, LayerWorkspaceMgr workspaceMgr); - - } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ComputationGraphConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ComputationGraphConfiguration.java index 8fe4b99a3..dac126dd7 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ComputationGraphConfiguration.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ComputationGraphConfiguration.java @@ -27,10 +27,9 @@ import org.deeplearning4j.nn.conf.graph.LayerVertex; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.graph.rnn.LastTimeStepVertex; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.OutputLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; import org.deeplearning4j.nn.conf.layers.samediff.SameDiffVertex; import org.deeplearning4j.nn.conf.memory.MemoryReport; @@ -140,7 +139,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @return JSON representation of computation graph configuration */ public String toJson() { - //As per MultiLayerConfiguration.toJson() + //As per NeuralNetConfiguration.toJson() ObjectMapper mapper = NeuralNetConfiguration.mapper(); synchronized (mapper) { //JSON mappers are supposed to be thread safe: however, in practice they seem to miss fields occasionally @@ -160,7 +159,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @return {@link ComputationGraphConfiguration} */ public static ComputationGraphConfiguration fromJson(String json) { - //As per MultiLayerConfiguration.fromJson() + //As per NeuralNetConfiguration.fromJson() ObjectMapper mapper = NeuralNetConfiguration.mapper(); ComputationGraphConfiguration conf; try { @@ -171,7 +170,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { //JSON may be legacy (1.0.0-alpha or earlier), attempt to load it using old format return JsonMappers.getLegacyMapper().readValue(json, ComputationGraphConfiguration.class); } catch (InvalidTypeIdException e2){ - //Check for legacy custom layers: "Could not resolve type id 'CustomLayer' as a subtype of [simple type, class 
org.deeplearning4j.nn.conf.layers.Layer]: known type ids = [Bidirectional, CenterLossOutputLayer, CnnLossLayer, ..." + //Check for legacy custom layers: "Could not resolve type id 'CustomLayer' as a subtype of [simple type, class org.deeplearning4j.nn.conf.layers.LayerConfiguration]: known type ids = [Bidirectional, CenterLossOutputLayer, CnnLossLayer, ..." //1.0.0-beta5: dropping support for custom layers defined in pre-1.0.0-beta format. Built-in layers from these formats still work String msg = e2.getMessage(); if(msg != null && msg.contains("Could not resolve type id")){ @@ -207,10 +206,11 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { } LayerVertex lv = (LayerVertex) entry.getValue(); - if (lv.getLayerConf() != null && lv.getLayerConf().getLayer() != null) { - Layer layer = lv.getLayerConf().getLayer(); + if (lv.getNetConfiguration() != null && lv.getLayerConfiguration() != null) { + LayerConfiguration layer = lv.getLayerConfiguration(); - if (layer instanceof BaseLayer && ((BaseLayer) layer).getActivationFn() == null) { + if (layer instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration) layer).getActivationFn() == null) { String layerName = layer.getLayerName(); try { @@ -236,11 +236,11 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { if (activationFunction != null) { IActivation ia = Activation.fromString(activationFunction.asText()).getActivationFunction(); - ((BaseLayer) layer).setActivationFn(ia); + ((BaseLayerConfiguration) layer).setActivationFn(ia); } } catch (IOException e) { - log.warn("Layer with null ActivationFn field or pre-0.7.2 activation function detected: could not parse JSON", + log.warn("LayerConfiguration with null ActivationFn field or pre-0.7.2 activation function detected: could not parse JSON", e); } } @@ -257,8 +257,9 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * above. * @return True if all is well and layer iteration shall continue. False else-wise. 
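Illustrative sketch of how the toJson()/fromJson() pair changed above is meant to be used after the refactoring. The NeuralNetConfiguration.builder() entry point mirrors the GraphBuilder constructor introduced later in this patch, and OutputLayer is assumed to be a LayerConfiguration after the renaming; both are assumptions rather than code shown in this hunk.

import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class GraphJsonRoundTripSketch {
  public static void main(String[] args) {
    // Assumption: NeuralNetConfiguration.builder() matches the GraphBuilder(NeuralNetConfigurationBuilder)
    // constructor added in this patch.
    ComputationGraphConfiguration conf =
        new ComputationGraphConfiguration.GraphBuilder(NeuralNetConfiguration.builder())
            .addInputs("in")
            .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .nIn(10).nOut(2).activation(Activation.SOFTMAX).build(), "in")
            .setOutputs("out")
            .build();

    // Round trip through JSON; fromJson() falls back to the legacy mapper for pre-1.0.0-beta
    // formats, as handled in the hunk above.
    String json = conf.toJson();
    ComputationGraphConfiguration restored = ComputationGraphConfiguration.fromJson(json);
    System.out.println(conf.equals(restored));
  }
}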
*/ - private static void handleLegacyWeightInitFromJson(String json, Layer layer, ObjectMapper mapper, JsonNode vertices) { - if (layer instanceof BaseLayer && ((BaseLayer) layer).getWeightInitFn() == null) { + private static void handleLegacyWeightInitFromJson(String json, LayerConfiguration layer, ObjectMapper mapper, JsonNode vertices) { + if (layer instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration) layer).getWeightInit() == null) { String layerName = layer.getLayerName(); try { @@ -290,11 +291,11 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { if (weightInit != null) { final IWeightInit wi = WeightInit.valueOf(weightInit.asText()).getWeightInitFunction(dist); - ((BaseLayer) layer).setWeightInitFn(wi); + ((BaseLayerConfiguration) layer).setWeightInit(wi); } } catch (IOException e) { - log.warn("Layer with null ActivationFn field or pre-0.7.2 activation function detected: could not parse JSON", + log.warn("LayerConfiguration with null ActivationFn field or pre-0.7.2 activation function detected: could not parse JSON", e); } } @@ -331,7 +332,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { conf.trainingWorkspaceMode = trainingWorkspaceMode; conf.inferenceWorkspaceMode = inferenceWorkspaceMode; conf.cacheMode = this.cacheMode; - conf.defaultConfiguration.cacheMode = this.cacheMode; + conf.defaultConfiguration.setCacheMode(this.cacheMode); conf.validateOutputLayerConfig = this.validateOutputLayerConfig; conf.dataType = this.dataType; @@ -517,7 +518,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { inputTypeList.add(layerInput); LayerVertex lv = (LayerVertex) gv; - Layer l = lv.getLayerConf().getLayer(); + LayerConfiguration l = lv.getLayerConfiguration(); //Preprocessors - add if necessary if (lv.getPreProcessor() == null && addPreprocIfNecessary) { @@ -710,7 +711,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { protected Map inputPreProcessors = new LinkedHashMap<>(); - protected NeuralNetConfiguration.Builder globalConfiguration; + protected NeuralNetConfiguration globalConfiguration; protected boolean allowDisconnected = false; protected boolean allowNoOutput = false; @@ -719,11 +720,11 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { protected String lastAdded = null; - public GraphBuilder(NeuralNetConfiguration.Builder globalConfiguration) { - this.globalConfiguration = globalConfiguration; + public GraphBuilder(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfiguration) { + this.globalConfiguration = globalConfiguration.build(); } - public GraphBuilder(ComputationGraphConfiguration newConf, NeuralNetConfiguration.Builder globalConfiguration) { + public GraphBuilder(ComputationGraphConfiguration newConf, NeuralNetConfiguration globalConfiguration) { ComputationGraphConfiguration clonedConf = newConf.clone(); @@ -742,7 +743,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { /** * Specify the processors for a given layer * These are used at each layer for doing things like normalization and shaping of input.
- * Note: preprocessors can also be defined using the {@link #addLayer(String, Layer, InputPreProcessor, String...)} method. + * Note: preprocessors can also be defined using the {@link #addLayer(String, LayerConfiguration, InputPreProcessor, String...)} method. * * @param layer the name of the layer that this preprocessor will be used with * @param processor the preprocessor to use for the specified layer @@ -776,7 +777,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * * @param forwardLength Forward length > 0, >= backwardLength */ - public GraphBuilder tBPTTForwardLength(int forwardLength) { + public GraphBuilder tbpttFwdLength(int forwardLength) { this.tbpttFwdLength = forwardLength; return this; } @@ -789,7 +790,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * * @param backwardLength <= forwardLength */ - public GraphBuilder tBPTTBackwardLength(int backwardLength) { + public GraphBuilder tbpttBackLength(int backwardLength) { this.tbpttBackLength = backwardLength; return this; } @@ -802,8 +803,8 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param tbpttLength length > 0 */ public GraphBuilder tBPTTLength(int tbpttLength){ - tBPTTForwardLength(tbpttLength); - return tBPTTBackwardLength(tbpttLength); + tbpttFwdLength(tbpttLength); + return tbpttBackLength(tbpttLength); } /** @@ -813,9 +814,9 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param layer The layer configuration * @param layerInputs Inputs to this layer. Inputs may be other layers, GraphVertex objects, * on a combination of the two. - * @see #addLayer(String, Layer, InputPreProcessor, String...) + * @see #addLayer(String, LayerConfiguration, InputPreProcessor, String...) */ - public GraphBuilder addLayer(String layerName, Layer layer, String... layerInputs) { + public GraphBuilder addLayer(String layerName, LayerConfiguration layer, String... layerInputs) { return addLayer(layerName, layer, null, layerInputs); } @@ -825,9 +826,9 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * * @param layerName Name/label of the layer to add * @param layer The layer configuration - * @see #addLayer(String, Layer, InputPreProcessor, String...) + * @see #addLayer(String, LayerConfiguration, InputPreProcessor, String...) */ - public GraphBuilder appendLayer(String layerName, Layer layer) { + public GraphBuilder appendLayer(String layerName, LayerConfiguration layer) { return appendLayer(layerName, layer, null); } @@ -838,9 +839,9 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param layer The layer configuration * @param layerInputs Inputs to this layer. Inputs may be other layers, GraphVertex objects, * on a combination of the two. - * @see #addLayer(String, Layer, InputPreProcessor, String...) + * @see #addLayer(String, LayerConfiguration, InputPreProcessor, String...) */ - public GraphBuilder layer(int layerName, Layer layer, String... layerInputs) { + public GraphBuilder layer(int layerName, LayerConfiguration layer, String... layerInputs) { return addLayer(String.valueOf(layerName), layer, null, layerInputs); } @@ -851,9 +852,9 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param layer The layer configuration * @param layerInputs Inputs to this layer. Inputs may be other layers, GraphVertex objects, * on a combination of the two. 
- * @see #addLayer(String, Layer, InputPreProcessor, String...) + * @see #addLayer(String, LayerConfiguration, InputPreProcessor, String...) */ - public GraphBuilder layer(String layerName, Layer layer, String... layerInputs) { + public GraphBuilder layer(String layerName, LayerConfiguration layer, String... layerInputs) { return addLayer(layerName, layer, null, layerInputs); } @@ -866,11 +867,11 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param layerInputs Inputs to this layer. Inputs may be other layers, GraphVertex objects, * on a combination of the two. */ - public GraphBuilder addLayer(String layerName, Layer layer, InputPreProcessor preProcessor, + public GraphBuilder addLayer(String layerName, LayerConfiguration layer, InputPreProcessor preProcessor, String... layerInputs) { - NeuralNetConfiguration.Builder builder = globalConfiguration.clone(); - builder.layer(layer); - addVertex(layerName, new LayerVertex(builder.build(), preProcessor), layerInputs); + NeuralNetConfiguration conf = globalConfiguration.clone(); + conf.getLayerConfigurations().add(layer); + addVertex(layerName, new LayerVertex(conf, preProcessor), layerInputs); layer.setLayerName(layerName); return this; } @@ -883,7 +884,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param layer The layer configuration * @param preProcessor The InputPreProcessor to use with this layer. */ - public GraphBuilder appendLayer(String layerName, Layer layer, InputPreProcessor preProcessor) { + public GraphBuilder appendLayer(String layerName, LayerConfiguration layer, InputPreProcessor preProcessor) { if(lastAdded == null){ throw new IllegalStateException("Can not use appendLayer with no previous layers"); @@ -902,7 +903,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { * @param layerInputs Inputs to this layer. Inputs may be other layers, GraphVertex objects, * on a combination of the two. */ - public GraphBuilder layer(String layerName, Layer layer, InputPreProcessor preProcessor, + public GraphBuilder layer(String layerName, LayerConfiguration layer, InputPreProcessor preProcessor, String... 
layerInputs) { return addLayer(layerName, layer, preProcessor, layerInputs); } @@ -1173,13 +1174,13 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { conf.vertices = this.vertices; conf.vertexInputs = this.vertexInputs; - conf.trainingWorkspaceMode = globalConfiguration.trainingWorkspaceMode; - conf.inferenceWorkspaceMode = globalConfiguration.inferenceWorkspaceMode; - conf.cacheMode = globalConfiguration.cacheMode; + conf.trainingWorkspaceMode = getGlobalConfiguration().getTrainingWorkspaceMode(); + conf.inferenceWorkspaceMode = getGlobalConfiguration().getInferenceWorkspaceMode(); + conf.cacheMode = globalConfiguration.getCacheMode(); conf.validateOutputLayerConfig = validateOutputConfig; - conf.dataType = globalConfiguration.dataType; + conf.dataType = globalConfiguration.getDataType(); - conf.defaultConfiguration = globalConfiguration.build(); + conf.defaultConfiguration = globalConfiguration; //Add preprocessors that were defined separately to the Layers to which they belong for (Map.Entry entry : inputPreProcessors.entrySet()) { @@ -1198,7 +1199,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { for (Map.Entry gv : vertices.entrySet()) { if (gv.getValue() instanceof LayerVertex) { LayerVertex lv = (LayerVertex) gv.getValue(); - Layer l = lv.getLayerConf().getLayer(); + LayerConfiguration l = lv.getLayerConfiguration(); } if (gv.getValue() instanceof SameDiffVertex) ((SameDiffVertex) gv.getValue()).applyGlobalConfig(globalConfiguration); @@ -1226,7 +1227,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { //Validate output layer configurations... for (Map.Entry e : conf.getVertices().entrySet()) { if (e.getValue() instanceof LayerVertex) { - Layer l = ((LayerVertex) e.getValue()).getLayerConf().getLayer(); + LayerConfiguration l = ((LayerVertex) e.getValue()).getLayerConfiguration(); OutputLayerUtil.validateOutputLayer(e.getKey(), l); //No-op for non output/loss layers } } @@ -1236,7 +1237,7 @@ public class ComputationGraphConfiguration implements Serializable, Cloneable { //Check for invalid combination - tbptt plus LastTimeStepLayer or for(Map.Entry e : vertices.entrySet()){ GraphVertex gv = e.getValue(); - Layer l = (gv instanceof LayerVertex ? ((LayerVertex)gv).getLayerConf().getLayer() : null); + LayerConfiguration l = (gv instanceof LayerVertex ? ((LayerVertex)gv).getLayerConfiguration() : null); if(gv instanceof LastTimeStepVertex || (l != null && (l instanceof LastTimeStep || l instanceof GlobalPoolingLayer))){ String s = (l == null ? gv.getClass().getName() : l.getClass().getName()); String n = e.getKey(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/MultiLayerConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/MultiLayerConfiguration.java deleted file mode 100644 index a48dc85ba..000000000 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/MultiLayerConfiguration.java +++ /dev/null @@ -1,769 +0,0 @@ -/* - * ****************************************************************************** - * * - * * - * * This program and the accompanying materials are made available under the - * * terms of the Apache License, Version 2.0 which is available at - * * https://www.apache.org/licenses/LICENSE-2.0. - * * - * * See the NOTICE file distributed with this work for additional - * * information regarding copyright ownership. 
- * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * * License for the specific language governing permissions and limitations - * * under the License. - * * - * * SPDX-License-Identifier: Apache-2.0 - * ***************************************************************************** - */ - - -package org.deeplearning4j.nn.conf; - -import lombok.*; -import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.conf.distribution.Distribution; -import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.*; -import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; -import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; -import org.deeplearning4j.nn.conf.memory.MemoryReport; -import org.deeplearning4j.nn.conf.memory.NetworkMemoryReport; -import org.deeplearning4j.nn.conf.serde.JsonMappers; -import org.deeplearning4j.nn.weights.IWeightInit; -import org.deeplearning4j.nn.weights.WeightInit; -import org.deeplearning4j.util.OutputLayerUtil; -import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.activations.IActivation; -import org.nd4j.linalg.api.buffer.DataType; -import org.nd4j.linalg.factory.Nd4j; -import org.nd4j.linalg.lossfunctions.LossFunctions; -import org.nd4j.linalg.lossfunctions.impl.LossBinaryXENT; -import org.nd4j.linalg.lossfunctions.impl.LossMCXENT; -import org.nd4j.linalg.lossfunctions.impl.LossMSE; -import org.nd4j.linalg.lossfunctions.impl.LossNegativeLogLikelihood; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.exc.InvalidTypeIdException; -import com.fasterxml.jackson.databind.node.ArrayNode; - -import java.io.IOException; -import java.io.Serializable; -import java.util.*; - -@Data -@AllArgsConstructor(access = AccessLevel.PRIVATE) -@NoArgsConstructor -@Slf4j -public class MultiLayerConfiguration implements Serializable, Cloneable { - - protected List confs; - protected Map inputPreProcessors = new HashMap<>(); - protected BackpropType backpropType = BackpropType.Standard; - protected int tbpttFwdLength = 20; - protected int tbpttBackLength = 20; - protected boolean validateOutputLayerConfig = true; //Default to legacy for pre 1.0.0-beta3 networks on deserialization - - @Getter - @Setter - protected WorkspaceMode trainingWorkspaceMode = WorkspaceMode.ENABLED; - - @Getter - @Setter - protected WorkspaceMode inferenceWorkspaceMode = WorkspaceMode.ENABLED; - - @Getter - @Setter - protected CacheMode cacheMode; - - @Getter - @Setter - protected DataType dataType = DataType.FLOAT; //Default to float for deserialization of beta3 and earlier nets - - //Counter for the number of parameter updates so far - // This is important for learning rate schedules, for example, and is stored here to ensure it is persisted - // for Spark and model serialization - protected int iterationCount = 0; - - //Counter for the number of epochs completed so far. 
Used for per-epoch schedules - protected int epochCount = 0; - - public int getEpochCount() { - return epochCount; - } - - public void setEpochCount(int epochCount) { - this.epochCount = epochCount; - for (int i = 0; i < confs.size(); i++) { - getConf(i).setEpochCount(epochCount); - } - } - - /** - * @return JSON representation of NN configuration - */ - public String toYaml() { - ObjectMapper mapper = NeuralNetConfiguration.mapperYaml(); - synchronized (mapper) { - try { - return mapper.writeValueAsString(this); - } catch (com.fasterxml.jackson.core.JsonProcessingException e) { - throw new RuntimeException(e); - } - } - } - - /** - * Create a neural net configuration from json - * - * @param json the neural net configuration from json - * @return {@link MultiLayerConfiguration} - */ - public static MultiLayerConfiguration fromYaml(String json) { - ObjectMapper mapper = NeuralNetConfiguration.mapperYaml(); - try { - return mapper.readValue(json, MultiLayerConfiguration.class); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - - /** - * @return JSON representation of NN configuration - */ - public String toJson() { - ObjectMapper mapper = NeuralNetConfiguration.mapper(); - synchronized (mapper) { - //JSON mappers are supposed to be thread safe: however, in practice they seem to miss fields occasionally - //when writeValueAsString is used by multiple threads. This results in invalid JSON. See issue #3243 - try { - return mapper.writeValueAsString(this); - } catch (com.fasterxml.jackson.core.JsonProcessingException e) { - throw new RuntimeException(e); - } - } - } - - /** - * Create a neural net configuration from json - * - * @param json the neural net configuration from json - * @return {@link MultiLayerConfiguration} - */ - public static MultiLayerConfiguration fromJson(String json) { - MultiLayerConfiguration conf; - ObjectMapper mapper = NeuralNetConfiguration.mapper(); - try { - conf = mapper.readValue(json, MultiLayerConfiguration.class); - } catch (InvalidTypeIdException e){ - if(e.getMessage().contains("@class")){ - try { - //JSON may be legacy (1.0.0-alpha or earlier), attempt to load it using old format - return JsonMappers.getLegacyMapper().readValue(json, MultiLayerConfiguration.class); - } catch (InvalidTypeIdException e2){ - //Check for legacy custom layers: "Could not resolve type id 'CustomLayer' as a subtype of [simple type, class org.deeplearning4j.nn.conf.layers.Layer]: known type ids = [Bidirectional, CenterLossOutputLayer, CnnLossLayer, ..." - //1.0.0-beta5: dropping support for custom layers defined in pre-1.0.0-beta format. Built-in layers from these formats still work - String msg = e2.getMessage(); - if(msg != null && msg.contains("Could not resolve type id")){ - throw new RuntimeException("Error deserializing MultiLayerConfiguration - configuration may have a custom " + - "layer, vertex or preprocessor, in pre version 1.0.0-beta JSON format.\nModels in legacy format with custom" + - " layers should be loaded in 1.0.0-beta to 1.0.0-beta4 and saved again, before loading in the current version of DL4J", e); - } - throw new RuntimeException(e2); - } catch (IOException e2){ - throw new RuntimeException(e2); - } - } - throw new RuntimeException(e); - } catch (IOException e) { - //Check if this exception came from legacy deserializer... 
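A brief migration sketch for the deletion in progress here: MultiLayerConfiguration is removed outright, and the comment changes earlier in the patch ("As per NeuralNetConfiguration.toJson()") suggest its JSON round-trip role moves to NeuralNetConfiguration. The replacement entry points below are inferred from those comments and are assumptions, not code shown in this hunk.

// 'json' is a configuration string previously saved with toJson().
// Before this patch (class deleted here):
//   MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(json);
// After this patch (assumed replacement, inferred from the renamed comments):
NeuralNetConfiguration conf = NeuralNetConfiguration.fromJson(json);
String roundTripped = conf.toJson();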
- String msg = e.getMessage(); - if (msg != null && msg.contains("legacy")) { - throw new RuntimeException("Error deserializing MultiLayerConfiguration - configuration may have a custom " + - "layer, vertex or preprocessor, in pre version 1.0.0-alpha JSON format. These layers can be " + - "deserialized by first registering them with NeuralNetConfiguration.registerLegacyCustomClassesForJSON(Class...)", e); - } - throw new RuntimeException(e); - } - - - //To maintain backward compatibility after loss function refactoring (configs generated with v0.5.0 or earlier) - // Previously: enumeration used for loss functions. Now: use classes - // IN the past, could have only been an OutputLayer or RnnOutputLayer using these enums - int layerCount = 0; - JsonNode confs = null; - for (NeuralNetConfiguration nnc : conf.getConfs()) { - Layer l = nnc.getLayer(); - if (l instanceof BaseOutputLayer && ((BaseOutputLayer) l).getLossFn() == null) { - //lossFn field null -> may be an old config format, with lossFunction field being for the enum - //if so, try walking the JSON graph to extract out the appropriate enum value - - BaseOutputLayer ol = (BaseOutputLayer) l; - try { - JsonNode jsonNode = mapper.readTree(json); - if (confs == null) { - confs = jsonNode.get("confs"); - } - if (confs instanceof ArrayNode) { - ArrayNode layerConfs = (ArrayNode) confs; - JsonNode outputLayerNNCNode = layerConfs.get(layerCount); - if (outputLayerNNCNode == null) - return conf; //Should never happen... - JsonNode outputLayerNode = outputLayerNNCNode.get("layer"); - - JsonNode lossFunctionNode = null; - if (outputLayerNode.has("output")) { - lossFunctionNode = outputLayerNode.get("output").get("lossFunction"); - } else if (outputLayerNode.has("rnnoutput")) { - lossFunctionNode = outputLayerNode.get("rnnoutput").get("lossFunction"); - } - - if (lossFunctionNode != null) { - String lossFunctionEnumStr = lossFunctionNode.asText(); - LossFunctions.LossFunction lossFunction = null; - try { - lossFunction = LossFunctions.LossFunction.valueOf(lossFunctionEnumStr); - } catch (Exception e) { - log.warn("OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not parse JSON", - e); - } - - if (lossFunction != null) { - switch (lossFunction) { - case MSE: - ol.setLossFn(new LossMSE()); - break; - case XENT: - ol.setLossFn(new LossBinaryXENT()); - break; - case NEGATIVELOGLIKELIHOOD: - ol.setLossFn(new LossNegativeLogLikelihood()); - break; - case MCXENT: - ol.setLossFn(new LossMCXENT()); - break; - - //Remaining: TODO - case SQUARED_LOSS: - case RECONSTRUCTION_CROSSENTROPY: - default: - log.warn("OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not set loss function for {}", - lossFunction); - break; - } - } - } - - } else { - log.warn("OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not parse JSON: layer 'confs' field is not an ArrayNode (is: {})", - (confs != null ? 
confs.getClass() : null)); - } - } catch (IOException e) { - log.warn("OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not parse JSON", - e); - break; - } - } - - //Also, pre 0.7.2: activation functions were Strings ("activationFunction" field), not classes ("activationFn") - //Try to load the old format if necessary, and create the appropriate IActivation instance - if ((l instanceof BaseLayer) && ((BaseLayer) l).getActivationFn() == null) { - try { - JsonNode jsonNode = mapper.readTree(json); - if (confs == null) { - confs = jsonNode.get("confs"); - } - if (confs instanceof ArrayNode) { - ArrayNode layerConfs = (ArrayNode) confs; - JsonNode outputLayerNNCNode = layerConfs.get(layerCount); - if (outputLayerNNCNode == null) - return conf; //Should never happen... - JsonNode layerWrapperNode = outputLayerNNCNode.get("layer"); - - if (layerWrapperNode == null || layerWrapperNode.size() != 1) { - continue; - } - - JsonNode layerNode = layerWrapperNode.elements().next(); - JsonNode activationFunction = layerNode.get("activationFunction"); //Should only have 1 element: "dense", "output", etc - - if (activationFunction != null) { - IActivation ia = Activation.fromString(activationFunction.asText()).getActivationFunction(); - ((BaseLayer) l).setActivationFn(ia); - } - } - - } catch (IOException e) { - log.warn("Layer with null ActivationFn field or pre-0.7.2 activation function detected: could not parse JSON", - e); - } - } - - if(!handleLegacyWeightInitFromJson(json, l, mapper, confs, layerCount)) { - return conf; - } - - layerCount++; - } - return conf; - } - - /** - * Handle {@link WeightInit} and {@link Distribution} from legacy configs in Json format. Copied from handling of {@link Activation} - * above. - * @return True if all is well and layer iteration shall continue. False else-wise. - */ - private static boolean handleLegacyWeightInitFromJson(String json, Layer l, ObjectMapper mapper, JsonNode confs, int layerCount) { - if ((l instanceof BaseLayer) && ((BaseLayer) l).getWeightInitFn() == null) { - try { - JsonNode jsonNode = mapper.readTree(json); - if (confs == null) { - confs = jsonNode.get("confs"); - } - if (confs instanceof ArrayNode) { - ArrayNode layerConfs = (ArrayNode) confs; - JsonNode outputLayerNNCNode = layerConfs.get(layerCount); - if (outputLayerNNCNode == null) - return false; //Should never happen... 
- JsonNode layerWrapperNode = outputLayerNNCNode.get("layer"); - - if (layerWrapperNode == null || layerWrapperNode.size() != 1) { - return true; - } - - JsonNode layerNode = layerWrapperNode.elements().next(); - JsonNode weightInit = layerNode.get("weightInit"); //Should only have 1 element: "dense", "output", etc - JsonNode distribution = layerNode.get("dist"); - - Distribution dist = null; - if(distribution != null) { - dist = mapper.treeToValue(distribution, Distribution.class); - } - - if (weightInit != null) { - final IWeightInit wi = WeightInit.valueOf(weightInit.asText()).getWeightInitFunction(dist); - ((BaseLayer) l).setWeightInitFn(wi); - } - } - - } catch (IOException e) { - log.warn("Layer with null WeightInit detected: " + l.getLayerName() + ", could not parse JSON", - e); - } - } - return true; - - } - - @Override - public String toString() { - return toJson(); - } - - public NeuralNetConfiguration getConf(int i) { - return confs.get(i); - } - - @Override - public MultiLayerConfiguration clone() { - try { - MultiLayerConfiguration clone = (MultiLayerConfiguration) super.clone(); - - if (clone.confs != null) { - List list = new ArrayList<>(); - for (NeuralNetConfiguration conf : clone.confs) { - list.add(conf.clone()); - } - clone.confs = list; - } - - if (clone.inputPreProcessors != null) { - Map map = new HashMap<>(); - for (Map.Entry entry : clone.inputPreProcessors.entrySet()) { - map.put(entry.getKey(), entry.getValue().clone()); - } - clone.inputPreProcessors = map; - } - - clone.inferenceWorkspaceMode = this.inferenceWorkspaceMode; - clone.trainingWorkspaceMode = this.trainingWorkspaceMode; - clone.cacheMode = this.cacheMode; - clone.validateOutputLayerConfig = this.validateOutputLayerConfig; - clone.dataType = this.dataType; - - return clone; - - } catch (CloneNotSupportedException e) { - throw new RuntimeException(e); - } - } - - public InputPreProcessor getInputPreProcess(int curr) { - return inputPreProcessors.get(curr); - } - - /** - * Get a {@link MemoryReport} for the given MultiLayerConfiguration. This is used to estimate the - * memory requirements for the given network configuration and input - * - * @param inputType Input types for the network - * @return Memory report for the network - */ - public NetworkMemoryReport getMemoryReport(InputType inputType) { - - Map memoryReportMap = new LinkedHashMap<>(); - int nLayers = confs.size(); - for (int i = 0; i < nLayers; i++) { - String layerName = confs.get(i).getLayer().getLayerName(); - if (layerName == null) { - layerName = String.valueOf(i); - } - - //Pass input type through preprocessor, if necessary - InputPreProcessor preproc = getInputPreProcess(i); - //TODO memory requirements for preprocessor - if (preproc != null) { - inputType = preproc.getOutputType(inputType); - } - - LayerMemoryReport report = confs.get(i).getLayer().getMemoryReport(inputType); - memoryReportMap.put(layerName, report); - - inputType = confs.get(i).getLayer().getOutputType(i, inputType); - } - - return new NetworkMemoryReport(memoryReportMap, MultiLayerConfiguration.class, "MultiLayerNetwork", inputType); - } - - /** - * For the given input shape/type for the network, return a list of activation sizes for each layer in the network.
- * i.e., list.get(i) is the output activation sizes for layer i - * - * @param inputType Input type for the network - * @return A lits of activation types for the network, indexed by layer number - */ - public List getLayerActivationTypes(@NonNull InputType inputType) { - List out = new ArrayList<>(); - int nLayers = confs.size(); - for (int i = 0; i < nLayers; i++) { - InputPreProcessor preproc = getInputPreProcess(i); - if (preproc != null) { - inputType = preproc.getOutputType(inputType); - } - - inputType = confs.get(i).getLayer().getOutputType(i, inputType); - out.add(inputType); - } - return out; - } - - @Data - public static class Builder { - - private static final int DEFAULT_TBPTT_LENGTH = 20; - - protected List confs = new ArrayList<>(); - protected double dampingFactor = 100; - protected Map inputPreProcessors = new HashMap<>(); - protected BackpropType backpropType = BackpropType.Standard; - protected int tbpttFwdLength = DEFAULT_TBPTT_LENGTH; - protected int tbpttBackLength = DEFAULT_TBPTT_LENGTH; - protected InputType inputType; - - protected WorkspaceMode trainingWorkspaceMode = WorkspaceMode.ENABLED; - protected WorkspaceMode inferenceWorkspaceMode = WorkspaceMode.ENABLED; - protected CacheMode cacheMode = CacheMode.NONE; - protected boolean validateOutputConfig = true; - protected boolean validateTbpttConfig = true; - protected DataType dataType; - protected boolean overrideNinUponBuild = true; - - - /** - * Whether to over ride the nIn - * configuration forcibly upon construction. - * Default value is true - * @param overrideNinUponBuild Whether to over ride the nIn - * configuration forcibly upon construction. - * @return builder pattern - */ - public Builder overrideNinUponBuild(boolean overrideNinUponBuild) { - this.overrideNinUponBuild = overrideNinUponBuild; - return this; - } - - /** - * Specify the processors. - * These are used at each layer for doing things like normalization and - * shaping of input. - * - * @param processor what to use to preProcess the data. - * @return builder pattern - */ - public Builder inputPreProcessor(Integer layer, InputPreProcessor processor) { - inputPreProcessors.put(layer, processor); - return this; - } - - public Builder inputPreProcessors(Map processors) { - this.inputPreProcessors = processors; - return this; - } - - /** - * @deprecated Use {@link NeuralNetConfiguration.Builder#trainingWorkspaceMode(WorkspaceMode)} - */ - @Deprecated - public Builder trainingWorkspaceMode(@NonNull WorkspaceMode workspaceMode) { - this.trainingWorkspaceMode = workspaceMode; - return this; - } - - /** - * @deprecated Use {@link NeuralNetConfiguration.Builder#inferenceWorkspaceMode(WorkspaceMode)} - */ - @Deprecated - public Builder inferenceWorkspaceMode(@NonNull WorkspaceMode workspaceMode) { - this.inferenceWorkspaceMode = workspaceMode; - return this; - } - - /** - * This method defines how/if preOutput cache is handled: - * NONE: cache disabled (default value) - * HOST: Host memory will be used - * DEVICE: GPU memory will be used (on CPU backends effect will be the same as for HOST) - * - * @param cacheMode - * @return - */ - public Builder cacheMode(@NonNull CacheMode cacheMode) { - this.cacheMode = cacheMode; - return this; - } - - /** - * The type of backprop. Default setting is used for most networks (MLP, CNN etc), - * but optionally truncated BPTT can be used for training recurrent neural networks. 
- * If using TruncatedBPTT make sure you set both tBPTTForwardLength() and tBPTTBackwardLength() - */ - public Builder backpropType(@NonNull BackpropType type) { - this.backpropType = type; - return this; - } - - /** - * When doing truncated BPTT: how many steps should we do?
- * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
- * See: http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf - * - * @param bpttLength length > 0 - */ - public Builder tBPTTLength(int bpttLength) { - tBPTTForwardLength(bpttLength); - return tBPTTBackwardLength(bpttLength); - } - - /** - * When doing truncated BPTT: how many steps of forward pass should we do - * before doing (truncated) backprop?
- * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
- * Typically tBPTTForwardLength parameter is same as the tBPTTBackwardLength parameter, - * but may be larger than it in some circumstances (but never smaller)
- * Ideally your training data time series length should be divisible by this - * This is the k1 parameter on pg23 of - * http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf - * - * @param forwardLength Forward length > 0, >= backwardLength - */ - public Builder tBPTTForwardLength(int forwardLength) { - this.tbpttFwdLength = forwardLength; - return this; - } - - /** - * When doing truncated BPTT: how many steps of backward should we do?
- * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
- * This is the k2 parameter on pg23 of - * http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf - * - * @param backwardLength <= forwardLength - */ - public Builder tBPTTBackwardLength(int backwardLength) { - this.tbpttBackLength = backwardLength; - return this; - } - - public Builder confs(List confs) { - this.confs = confs; - return this; - } - - public Builder setInputType(InputType inputType) { - this.inputType = inputType; - return this; - } - - /** - * Enabled by default. If enabled, the output layer configuration will be validated, to throw an exception on - * likely invalid outputs - such as softmax + nOut=1, or LossMCXENT + Tanh.
- * If disabled (false) no output layer validation will be performed.
- * Disabling this validation is not recommended, as the configurations that fail validation usually will - * not be able to learn correctly. However, the option to disable this validation is provided for advanced users - * when creating non-standard architectures. - * - * @param validate If true: validate output layer configuration. False: don't validate - */ - public Builder validateOutputLayerConfig(boolean validate) { - this.validateOutputConfig = validate; - return this; - } - - /** - * Enabled by default. If enabled, an exception will be throw when using the (invalid) combination of truncated - * backpropagation through time (TBPTT) with either a GlobalPoolingLayer or LastTimeStepLayer.
- * It is possible to disable this validation to allow what is almost certainly an invalid configuration to be used, - * however this is not recommended. - * - * @param validate Whether TBPTT validation should be performed - */ - public Builder validateTbpttConfig(boolean validate){ - this.validateTbpttConfig = validate; - return this; - } - - /** - * Set the DataType for the network parameters and activations for all layers in the network. Default: Float - * @param dataType Datatype to use for parameters and activations - */ - public Builder dataType(@NonNull DataType dataType){ - this.dataType = dataType; - return this; - } - - - public MultiLayerConfiguration build() { - //Validate BackpropType setting - if ((tbpttBackLength != DEFAULT_TBPTT_LENGTH || tbpttFwdLength != DEFAULT_TBPTT_LENGTH) && backpropType != BackpropType.TruncatedBPTT) { - log.warn("Truncated backpropagation through time lengths have been configured with values " + tbpttFwdLength - + " and " + tbpttBackLength + " but backprop type is set to " + backpropType + ". TBPTT configuration" + - " settings will only take effect if backprop type is set to BackpropType.TruncatedBPTT"); - } - - if(backpropType == BackpropType.TruncatedBPTT && validateTbpttConfig) { - //Check for invalid combination - tbptt plus LastTimeStepLayer or - for( int i = 0; i < confs.size(); i++) { - Layer l = confs.get(i).getLayer(); - if(l instanceof LastTimeStep || l instanceof GlobalPoolingLayer){ - throw new IllegalStateException("Invalid network configuration detected: Truncated backpropagation through time (TBPTT)" + - " cannot be used with layer " + i + " of type " + l.getClass().getName() + ": TBPTT is incompatible with this layer type (which is designed " + - "to process entire sequences at once, and does support the type of sequence segments that TPBTT uses).\n" + - "This check can be disabled using validateTbpttConfig(false) but this is not recommended."); - } - } - } - - - if (inputType == null && inputPreProcessors.get(0) == null) { - //User hasn't set the InputType. Sometimes we can infer it... - // For example, Dense/RNN layers, where preprocessor isn't set -> user is *probably* going to feed in - // standard feedforward or RNN data - //This isn't the most elegant implementation, but should avoid breaking backward compatibility here - //Can't infer InputType for CNN layers, however (don't know image dimensions/depth) - Layer firstLayer = confs.get(0).getLayer(); - if (firstLayer instanceof BaseRecurrentLayer) { - BaseRecurrentLayer brl = (BaseRecurrentLayer) firstLayer; - val nIn = brl.getNIn(); - if (nIn > 0) { - inputType = InputType.recurrent(nIn, brl.getRnnDataFormat()); - } - } else if (firstLayer instanceof DenseLayer || firstLayer instanceof EmbeddingLayer - || firstLayer instanceof OutputLayer) { - //Can't just use "instanceof FeedForwardLayer" here. ConvolutionLayer is also a FeedForwardLayer - FeedForwardLayer ffl = (FeedForwardLayer) firstLayer; - val nIn = ffl.getNIn(); - if (nIn > 0) { - inputType = InputType.feedForward(nIn); - } - } - } - - - //Add preprocessors and set nIns, if InputType has been set - // Builder.inputType field can be set in 1 of 4 ways: - // 1. User calls setInputType directly - // 2. Via ConvolutionLayerSetup -> internally calls setInputType(InputType.convolutional(...)) - // 3. 
Via the above code: i.e., assume input is as expected by the RNN or dense layer -> sets the inputType field - if (inputType != null) { - InputType currentInputType = inputType; - for (int i = 0; i < confs.size(); i++) { - Layer l = confs.get(i).getLayer(); - if (inputPreProcessors.get(i) == null) { - //Don't override preprocessor setting, but set preprocessor if required... - InputPreProcessor inputPreProcessor = l.getPreProcessorForInputType(currentInputType); - if (inputPreProcessor != null) { - inputPreProcessors.put(i, inputPreProcessor); - } - } - - InputPreProcessor inputPreProcessor = inputPreProcessors.get(i); - if (inputPreProcessor != null) { - currentInputType = inputPreProcessor.getOutputType(currentInputType); - } - if(i > 0) { - Layer layer = confs.get(i - 1).getLayer(); - //convolution 1d is an edge case where it has rnn input type but the filters - //should be the output - if(layer instanceof Convolution1DLayer) { - if(l instanceof DenseLayer && inputType instanceof InputType.InputTypeRecurrent) { - FeedForwardLayer feedForwardLayer = (FeedForwardLayer) l; - if(inputType instanceof InputType.InputTypeRecurrent) { - InputType.InputTypeRecurrent recurrent = (InputType.InputTypeRecurrent) inputType; - feedForwardLayer.setNIn(recurrent.getTimeSeriesLength()); - } - } - else - l.setNIn(currentInputType, overrideNinUponBuild); //Don't override the nIn setting, if it's manually set by the user - } - else - l.setNIn(currentInputType, overrideNinUponBuild); //Don't override the nIn setting, if it's manually set by the user - - } - else - l.setNIn(currentInputType, overrideNinUponBuild); //Don't override the nIn setting, if it's manually set by the user - - - currentInputType = l.getOutputType(i, currentInputType); - } - - } - - MultiLayerConfiguration conf = new MultiLayerConfiguration(); - conf.confs = this.confs; - conf.inputPreProcessors = inputPreProcessors; - conf.backpropType = backpropType; - conf.tbpttFwdLength = tbpttFwdLength; - conf.tbpttBackLength = tbpttBackLength; - conf.trainingWorkspaceMode = trainingWorkspaceMode; - conf.inferenceWorkspaceMode = inferenceWorkspaceMode; - conf.cacheMode = cacheMode; - conf.dataType = dataType; - - Nd4j.getRandom().setSeed(conf.getConf(0).getSeed()); - - //Validate output layer configuration - if (validateOutputConfig) { - //Validate output layer configurations... - for (NeuralNetConfiguration n : conf.getConfs()) { - Layer l = n.getLayer(); - OutputLayerUtil.validateOutputLayer(l.getLayerName(), l); //No-op for non output/loss layers - } - } - - return conf; - - } - } -} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetBaseBuilderConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetBaseBuilderConfiguration.java new file mode 100644 index 000000000..4f9a9bb1f --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetBaseBuilderConfiguration.java @@ -0,0 +1,950 @@ +/* + * ****************************************************************************** + * * + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. 
+ * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + */ + +package org.deeplearning4j.nn.conf; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ArrayNode; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import lombok.Data; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.NonNull; +import lombok.Setter; +import lombok.experimental.SuperBuilder; +import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.INeuralNetworkConfiguration; +import org.deeplearning4j.nn.api.OptimizationAlgorithm; +import org.deeplearning4j.nn.api.layers.LayerConstraint; +import org.deeplearning4j.nn.conf.distribution.Distribution; +import org.deeplearning4j.nn.conf.dropout.Dropout; +import org.deeplearning4j.nn.conf.dropout.IDropout; +import org.deeplearning4j.nn.conf.inputs.InputType; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.serde.JsonMappers; +import org.deeplearning4j.nn.conf.stepfunctions.StepFunction; +import org.deeplearning4j.nn.conf.weightnoise.IWeightNoise; +import org.deeplearning4j.nn.weights.IWeightInit; +import org.deeplearning4j.nn.weights.WeightInit; +import org.deeplearning4j.nn.weights.WeightInitDistribution; +import org.deeplearning4j.nn.weights.WeightInitXavier; +import org.deeplearning4j.util.NetworkUtils; +import org.nd4j.common.base.Preconditions; +import org.nd4j.linalg.activations.Activation; +import org.nd4j.linalg.activations.IActivation; +import org.nd4j.linalg.activations.impl.ActivationSigmoid; +import org.nd4j.linalg.api.buffer.DataType; +import org.nd4j.linalg.learning.config.IUpdater; +import org.nd4j.linalg.learning.config.Sgd; +import org.nd4j.linalg.learning.regularization.L1Regularization; +import org.nd4j.linalg.learning.regularization.L2Regularization; +import org.nd4j.linalg.learning.regularization.Regularization; +import org.nd4j.linalg.learning.regularization.WeightDecay; + +/** + * Deeplearning4j is a domain-specific language to configure deep neural networks, which are made of + * multiple layers. Everything starts with a NeuralNetConfiguration, which organizes those layers + * and their hyperparameters. Hyperparameters are variables that determine how a neural network + * learns. They include how many times to update the weights of the model, how to initialize those + * weights, which activation function to attach to the nodes, which optimization algorithm to use, + * and how fast the model should learn. This is what one configuration would look like:
+ *
+ * NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
+ * .weightInit(WeightInit.XAVIER) .activation(Activation.RELU)
+ * .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
+ * .updater(new Sgd(0.05)) //... other hyperparameters
+ * .build();
+ *
+ * With Deeplearning4j, you add a layer by calling layer on the + * NeuralNetConfiguration.NeuralNetConfigurationBuilder(), specifying its place in the order of + * layers (the zero-indexed layer below is the input layer), the number of input and output nodes, + * nIn and nOut, as well as the type: DenseLayer.
+ *
+ * .layer(0, new DenseLayer.Builder().nIn(784).nOut(250)
+ * .build())
+ *
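Illustrative sketch pulling the fragments of the Javadoc example above into one place. The layer(int, ...) builder method and the MultiLayerNetwork(NeuralNetConfiguration) constructor follow this Javadoc and the overall direction of the refactoring; treat their exact signatures as assumptions, not code introduced by this hunk.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class ConfigureAndTrainSketch {
  public static void main(String[] args) {
    NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
        .weightInit(WeightInit.XAVIER)
        .activation(Activation.RELU)
        .updater(new Sgd(0.05))
        // layer(int, ...) mirrors the Javadoc fragment above; exact signature is an assumption
        .layer(0, new DenseLayer.Builder().nIn(784).nOut(250).build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
            .nIn(250).nOut(10).activation(Activation.SOFTMAX).build())
        .build();

    // Assumption: MultiLayerNetwork now accepts the flattened NeuralNetConfiguration directly.
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    // model.fit(trainingIterator);  // train with a DataSetIterator, as described next
  }
}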
+ * Once you've configured your net, you train the model with model.fit. + */ +@Data +@Slf4j +@EqualsAndHashCode(exclude = {"iterationCount", "epochCount"}) +@JsonIgnoreProperties(ignoreUnknown = true) +// The inner builder, that we can then extend ... +@SuperBuilder // TODO fix access +public abstract class NeuralNetBaseBuilderConfiguration implements INeuralNetworkConfiguration { + + private static final int DEFAULT_TBPTT_LENGTH = 20; + + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm + * regularization, etc). These constraints are applied at each iteration, after the parameters + * have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all weight parameters of all layers + */ + @lombok.Builder.Default protected final List contrainWeights = new ArrayList<>(); + + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm + * regularization, etc). These constraints are applied at each iteration, after the parameters + * have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all bias parameters of all layers + */ + @lombok.Builder.Default protected final List biasConstraints = new ArrayList<>(); + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm + * regularization, etc). These constraints are applied at each iteration, after the parameters + * have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all parameters of all layers + */ + @lombok.Builder.Default + protected final List allParamContraints = new ArrayList<>(); + /** + * This is a basic concept, a neural network is made of layers, but also can use another neural + * network as a building block. When the configuration is initialized, those building blocks will + * be flattened into a single list of layers. Internal ordered list of layers and inner neural + * networks. If the object is a NeuralNetConfiguration, each configuration must contain at least + * one layer. + */ + @Getter @lombok.Builder.Default + protected final List innerConfigurations = new ArrayList<>(); + + @Getter @Setter @NonNull @lombok.Builder.Default @Deprecated + protected WorkspaceMode trainingWorkspaceMode = WorkspaceMode.ENABLED; + + @Getter @Setter @NonNull @lombok.Builder.Default @Deprecated + protected WorkspaceMode inferenceWorkspaceMode = WorkspaceMode.ENABLED; + /** + * The type of backprop. Default setting is used for most networks (MLP, CNN etc), but optionally + * truncated BPTT can be used for training recurrent neural networks. If using TruncatedBPTT make + * sure you set both tBPTTForwardLength() and tBPTTBackwardLength() + */ + @Getter @Setter @NonNull @lombok.Builder.Default + protected BackpropType backpropType = BackpropType.Standard; + + @Getter @lombok.Builder.Default + protected Map inputPreProcessors = new HashMap<>(); + /** + * When doing truncated BPTT: how many steps of forward pass should we do before doing (truncated) + * backprop?
+ * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
+ * Typically tBPTTForwardLength parameter is same as the tBPTTBackwardLength parameter, but may be + * larger than it in some circumstances (but never smaller)
+ * Ideally your training data time series length should be divisible by this This is the k1 + * parameter on pg23 of http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf + * + * @param forwardLength Forward length > 0, >= backwardLength + */ + @Getter @Setter @lombok.Builder.Default protected int tbpttFwdLength = 20; + /** + * When doing truncated BPTT: how many steps of backward should we do?
+ * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
+ * This is the k2 parameter on pg23 of http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf + * + * @param backwardLength <= forwardLength + */ + @Getter @Setter @lombok.Builder.Default protected int tbpttBackLength = 20; + // Counter for the number of parameter updates so far + // This is important for learning rate schedules, for example, and is stored here to ensure it is + // persisted + // for Spark and model serialization + @Getter @Setter @lombok.Builder.Default protected int iterationCount = 0; + // Counter for the number of epochs completed so far. Used for per-epoch schedules + @Getter @Setter @lombok.Builder.Default protected int epochCount = 0; + @lombok.Builder.Default protected double dampingFactor = 100; + // gradient keys used for ensuring order when getting and setting the gradient + // @lombok.Builder.Default + // protected List variables = new ArrayList<>(); + @Getter @Setter @lombok.Builder.Default private boolean miniBatch = false; + /** A seed for this network, will be random if not specified. */ + @Getter @Setter @lombok.Builder.Default private long seed = new Random().nextLong(); + /** + * The default {@link CacheMode} for this configuration. Will be set to "NONE" if not specified + * otherwise. This method defines how/if preOutput cache is handled: NONE: cache disabled (default + * value) HOST: Host memory will be used DEVICE: GPU memory will be used (on CPU backends effect + * will be the same as for HOST) + * + *

Valid values are
+ * CacheMode.NONE,
+ * CacheMode.HOST or
+ * CacheMode.DEVICE
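Illustrative sketch of configuring the fields documented above. Because backpropType, tbpttFwdLength, tbpttBackLength and cacheMode are plain @Builder.Default fields, the lombok @SuperBuilder should expose same-named builder methods; those method names are inferred from the fields and are not spelled out in this hunk.

import org.deeplearning4j.nn.conf.BackpropType;
import org.deeplearning4j.nn.conf.CacheMode;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;

public class TbpttAndCacheModeSketch {
  public static void main(String[] args) {
    // Builder methods inferred from the fields declared above (one per field via @SuperBuilder).
    NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
        .backpropType(BackpropType.TruncatedBPTT)
        .tbpttFwdLength(25)        // k1: forward steps per truncated-BPTT segment
        .tbpttBackLength(25)       // k2: backward steps, <= tbpttFwdLength
        .cacheMode(CacheMode.HOST) // preOutput cache kept in host memory
        .build();

    System.out.println(conf.getTbpttFwdLength() + "/" + conf.getTbpttBackLength());
  }
}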
+ * + * @param cacheMode + */ + @NonNull @Getter @Setter @lombok.Builder.Default private CacheMode cacheMode = CacheMode.NONE; + + /** + * The name for this configuration. Defaults to "Anonymous INeuralNetworkConfiguration" if it is + * not specified. + */ + @lombok.Builder.Default @Getter private String name = "Anonymous INeuralNetworkConfiguration"; + /** The {@link InputType} of the data for this network configuration */ + @Getter @Setter private InputType inputType; + /** + * Set the DataType for the network parameters and activations for all layers in the network. + * Default: Float + * + * @param dataType Datatype to use for parameters and activations + */ + @Getter @Setter @lombok.Builder.Default @NonNull private DataType dataType = DataType.FLOAT; + /** + * Whether to override the nIn configuration forcibly upon construction. Default value is true. + * + * @return builder pattern + */ + @Getter @Setter @lombok.Builder.Default private boolean overrideNinUponBuild = true; + /** + * Enabled by default. If enabled, the output layer configuration will be validated, to throw an + * exception on likely invalid outputs - such as softmax + nOut=1, or LossMCXENT + Tanh.
+ * If disabled (false) no output layer validation will be performed.
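+ * For example, a softmax output layer with {@code nOut == 1} would be rejected. An intentionally
+ * non-standard setup can be permitted with (illustrative; builder method name assumed from the
+ * field): {@code builder().validateOutputLayerConfig(false)}.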
+ * Disabling this validation is not recommended, as the configurations that fail validation + * usually will not be able to learn correctly. However, the option to disable this validation is + * provided for advanced users when creating non-standard architectures. + * + * @param validate If true: validate output layer configuration. False: don't validate + */ + @Getter @Setter @lombok.Builder.Default private boolean validateOutputLayerConfig = true; + /** + * Enabled by default. If enabled, an exception will be thrown when using the (invalid) combination + * of truncated backpropagation through time (TBPTT) with either a GlobalPoolingLayer or + * LastTimeStepLayer.
+ * It is possible to disable this validation to allow what is almost certainly an invalid + * configuration to be used, however this is not recommended. + * + * @param validate Whether TBPTT validation should be performed + */ + @Getter @Setter @lombok.Builder.Default private boolean validateTbpttConfig = true; + /** + * Gradient updater configuration. For example, {@link org.nd4j.linalg.learning.config.Adam} or + * {@link org.nd4j.linalg.learning.config.Nesterovs}
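+ * A typical choice (illustrative value): {@code builder().updater(new Adam(1e-3))} applies Adam
+ * with learning rate 1e-3 to every layer that does not configure its own updater.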
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param updater Updater to use + */ + @Getter @Setter private IUpdater updater; + /** + * Gradient normalization strategy. Used to specify gradient renormalization, gradient clipping + * etc. See {@link GradientNormalization} for details
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param gradientNormalization Type of normalization to use. Defaults to None. + * @see GradientNormalization + */ + @Getter @Setter @NonNull @lombok.Builder.Default + private GradientNormalization gradientNormalization = GradientNormalization.None; + /** + * Threshold for gradient normalization, only used for GradientNormalization.ClipL2PerLayer, + * GradientNormalization.ClipL2PerParamType, and + * GradientNormalization.ClipElementWiseAbsoluteValue
+ * Not used otherwise.
+ * L2 threshold for first two types of clipping, or absolute value threshold for last type of + * clipping.
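+ * For example (illustrative threshold): {@code builder()
+ *     .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
+ *     .gradientNormalizationThreshold(1.0)}
+ * clips each gradient element to the range [-1.0, 1.0].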
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + */ + @Getter @Setter private double gradientNormalizationThreshold; + /** + * Activation function / neuron non-linearity
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + */ + @Getter @Setter private IActivation activation; + // whether to constrain the gradient to unit norm or not + @Getter @Setter private StepFunction stepFunction; + + @Getter @Setter @lombok.Builder.Default + private OptimizationAlgorithm optimizationAlgo = + OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT; + + @Getter @Setter @lombok.Builder.Default private int maxNumLineSearchIterations = 5; + /** + * Set the regularization for the parameters (excluding biases) - for example {@link WeightDecay} + *
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis.
+ * + * @param regularization Regularization to apply for the network parameters/weights (excluding + * biases) + */ + @Getter @lombok.Builder.Default private List regularization = new ArrayList<>(); + /** + * Set the regularization for the biases only - for example {@link WeightDecay}
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis.
+ * + * @param regularizationBias Regularization to apply for the network biases only + */ + @Getter @lombok.Builder.Default + private List regularizationBias = new ArrayList<>(); + + @Getter @Setter @lombok.Builder.Default private IUpdater iUpdater = new Sgd(); + /** + * Gradient updater configuration, for the biases only. If not set, biases will use the updater as + * set by {@link #setIUpdater(IUpdater)}
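+ * For example (illustrative; relies on the Lombok-generated builder methods for these fields):
+ * {@code builder().iUpdater(new Adam(1e-3)).biasUpdater(new Sgd(1e-3))} would use Adam for the
+ * weights and plain SGD for the biases.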
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param updater Updater to use for bias parameters + */ + @Getter @Setter @lombok.Builder.Default private IUpdater biasUpdater = null; + + @Getter @Setter @lombok.Builder.Default + private IActivation activationFn = new ActivationSigmoid(); + /** + * Weight initialization scheme to use, for initial weight values Note: values set by this method + * will be applied to all applicable layers in the network, unless a different value is explicitly + * set on a given layer. In other words: values set via this method are used as the default value, + * and can be overridden on a per-layer basis. + */ + @Getter @Setter @lombok.Builder.Default private IWeightInit weightInit = new WeightInitXavier(); + /** + * Sets the convolution mode for convolutional layers, which impacts padding and output sizes. See + * {@link ConvolutionMode} for details. Defaults to ConvolutionMode.TRUNCATE
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param convolutionMode Convolution mode to use + */ + @Getter @Setter @lombok.Builder.Default + private ConvolutionMode convolutionMode = ConvolutionMode.Truncate; + /** + * Sets the cuDNN algo mode for convolutional layers, which impacts performance and memory usage + * of cuDNN. See {@link ConvolutionLayer.AlgoMode} for details. Defaults to "PREFER_FASTEST", but + * "NO_WORKSPACE" uses less memory.
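+ * For example, on a memory-constrained GPU one might use (illustrative):
+ * {@code builder().convolutionMode(ConvolutionMode.Same)
+ *     .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)}.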
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param cudnnAlgoMode cuDNN algo mode to use + */ + @Getter @Setter @lombok.Builder.Default + private ConvolutionLayer.AlgoMode cudnnAlgoMode = ConvolutionLayer.AlgoMode.PREFER_FASTEST; + + @Getter @Setter @lombok.Builder.Default private boolean minimize = true; + /** + * Set the dropout for all layers in this network
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. * Dropout + * probability. This is the probability of retaining each input activation value for a + * layer. * dropOut(x) will keep an input activation with probability x, and set to 0 with + * probability 1-x.
+ * * dropOut(0.0) is a special value / special case - when set to 0.0., dropout is disabled (not + * applied). Note * that a dropout value of 1.0 is functionally equivalent to no dropout: i.e., + * 100% probability of retaining * each input activation.
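+ * For example (illustrative): {@code builder().dropOut(0.8)} retains each input activation with
+ * probability 0.8 (i.e. 20% dropout) in every layer that does not override it.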
+ * Note 1: Dropout is applied at training time only - and is automatically not applied at + * test time (for evaluation, etc)
+ * Note 2: This sets the probability per-layer. Care should be taken when setting lower values + * for complex networks (too much information may be lost with aggressive (very low) dropout + * values).
+ * Note 3: Frequently, dropout is not applied to (or has a higher retain probability for) input + * (first layer) layers. Dropout is also often not applied to output layers. This needs to be + * handled MANUALLY by the user - set .dropout(0) on those layers when using the global dropout + * setting.
+ * Note 4: Implementation detail (most users can ignore): DL4J uses inverted dropout, as + * described here: http://cs231n.github.io/neural-networks-2/
+ * * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different * value is explicitly set on a given layer. In other words: values set via + * this method are used as the default * value, and can be overridden on a per-layer basis. * + * * @param inputRetainProbability Dropout probability (probability of retaining each input + * activation value for a layer) * @see #dropOut(IDropout) + * + * @param dropout Dropout, such as {@link Dropout}, {@link + * org.deeplearning4j.nn.conf.dropout.GaussianDropout}, {@link + * org.deeplearning4j.nn.conf.dropout.GaussianNoise} etc + * @return + */ + @Getter @Setter private IDropout idropOut; + /** + * Set the weight noise (such as {@link org.deeplearning4j.nn.conf.weightnoise.DropConnect} and + * {@link org.deeplearning4j.nn.conf.weightnoise.WeightNoise}) for the layers in this network.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param weightNoise Weight noise instance to use + */ + @Getter @Setter private IWeightNoise weightNoise; + + @Getter @Setter @lombok.Builder.Default private double biasInit = 0.0; + @Getter @Setter @lombok.Builder.Default private double gainInit = 1.0; + + /** + * Handle {@link WeightInit} and {@link Distribution} from legacy configs in Json format. Copied + * from handling of {@link Activation} above. + * + * @return True if all is well and layer iteration shall continue. False else-wise. + */ + private static boolean handleLegacyWeightInitFromJson( + String json, LayerConfiguration l, ObjectMapper mapper, JsonNode confs, int layerCount) { + if ((l instanceof BaseLayerConfiguration) + && ((BaseLayerConfiguration) l).getWeightInit() == null) { + try { + JsonNode jsonNode = mapper.readTree(json); + if (confs == null) { + confs = jsonNode.get("confs"); + } + if (confs instanceof ArrayNode) { + ArrayNode layerConfs = (ArrayNode) confs; + JsonNode outputLayerNNCNode = layerConfs.get(layerCount); + if (outputLayerNNCNode == null) { + return false; // Should never happen... + } + JsonNode layerWrapperNode = outputLayerNNCNode.get("layer"); + + if (layerWrapperNode == null || layerWrapperNode.size() != 1) { + return true; + } + + JsonNode layerNode = layerWrapperNode.elements().next(); + JsonNode weightInit = + layerNode.get("weightInit"); // Should only have 1 element: "dense", "output", etc + JsonNode distribution = layerNode.get("dist"); + + Distribution dist = null; + if (distribution != null) { + dist = mapper.treeToValue(distribution, Distribution.class); + } + + if (weightInit != null) { + final IWeightInit wi = + WeightInit.valueOf(weightInit.asText()).getWeightInitFunction(dist); + ((BaseLayerConfiguration) l).setWeightInit(wi); + } + } + + } catch (IOException e) { + log.warn( + "ILayer with null WeightInit detected: " + l.getLayerName() + ", could not parse JSON", + e); + } + } + return true; + } + + /** + * Object mapper for serialization of configurations + * + * @return + */ + public static ObjectMapper mapperYaml() { + return JsonMappers.getMapperYaml(); + } + + /** + * Object mapper for serialization of configurations + * + * @return + */ + public static ObjectMapper mapper() { + return JsonMappers.getMapper(); + } + + public static NeuralNetBaseBuilderConfiguration fromYaml(String input) { + throw new RuntimeException("Needs fixing - not supported."); // TODO + } + + /** + * @return JSON representation of NN configuration + */ + public String toYaml() { + ObjectMapper mapper = NeuralNetBaseBuilderConfiguration.mapperYaml(); + synchronized (mapper) { + try { + return mapper.writeValueAsString(this); + } catch (com.fasterxml.jackson.core.JsonProcessingException e) { + throw new RuntimeException(e); + } + } + } + + /** + * @return JSON representation of NN configuration + */ + public String toJson() { + ObjectMapper mapper = NeuralNetBaseBuilderConfiguration.mapper(); + synchronized (mapper) { + // JSON mappers are supposed to be thread safe: however, in practice they seem to miss fields + // occasionally + // when writeValueAsString is used by multiple threads. This results in invalid JSON. 
See + // issue #3243 + try { + return mapper.writeValueAsString(this); + } catch (com.fasterxml.jackson.core.JsonProcessingException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public NeuralNetBaseBuilderConfiguration clone() { + NeuralNetBaseBuilderConfiguration clone; + try { + clone = (NeuralNetBaseBuilderConfiguration) super.clone(); + } catch (CloneNotSupportedException ex) { + throw new RuntimeException(ex); + } + if (clone.stepFunction != null) { + clone.stepFunction = clone.stepFunction.clone(); + } + /** if (clone.variables != null) { clone.variables = new ArrayList<>(clone.variables); } */ + clone.getInnerConfigurations().addAll(innerConfigurations); + + if (clone.getInputPreProcessors() != null) { + Map map = new HashMap<>(); + for (Map.Entry entry : clone.getInputPreProcessors().entrySet()) { + map.put(entry.getKey(), entry.getValue().clone()); + } + clone.getInputPreProcessors().clear(); + clone.getInputPreProcessors().putAll(map); + } + + clone.setInferenceWorkspaceMode(this.inferenceWorkspaceMode); + clone.setTrainingWorkspaceMode(this.trainingWorkspaceMode); + clone.setCacheMode(this.cacheMode); + clone.setValidateOutputLayerConfig(this.validateOutputLayerConfig); + clone.setDataType(this.dataType); + + return clone; + } + + public abstract static class NeuralNetBaseBuilderConfigurationBuilder< + C extends NeuralNetBaseBuilderConfiguration, + B extends NeuralNetBaseBuilderConfiguration.NeuralNetBaseBuilderConfigurationBuilder> { + + List innerConfigurations$value = new ArrayList<>(); // initialize with an empty list + + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm + * regularization, etc). These constraints are applied at each iteration, after the parameters + * have been updated.
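+ * For example (illustrative; constraint class from the DL4J constraints package):
+ * {@code builder().constrainWeights(new NonNegativeConstraint())} keeps all weight parameters
+ * non-negative after each update.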
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all weight parameters of all layers + */ + public B constrainWeights(LayerConstraint... constraints) { + contrainWeights$value = Arrays.asList(constraints); + contrainWeights$set = true; + return (B) this; + } + + /** + * For the (perhaps partially constructed) network configuration, return a list of activation + * sizes for each layer in the network.
+ * Note: To use this method, the network input type must have been set using {@link + * #setInputType(InputType)} first + * + * @return A list of activation types for the network, indexed by layer number + */ + public List getLayerActivationTypes() { + Preconditions.checkState( + inputType != null, + "Can only calculate activation types if input type has" + + "been set. Use setInputType(InputType)"); + + throw new RuntimeException( + "Error calculating layer activation types: error instantiating MultiLayerConfiguration"); + } + + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm + * regularization, etc). These constraints are applied at each iteration, after the parameters + * have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all parameters of all layers + */ + public B constrainAllParameters(LayerConstraint... constraints) { + allParamContraints$value = Arrays.asList(constraints); + allParamContraints$set = true; + return (B) this; + } + + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm + * regularization, etc). These constraints are applied at each iteration, after the parameters + * have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all bias parameters of all layers + */ + public B constrainBias(LayerConstraint... constraints) { + biasConstraints$value = Arrays.asList(constraints); + biasConstraints$set = true; + return (B) this; + } + + /** + * Specify the processors. These are used at each layer for doing things like normalization and + * shaping of input. + * + * @param processor what to use to preProcess the data. + * @return builder pattern + */ + public B inputPreProcessor(Integer layer, InputPreProcessor processor) { + inputPreProcessors$value.put(layer, processor); + inputPreProcessors$set = true; + return (B) this; + } + + /** + * Set layer at index + * + * @param index where to insert + * @param layer the layer + * @return builder + */ + public B layer(Integer index, @NonNull LayerConfiguration layer) { + innerConfigurations$value.add(index, layer); + innerConfigurations$set = true; + return (B) this; + } + + /** + * Add a layer + * + * @param layer the layer + * @return builder + */ + public B layer(@NonNull LayerConfiguration layer) { + innerConfigurations$value.add(layer); + innerConfigurations$set = true; + return (B) this; + } + + // Avoid self-recursion here: honour the Lombok @Builder.Default value (true) when the field has not been set explicitly + public boolean isOverrideNinUponBuild() { + return overrideNinUponBuild$set ? overrideNinUponBuild$value : true; + } + + /** Specify additional layer configurations */ + @Deprecated + public B layersFromArray(@NonNull LayerConfiguration[] arrLayers) { + innerConfigurations$value.addAll(List.of(arrLayers)); + innerConfigurations$set = true; + return (B) this; + } + + /** Specify additional layer configurations */ + @Deprecated + public B layersFromList(@NonNull List listLayers) { + innerConfigurations$value.addAll(listLayers); + innerConfigurations$set = true; + return (B) this; + } + + /** + * L1 regularization coefficient for the weights (excluding biases).
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis. + */ + public B l1(double l1) { + // Check if existing L1 exists; if so, replace it + NetworkUtils.removeInstances(regularization$value, L1Regularization.class); + if (l1 > 0.0) { + regularization$value.add(new L1Regularization(l1)); + } + regularization$set = true; + return (B) this; + } + + /** + * L2 regularization coefficient for the weights (excluding biases).
+ * Note: Generally, {@link WeightDecay} (set via {@link #weightDecay(double)} should be + * preferred to L2 regularization. See {@link WeightDecay} javadoc for further details.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis.
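+ * For example (illustrative coefficient): {@code builder().l2(1e-4)} adds a plain L2 penalty to
+ * all weights, whereas {@code builder().weightDecay(1e-4)} uses the generally preferred weight
+ * decay formulation described in {@link WeightDecay}.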
+ * Note: L2 regularization and weight decay usually should not be used together; if any weight + * decay (or L2) has been added for the biases, these will be removed first. + * + * @see #weightDecay(double, boolean) + */ + public B l2(double l2) { + // Check if existing L2 exists; if so, replace it. Also remove weight decay - it doesn't make + // sense to use both + NetworkUtils.removeInstances(regularization$value, L2Regularization.class); + if (l2 > 0.0) { + NetworkUtils.removeInstancesWithWarning( + regularization$value, + WeightDecay.class, + "WeightDecay regularization removed: incompatible with added L2 regularization"); + regularization$value.add(new L2Regularization(l2)); + } + regularization$set = true; + return (B) this; + } + + /** + * L1 regularization coefficient for the bias.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis. + */ + public B l1Bias(double l1Bias) { + NetworkUtils.removeInstances(regularizationBias$value, L1Regularization.class); + if (l1Bias > 0.0) { + regularizationBias$value.add(new L1Regularization(l1Bias)); + } + regularizationBias$set = true; + return (B) this; + } + + /** + * L2 regularization coefficient for the bias.
+ * Note: Generally, {@link WeightDecay} (set via {@link #weightDecayBias(double, + * boolean)} should be preferred to L2 regularization. See {@link WeightDecay} javadoc for + * further details.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis.
+ * Note: L2 regularization and weight decay usually should not be used together; if any weight + * decay (or L2) has been added for the biases, these will be removed first. + * + * @see #weightDecayBias(double, boolean) + */ + public B l2Bias(double l2Bias) { + NetworkUtils.removeInstances(regularizationBias$value, L2Regularization.class); + if (l2Bias > 0.0) { + NetworkUtils.removeInstancesWithWarning( + regularizationBias$value, + WeightDecay.class, + "L2 bias regularization removed: incompatible with added WeightDecay regularization"); + regularizationBias$value.add(new L2Regularization(l2Bias)); + } + return (B) this; + } + + /** + * Add weight decay regularization for the network parameters (excluding biases).
+ * This applies weight decay with the learning rate multiplied in - see {@link WeightDecay} + * for more details.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis.
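+ * For example (illustrative coefficient): {@code builder().weightDecay(0.01)} is shorthand for
+ * {@code weightDecay(0.01, true)}, i.e. the decay term is scaled by the learning rate.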
+ * + * @param coefficient Weight decay regularization coefficient + * @see #weightDecay(double, boolean) + */ + public B weightDecay(double coefficient) { + return weightDecay(coefficient, true); + } + + /** + * Add weight decay regularization for the network parameters (excluding biases). See {@link + * WeightDecay} for more details.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis.
+ * + * @param coefficient Weight decay regularization coefficient + * @param applyLR Whether the learning rate should be multiplied in when performing weight decay + * updates. See {@link WeightDecay} for more details. + * @see #weightDecay(double, boolean) + */ + public B weightDecay(double coefficient, boolean applyLR) { + // Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't + // make sense to use both + NetworkUtils.removeInstances(regularization$value, WeightDecay.class); + if (coefficient > 0.0) { + NetworkUtils.removeInstancesWithWarning( + regularization$value, + L2Regularization.class, + "L2 regularization removed: incompatible with added WeightDecay regularization"); + regularization$value.add(new WeightDecay(coefficient, applyLR)); + } + regularization$set = true; + return (B) this; + } + + /** + * Weight decay for the biases only - see {@link #weightDecay(double)} for more details. This + * applies weight decay with multiplying the learning rate.
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis.
+ * + * @param coefficient Weight decay regularization coefficient + * @see #weightDecayBias(double, boolean) + */ + public B weightDecayBias(double coefficient) { + return weightDecayBias(coefficient, true); + } + + /** + * Weight decay for the biases only - see {@link #weightDecay(double)} for more details
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis.
+ * + * @param coefficient Weight decay regularization coefficient + */ + public B weightDecayBias(double coefficient, boolean applyLR) { + // Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't + // make sense to use both + NetworkUtils.removeInstances(regularizationBias$value, WeightDecay.class); + if (coefficient > 0) { + NetworkUtils.removeInstancesWithWarning( + regularizationBias$value, + L2Regularization.class, + "L2 bias regularization removed: incompatible with added WeightDecay regularization"); + regularizationBias$value.add(new WeightDecay(coefficient, applyLR)); + } + regularization$set = true; + return (B) this; + } + + /** + * Activation function / neuron non-linearity
+ * Note: values set by this method will be applied to all applicable layers in the network, + * unless a different value is explicitly set on a given layer. In other words: values set via + * this method are used as the default value, and can be overridden on a per-layer basis. + */ + @Deprecated + public B activation(@NonNull Activation activation) { + return (B) activationFn(activation.getActivationFunction()); + } + + /** + * legacy code, does nothing + * + * @return + */ + @Deprecated + public B list() { + return (B) this; + } + + /** + * Set weight initialization scheme to random sampling via the specified distribution. + * Equivalent to: {@code .weightInit(new WeightInitDistribution(distribution))} Note: values set + * by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used + * as the default value, and can be overridden on a per-layer basis. + * + * @param distribution Distribution to use for weight initialization + */ + public B weightInit(@NonNull Distribution distribution) { + this.weightInit$value = new WeightInitDistribution(distribution); + this.weightInit$set = true; + return (B) this; + } + + public B weightInit(@NonNull WeightInit weightInit) { + this.weightInit$value = weightInit.getWeightInitFunction(); + this.weightInit$set = true; + return (B) this; + } + + public B weightInit(@NonNull IWeightInit iWeightInit) { + this.weightInit$value = iWeightInit; + this.weightInit$set = true; + return (B) this; + } + + /** + * Same as {@link #weightInit(Distribution)}. + * + * @param distribution + * @return + */ + public B dist(@NonNull Distribution distribution) { + return (B) weightInit(distribution); + } + + public B dropOut(@NonNull IDropout dropout) { + return (B) idropOut(dropout); + } + + /** + * Creates a new {@link Dropout} and sets the dropout in the builder for this configuration + * + * @param dropout activationRetainProbability + * @return builder + */ + public B dropOut(double dropout) { + return (B) idropOut(new Dropout(dropout)); + } + + /** + * Add multiple inner neural net configurations at once + * + * @param confs list of configurations + * @return builder + */ + @Deprecated + public B confs(@NonNull List confs) { + innerConfigurations$value.addAll(confs); + innerConfigurations$set = true; + return (B) this; + } + } +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetConfiguration.java index 69ff898e2..fe946e022 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetConfiguration.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/NeuralNetConfiguration.java @@ -20,32 +20,55 @@ package org.deeplearning4j.nn.conf; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.NoArgsConstructor; -import lombok.NonNull; +import com.fasterxml.jackson.annotation.JsonIdentityInfo; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.ObjectIdGenerators; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.exc.InvalidTypeIdException; +import com.fasterxml.jackson.databind.node.ArrayNode; +import java.io.IOException; +import java.util.ArrayList; +import 
java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import lombok.*; +import lombok.experimental.SuperBuilder; +import lombok.extern.jackson.Jacksonized; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.api.layers.LayerConstraint; import org.deeplearning4j.nn.conf.distribution.Distribution; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.dropout.IDropout; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.*; -import org.deeplearning4j.nn.conf.layers.misc.FrozenLayer; -import org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop; -import org.deeplearning4j.nn.conf.layers.recurrent.Bidirectional; -import org.deeplearning4j.nn.conf.layers.samediff.AbstractSameDiffLayer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.BaseOutputLayer; +import org.deeplearning4j.nn.conf.layers.BaseRecurrentLayer; +import org.deeplearning4j.nn.conf.layers.Convolution1DLayer; +import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.EmbeddingLayer; +import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.OutputLayer; +import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; +import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; +import org.deeplearning4j.nn.conf.memory.MemoryReport; +import org.deeplearning4j.nn.conf.memory.NetworkMemoryReport; import org.deeplearning4j.nn.conf.serde.JsonMappers; +import org.deeplearning4j.nn.conf.stepfunctions.DefaultStepFunction; import org.deeplearning4j.nn.conf.stepfunctions.StepFunction; import org.deeplearning4j.nn.conf.weightnoise.IWeightNoise; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInit; -import org.deeplearning4j.nn.weights.WeightInitDistribution; -import org.deeplearning4j.nn.weights.WeightInitXavier; -import org.deeplearning4j.util.NetworkUtils; -import org.nd4j.common.base.Preconditions; +import org.deeplearning4j.util.OutputLayerUtil; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.activations.impl.ActivationSigmoid; @@ -53,1166 +76,1100 @@ import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.IUpdater; import org.nd4j.linalg.learning.config.Sgd; -import org.nd4j.linalg.learning.regularization.L1Regularization; -import org.nd4j.linalg.learning.regularization.L2Regularization; import org.nd4j.linalg.learning.regularization.Regularization; import org.nd4j.linalg.learning.regularization.WeightDecay; -import com.fasterxml.jackson.databind.ObjectMapper; - -import java.io.IOException; -import java.io.Serializable; -import java.util.*; +import org.nd4j.linalg.lossfunctions.LossFunctions; +import org.nd4j.linalg.lossfunctions.impl.LossBinaryXENT; +import org.nd4j.linalg.lossfunctions.impl.LossMCXENT; +import org.nd4j.linalg.lossfunctions.impl.LossMSE; +import 
org.nd4j.linalg.lossfunctions.impl.LossNegativeLogLikelihood; +/** + * Deeplearning4j is a domain-specific language to configure deep neural networks, which are made of + * multiple layers. Everything starts with a NeuralNetConfiguration, which organizes those layers + * and their hyperparameters. Hyperparameters are variables that determine how a neural network + * learns. They include how many times to update the weights of the model, how to initialize those + * weights, which activation function to attach to the nodes, which optimization algorithm to use, + * and how fast the model should learn. This is what one configuration would look like: + *

+ * + * NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
+ * .weightInit(WeightInit.XAVIER)
+ * .activation(Activation.RELU)
+ * .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
+ * .updater(new Sgd(0.05)) //... other hyperparameters
+ * .backpropType(BackpropType.Standard)
+ * .build();

+ * + * With Deeplearning4j, you add a layer + * by calling layer on the NeuralNetConfiguration.NeuralNetConfigurationBuilder(), specifying its place in the order of + * layers (the zero-indexed layer below is the input layer), the number of input and output nodes, + * nIn and nOut, as well as the type: DenseLayer.

+ * + * .layer(0, new DenseLayer.Builder().nIn(784).nOut(250)
+ * .build())

+ * + * Once you've configured your net, you train the + * model with model.fit. + */ @Data -@NoArgsConstructor @Slf4j @EqualsAndHashCode(exclude = {"iterationCount", "epochCount"}) -public class NeuralNetConfiguration implements Serializable, Cloneable { +@Jacksonized +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonIdentityInfo(generator= ObjectIdGenerators.IntSequenceGenerator.class, property="@id") - protected Layer layer; - //batch size: primarily used for conv nets. Will be reinforced if set. - protected boolean miniBatch = true; - //number of line search iterations - protected int maxNumLineSearchIterations; - protected long seed; - protected OptimizationAlgorithm optimizationAlgo; - //gradient keys used for ensuring order when getting and setting the gradient - protected List variables = new ArrayList<>(); - //whether to constrain the gradient to unit norm or not - protected StepFunction stepFunction; - //minimize or maximize objective - protected boolean minimize = true; +//The inner builder, that we can then extend ... +@SuperBuilder //TODO fix access +public class NeuralNetConfiguration extends NeuralNetBaseBuilderConfiguration { - // this field defines preOutput cache - protected CacheMode cacheMode; + private IModel net; + private static final int DEFAULT_TBPTT_LENGTH = 20; + private boolean initCalled = false; - protected DataType dataType = DataType.FLOAT; //Default to float for deserialization of legacy format nets + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm regularization, + * etc). These constraints are applied at each iteration, after the parameters have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all bias parameters of all layers + */ + @lombok.Builder.Default + protected final List biasConstraints = new ArrayList<>(); + /** + * Set constraints to be applied to all layers. Default: no constraints.
+ * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm regularization, + * etc). These constraints are applied at each iteration, after the parameters have been updated.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * + * @param constraints Constraints to apply to all parameters of all layers + */ + @lombok.Builder.Default + protected final List allParamContraints = new ArrayList<>(); - //Counter for the number of parameter updates so far for this layer. - //Note that this is only used for pretrain layers (AE, VAE) - MultiLayerConfiguration and ComputationGraphConfiguration - //contain counters for standard backprop training. - // This is important for learning rate schedules, for example, and is stored here to ensure it is persisted - // for Spark and model serialization - protected int iterationCount = 0; + @Getter + @Setter + @NonNull + @lombok.Builder.Default + @Deprecated + protected WorkspaceMode trainingWorkspaceMode = WorkspaceMode.ENABLED; + @Getter + @Setter + @NonNull + @lombok.Builder.Default + @Deprecated + protected WorkspaceMode inferenceWorkspaceMode = WorkspaceMode.ENABLED; + /** + * The type of backprop. Default setting is used for most networks (MLP, CNN etc), but optionally + * truncated BPTT can be used for training recurrent neural networks. If using TruncatedBPTT make + * sure you set both tBPTTForwardLength() and tBPTTBackwardLength() + */ + @Getter + @Setter + @NonNull + @lombok.Builder.Default + protected BackpropType backpropType = BackpropType.Standard; + /** + * When doing truncated BPTT: how many steps of forward pass should we do before doing (truncated) + * backprop?
+ * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
Typically + * tBPTTForwardLength parameter is same as the tBPTTBackwardLength parameter, but may be larger + * than it in some circumstances (but never smaller)
Ideally your training data time series + * length should be divisible by this This is the k1 parameter on pg23 of http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf + * + * @param forwardLength Forward length > 0, >= backwardLength + */ + @Getter + @Setter + @lombok.Builder.Default + protected int tbpttFwdLength = 20; + /** + * When doing truncated BPTT: how many steps of backward should we do?
+ * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
This is the k2 parameter on pg23 of http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf + * + * @param backwardLength <= forwardLength + */ + @Getter + @Setter + @lombok.Builder.Default + protected int tbpttBackLength = 20; + /** + * Creates and returns a copy of this object. + * + * @return a clone of this instance. + * @throws CloneNotSupportedException if the object's class does not support the {@code Cloneable} + * interface. Subclasses that override the {@code clone} method can also throw this exception to + * indicate that an instance cannot be cloned. + * @see Cloneable + */ - //Counter for the number of epochs completed so far. Used for per-epoch schedules - protected int epochCount = 0; + //Nd4j.getRandom().setSeed(getConf(0).getSeed()); //TODO + //Counter for the number of parameter updates so far + // This is important for learning rate schedules, for example, and is stored here to ensure it is persisted + // for Spark and model serialization + @Getter + @Setter + @lombok.Builder.Default + protected int iterationCount = 0; + //Counter for the number of epochs completed so far. Used for per-epoch schedules + @Getter + @Setter + @lombok.Builder.Default + protected int epochCount = 0; + @lombok.Builder.Default + protected double dampingFactor = 100; + //gradient keys used for ensuring order when getting and setting the gradient + @lombok.Builder.Default + protected List netWideVariables = new ArrayList<>(); + @Getter + @Setter + @lombok.Builder.Default + private boolean miniBatch = false; + /** + * A seed for this network, will be random if not specified. + @Getter + @Setter + @lombok.Builder.Default + private long seed = new Random().nextLong(); */ + /** + * The default {@link CacheMode} for this configuration. Will be set to "NONE" if not specified + * otherwise. This method defines how/if preOutput cache is handled: NONE: cache disabled (default + * value) HOST: Host memory will be used DEVICE: GPU memory will be used (on CPU backends effect + * will be the same as for HOST) + *

+ * Valid values are
CacheMode.NONE,
CacheMode.HOST or
CacheMode.DEVICE
+ * + * @param cacheMode + */ + @NonNull + @Getter + @Setter + @lombok.Builder.Default + private CacheMode cacheMode = CacheMode.NONE; + /** + * The list of layer configurations in this configuration. They will be indexed automatically as + * the layers get added starting with index 0. + */ - /** - * Creates and returns a deep copy of the configuration. - */ - @Override - public NeuralNetConfiguration clone() { + @lombok.Builder.Default + @Getter + private String name = "Anonymous INeuralNetworkConfiguration"; + /** + * The {@link InputType} of the data for this network configuration + */ + @Getter + @Setter + private InputType inputType; + /** + * Set the DataType for the network parameters and activations for all layers in the network. + * Default: Float + * + * @param dataType Datatype to use for parameters and activations + */ + @Getter + @Setter + @lombok.Builder.Default + @NonNull + private DataType dataType = DataType.FLOAT; + /** + * Whether to override the nIn configuration forcibly upon construction. Default value is true. + * + * @return builder pattern + */ + @Getter + @Setter + @lombok.Builder.Default + private boolean overrideNinUponBuild = true; + /** + * Enabled by default. If enabled, the output layer configuration will be validated, to throw an + * exception on likely invalid outputs - such as softmax + nOut=1, or LossMCXENT + Tanh.
+ * If disabled (false) no output layer validation will be performed.
Disabling this validation is + * not recommended, as the configurations that fail validation usually will not be able to learn + * correctly. However, the option to disable this validation is provided for advanced users when + * creating non-standard architectures. + * + * @param validate If true: validate output layer configuration. False: don't validate + */ + @Getter + @Setter + @lombok.Builder.Default + private boolean validateOutputLayerConfig = true; + /** + * Enabled by default. If enabled, an exception will be throw when using the (invalid) combination + * of truncated backpropagation through time (TBPTT) with either a GlobalPoolingLayer or + * LastTimeStepLayer.
It is possible to disable this validation to allow what is almost + * certainly an invalid configuration to be used, however this is not recommended. + * + * @param validate Whether TBPTT validation should be performed + */ + @Getter + @Setter + @lombok.Builder.Default + private boolean validateTbpttConfig = true; + /** + * Gradient updater configuration. For example, {@link org.nd4j.linalg.learning.config.Adam} or + * {@link org.nd4j.linalg.learning.config.Nesterovs}
+ * Note: values set by this method will be applied to all applicable layers in the network, unless + * a different value is explicitly set on a given layer. In other words: values set via this + * method are used as the default value, and can be overridden on a per-layer basis. + * + * @param updater Updater to use + */ + @Getter @Setter @Builder.Default private IUpdater updater = new Sgd(); + /** + * Gradient normalization strategy. Used to specify gradient renormalization, gradient clipping + * etc. See {@link GradientNormalization} for details
Note: values set by this method will be + * applied to all applicable layers in the network, unless a different value is explicitly set on + * a given layer. In other words: values set via this method are used as the default value, and + * can be overridden on a per-layer basis. + * + * @param gradientNormalization Type of normalization to use. Defaults to None. + * @see GradientNormalization + */ + @Getter + @Setter + @NonNull + @lombok.Builder.Default + private GradientNormalization gradientNormalization = GradientNormalization.None; + /** + * Threshold for gradient normalization, only used for GradientNormalization.ClipL2PerLayer, + * GradientNormalization.ClipL2PerParamType, and + * GradientNormalization.ClipElementWiseAbsoluteValue
+ * Not used otherwise.
+ * L2 threshold for first two types of clipping, or absolute value threshold for last type of clipping.
Note: + * values set by this method will be applied to all applicable layers in the network, unless a + * different value is explicitly set on a given layer. In other words: values set via this method + * are used as the default value, and can be overridden on a per-layer basis. + */ + @Getter + @Setter + private double gradientNormalizationThreshold; + + // whether to constrain the gradient to unit norm or not + @Getter @Setter @Builder.Default private StepFunction stepFunction = new DefaultStepFunction(); + + @Getter + @Setter + @lombok.Builder.Default + private OptimizationAlgorithm optimizationAlgo = OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT; + @Getter + @Setter + @lombok.Builder.Default + private int maxNumLineSearchIterations = 5; + /** + * Set the regularization for the parameters (excluding biases) - for example {@link WeightDecay}
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis.
+ * + * @param regularization Regularization to apply for the network parameters/weights (excluding biases) + */ + @Getter + @lombok.Builder.Default + private List regularization = new ArrayList<>(); + /** + * Set the regularization for the biases only - for example {@link WeightDecay}
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis.
+ * + * @param regularizationBias Regularization to apply for the network biases only + */ + @Getter + @lombok.Builder.Default + private List regularizationBias = new ArrayList<>(); + + /** + * Gradient updater configuration, for the biases only. If not set, biases will use the updater as + * set by {@link #setUpdater(IUpdater)}
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * + * @param updater Updater to use for bias parameters + */ + @Getter + @Setter + @lombok.Builder.Default + private IUpdater biasUpdater = null; + @Getter + @Setter + @lombok.Builder.Default + private IActivation activation = new ActivationSigmoid(); + + /** + * Sets the convolution mode for convolutional layers, which impacts padding and output sizes. + * See {@link ConvolutionMode} for details. Defaults to ConvolutionMode.TRUNCATE
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * @param convolutionMode Convolution mode to use + */ + @Getter + @Setter + @lombok.Builder.Default + private ConvolutionMode convolutionMode = ConvolutionMode.Truncate; + /** + * Sets the cuDNN algo mode for convolutional layers, which impacts performance and memory usage of cuDNN. + * See {@link ConvolutionLayer.AlgoMode} for details. Defaults to "PREFER_FASTEST", but "NO_WORKSPACE" uses less memory. + *
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * @param cudnnAlgoMode cuDNN algo mode to use + */ + @Getter + @Setter + @lombok.Builder.Default + private ConvolutionLayer.AlgoMode cudnnAlgoMode = ConvolutionLayer.AlgoMode.PREFER_FASTEST; + @Getter + @Setter + @lombok.Builder.Default + private boolean minimize = true; + /** + * Set the dropout for all layers in this network
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * * Dropout probability. This is the probability of retaining each input activation value for a layer. + * * dropOut(x) will keep an input activation with probability x, and set to 0 with probability 1-x.
+ * * dropOut(0.0) is a special value / special case - when set to 0.0., dropout is disabled (not applied). Note + * * that a dropout value of 1.0 is functionally equivalent to no dropout: i.e., 100% probability of retaining + * * each input activation.
+ * *

+ * * Note 1: Dropout is applied at training time only - and is automatically not applied at test time + * * (for evaluation, etc)
+ * * Note 2: This sets the probability per-layer. Care should be taken when setting lower values for + * * complex networks (too much information may be lost with aggressive (very low) dropout values).
+ * * Note 3: Frequently, dropout is not applied to (or, has higher retain probability for) input (first layer) + * * layers. Dropout is also often not applied to output layers. This needs to be handled MANUALLY by the user + * * - set .dropout(0) on those layers when using global dropout setting.
+ * * Note 4: Implementation detail (most users can ignore): DL4J uses inverted dropout, as described here: + * * http://cs231n.github.io/neural-networks-2/ + * *

+ * *
+ * * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * * value, and can be overridden on a per-layer basis. + * * + * * @param inputRetainProbability Dropout probability (probability of retaining each input activation value for a layer) + * * @see #dropOut(IDropout) + * + * + * @param dropout Dropout, such as {@link Dropout}, {@link org.deeplearning4j.nn.conf.dropout.GaussianDropout}, + * {@link org.deeplearning4j.nn.conf.dropout.GaussianNoise} etc + * @return + */ + @Getter + @Setter + private IDropout idropOut; + /** + * Set the weight noise (such as {@link org.deeplearning4j.nn.conf.weightnoise.DropConnect} and + * {@link org.deeplearning4j.nn.conf.weightnoise.WeightNoise}) for the layers in this network.
+ * Note: values set by this method will be applied to all applicable layers in the network, unless a different + * value is explicitly set on a given layer. In other words: values set via this method are used as the default + * value, and can be overridden on a per-layer basis. + * + * @param weightNoise Weight noise instance to use + */ + @Getter + @Setter + private IWeightNoise weightNoise; + @Getter + @Setter + @lombok.Builder.Default + private double biasInit = 0.0; + @Getter + @Setter + @lombok.Builder.Default + private double gainInit = 1.0; + + /** + * Create a neural net configuration from json + * + * @param json the neural net configuration from json + * @return {@link NeuralNetConfiguration} + */ + public static NeuralNetConfiguration fromJson(String json) { + NeuralNetConfiguration conf; + ObjectMapper mapper = NeuralNetConfiguration.mapper(); + try { + conf = mapper.readValue(json, NeuralNetConfiguration.class); + } catch (InvalidTypeIdException e) { + if (e.getMessage().contains("@class")) { try { - NeuralNetConfiguration clone = (NeuralNetConfiguration) super.clone(); - if (clone.layer != null) - clone.layer = clone.layer.clone(); - if (clone.stepFunction != null) - clone.stepFunction = clone.stepFunction.clone(); - if (clone.variables != null) - clone.variables = new ArrayList<>(clone.variables); - return clone; - } catch (CloneNotSupportedException e) { - throw new RuntimeException(e); + //JSON may be legacy (1.0.0-alpha or earlier), attempt to load it using old format + return JsonMappers.getLegacyMapper().readValue(json, NeuralNetConfiguration.class); + } catch (InvalidTypeIdException e2) { + //Check for legacy custom layers: "Could not resolve type id 'CustomLayer' as a subtype of [simple type, class org.deeplearning4j.nn.conf.layers.ILayer]: known type ids = [Bidirectional, CenterLossOutputLayer, CnnLossLayer, ..." + //1.0.0-beta5: dropping support for custom layers defined in pre-1.0.0-beta format. Built-in layers from these formats still work + String msg = e2.getMessage(); + if (msg != null && msg.contains("Could not resolve type id")) { + throw new RuntimeException( + "Error deserializing NeuralNetConfiguration - configuration may have a custom " + + "layer, vertex or preprocessor, in pre version 1.0.0-beta JSON format.\nModels in legacy format with custom" + + + " layers should be loaded in 1.0.0-beta to 1.0.0-beta4 and saved again, before loading in the current version of DL4J", + e); + } + throw new RuntimeException(e2); + } catch (IOException e2) { + throw new RuntimeException(e2); } + } + throw new RuntimeException(e); + } catch (IOException e) { + //Check if this exception came from legacy deserializer... + String msg = e.getMessage(); + if (msg != null && msg.contains("legacy")) { + throw new RuntimeException( + "Error deserializing NeuralNetConfiguration - configuration may have a custom " + + "layer, vertex or preprocessor, in pre version 1.0.0-alpha JSON format. These layers can be " + + + "deserialized by first registering them with NeuralNetConfiguration.registerLegacyCustomClassesForJSON(Class...)", + e); + } + throw new RuntimeException(e); } - public List variables() { - return new ArrayList<>(variables); - } + //To maintain backward compatibility after loss function refactoring (configs generated with v0.5.0 or earlier) + // Previously: enumeration used for loss functions. 
Now: use classes + // IN the past, could have only been an OutputLayer or RnnOutputLayer using these enums + int layerCount = 0; + JsonNode confs = null; + for (LayerConfiguration nnc : conf.getFlattenedLayerConfigurations()) { + LayerConfiguration l = nnc; + if (l instanceof BaseOutputLayer && ((BaseOutputLayer) l).getLossFn() == null) { + //lossFn field null -> may be an old config format, with lossFunction field being for the enum + //if so, try walking the JSON graph to extract out the appropriate enum value - public List variables(boolean copy) { - if (copy) - return variables(); - return variables; - } + BaseOutputLayer ol = (BaseOutputLayer) l; + try { + JsonNode jsonNode = mapper.readTree(json); + if (confs == null) { + confs = jsonNode.get("confs"); + } + if (confs instanceof ArrayNode) { + ArrayNode layerConfs = (ArrayNode) confs; + JsonNode outputLayerNNCNode = layerConfs.get(layerCount); + if (outputLayerNNCNode == null) { + throw new RuntimeException( + "should never happen"); //return conf; //Should never happen... + } + JsonNode outputLayerNode = outputLayerNNCNode.get("layer"); - public void addVariable(String variable) { - if (!variables.contains(variable)) { - variables.add(variable); + JsonNode lossFunctionNode = null; + if (outputLayerNode.has("output")) { + lossFunctionNode = outputLayerNode.get("output").get("lossFunction"); + } else if (outputLayerNode.has("rnnoutput")) { + lossFunctionNode = outputLayerNode.get("rnnoutput").get("lossFunction"); + } + + if (lossFunctionNode != null) { + String lossFunctionEnumStr = lossFunctionNode.asText(); + LossFunctions.LossFunction lossFunction = null; + try { + lossFunction = LossFunctions.LossFunction.valueOf(lossFunctionEnumStr); + } catch (Exception e) { + log.warn( + "OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not parse JSON", + e); + } + + if (lossFunction != null) { + switch (lossFunction) { + case MSE: + ol.setLossFn(new LossMSE()); + break; + case XENT: + ol.setLossFn(new LossBinaryXENT()); + break; + case NEGATIVELOGLIKELIHOOD: + ol.setLossFn(new LossNegativeLogLikelihood()); + break; + case MCXENT: + ol.setLossFn(new LossMCXENT()); + break; + + //Remaining: TODO + case SQUARED_LOSS: + case RECONSTRUCTION_CROSSENTROPY: + default: + log.warn( + "OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not set loss function for {}", + lossFunction); + break; + } + } + } + + } else { + log.warn( + "OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not parse JSON: layer 'confs' field is not an ArrayNode (is: {})", + (confs != null ? 
confs.getClass() : null)); + } + } catch (IOException e) { + log.warn( + "OutputLayer with null LossFunction or pre-0.6.0 loss function configuration detected: could not parse JSON", + e); + break; } + } + + //Also, pre 0.7.2: activation functions were Strings ("activationFunction" field), not classes ("activationFn") + //Try to load the old format if necessary, and create the appropriate IActivation instance + if ((l instanceof BaseLayerConfiguration) && ((BaseLayerConfiguration) l).getActivationFn() == null) { + try { + JsonNode jsonNode = mapper.readTree(json); + if (confs == null) { + confs = jsonNode.get("confs"); + } + if (confs instanceof ArrayNode) { + ArrayNode layerConfs = (ArrayNode) confs; + JsonNode outputLayerNNCNode = layerConfs.get(layerCount); + if (outputLayerNNCNode == null) { + throw new RuntimeException( + "Should never happen"); //return conf; //Should never happen... + } + JsonNode layerWrapperNode = outputLayerNNCNode.get("layer"); + + if (layerWrapperNode == null || layerWrapperNode.size() != 1) { + continue; + } + + JsonNode layerNode = layerWrapperNode.elements().next(); + JsonNode activationFunction = layerNode.get( + "activationFunction"); //Should only have 1 element: "dense", "output", etc + + if (activationFunction != null) { + IActivation ia = Activation.fromString(activationFunction.asText()) + .getActivationFunction(); + ((BaseLayerConfiguration) l).setActivationFn(ia); + } + } + + } catch (IOException e) { + log.warn( + "ILayer with null ActivationFn field or pre-0.7.2 activation function detected: could not parse JSON", + e); + } + } + + if (!handleLegacyWeightInitFromJson(json, l, mapper, confs, layerCount)) { + return conf; + } + + layerCount++; + } + return conf; + } + + /** + * Handle {@link WeightInit} and {@link Distribution} from legacy configs in Json format. Copied + * from handling of {@link Activation} above. + * + * @return True if all is well and layer iteration shall continue. False else-wise. + */ + private static boolean handleLegacyWeightInitFromJson(String json, LayerConfiguration l, + ObjectMapper mapper, + JsonNode confs, int layerCount) { + if ((l instanceof BaseLayerConfiguration) && ((BaseLayerConfiguration) l).getWeightInit() == null) { + try { + JsonNode jsonNode = mapper.readTree(json); + if (confs == null) { + confs = jsonNode.get("confs"); + } + if (confs instanceof ArrayNode) { + ArrayNode layerConfs = (ArrayNode) confs; + JsonNode outputLayerNNCNode = layerConfs.get(layerCount); + if (outputLayerNNCNode == null) { + return false; //Should never happen... 
+ } + JsonNode layerWrapperNode = outputLayerNNCNode.get("layer"); + + if (layerWrapperNode == null || layerWrapperNode.size() != 1) { + return true; + } + + JsonNode layerNode = layerWrapperNode.elements().next(); + JsonNode weightInit = layerNode.get( + "weightInit"); //Should only have 1 element: "dense", "output", etc + JsonNode distribution = layerNode.get("dist"); + + Distribution dist = null; + if (distribution != null) { + dist = mapper.treeToValue(distribution, Distribution.class); + } + + if (weightInit != null) { + final IWeightInit wi = WeightInit.valueOf(weightInit.asText()) + .getWeightInitFunction(dist); + ((BaseLayerConfiguration) l).setWeightInit(wi); + } + } + + } catch (IOException e) { + log.warn( + "ILayer with null WeightInit detected: " + l.getLayerName() + ", could not parse JSON", + e); + } + } + return true; + + } + + /** + * Object mapper for serialization of configurations + * + * @return + */ + public static ObjectMapper mapperYaml() { + return JsonMappers.getMapperYaml(); + } + + /** + * Object mapper for serialization of configurations + * + * @return + */ + public static ObjectMapper mapper() { + return JsonMappers.getMapper(); + } + + public static NeuralNetConfiguration fromYaml(String input) { + throw new RuntimeException("Needs fixing - not supported."); //TODO + } + + + /** + * @return JSON representation of NN configuration + */ + public String toYaml() { + ObjectMapper mapper = NeuralNetConfiguration.mapperYaml(); + synchronized (mapper) { + try { + return mapper.writeValueAsString(this); + } catch (com.fasterxml.jackson.core.JsonProcessingException e) { + throw new RuntimeException(e); + } + } + } + + /** + * @return JSON representation of NN configuration + */ + public String toJson() { + ObjectMapper mapper = NeuralNetConfiguration.mapper(); + synchronized (mapper) { + //JSON mappers are supposed to be thread safe: however, in practice they seem to miss fields occasionally + //when writeValueAsString is used by multiple threads. This results in invalid JSON. 
See issue #3243 + try { + return mapper.writeValueAsString(this); + } catch (com.fasterxml.jackson.core.JsonProcessingException e) { + log.error(e.getMessage()); + throw new RuntimeException(e); + } + } + } + + @Override + public String toString() { + return toJson(); + } + + @Override + public NeuralNetConfiguration clone() { + NeuralNetConfiguration clone; + clone = (NeuralNetConfiguration) super.clone(); + clone.stepFunction = clone.stepFunction.clone(); + clone.netWideVariables = new ArrayList<>(netWideVariables); + clone.getInnerConfigurations().addAll(innerConfigurations); + + if (clone.getInputPreProcessors() != null) { + Map map = new HashMap<>(); + for (Map.Entry entry : clone.getInputPreProcessors().entrySet()) { + map.put(entry.getKey(), entry.getValue().clone()); + } + clone.getInputPreProcessors().clear(); + clone.getInputPreProcessors().putAll(map); } - public void clearVariables() { - variables.clear(); - } + clone.setInferenceWorkspaceMode(this.inferenceWorkspaceMode); + clone.setTrainingWorkspaceMode(this.trainingWorkspaceMode); + clone.setCacheMode(this.cacheMode); + clone.setValidateOutputLayerConfig(this.validateOutputLayerConfig); + clone.setDataType(this.dataType); + + return clone; + + } + + /** + * + */ + @Override + public void init() { + if(initCalled) return; + initCalled=true; /** - * Fluent interface for building a list of configurations + * Run init() for each layer */ - public static class ListBuilder extends MultiLayerConfiguration.Builder { - private int layerCounter = -1; //Used only for .layer(Layer) method - private final Map layerwise; - private final Builder globalConfig; - // Constructor - public ListBuilder(Builder globalConfig, Map layerMap) { - this.globalConfig = globalConfig; - this.layerwise = layerMap; + getNetConfigurations().stream().forEach( conf -> { + conf.init(); //do not call on self + }); //call init on all embedded net configurations + innerConfigurations.add(0, this); //put this configuration at first place + + /** + * Inherit network wide configuration setting to those layer configurations + * that do not have an individual setting (nor a default) + */ + for(LayerConfiguration lconf : this.getFlattenedLayerConfigurations()) { + if(lconf.getActivationFn() == null ) lconf.setActivationFn(this.getActivation()); + if(lconf.getIUpdater() == null ) lconf.setIUpdater( this.getUpdater() ); + if(lconf.getIDropout() == null ) lconf.setIDropout( this.getIdropOut() ); + if(lconf.getWeightNoise() == null ) lconf.setWeightNoise( this.getWeightNoise()); + + // ... maybe more to set here ... + if(lconf instanceof BaseLayerConfiguration ) { // then we can set some additional config settings + BaseLayerConfiguration bconf = (BaseLayerConfiguration) lconf; + if(bconf.getBiasUpdater() == null) bconf.setBiasUpdater(this.getBiasUpdater()); + if(bconf.getGradientNormalization() == null) bconf.setGradientNormalization(this.getGradientNormalization()); + // ... maybe more to set here ... + } + } + + + + getLayerConfigurations().stream().forEach( lconf -> lconf.setNetConfiguration(this)); //set this as net config for all layers (defined in here, not stacked + + + //Validate BackpropType setting + if ((tbpttBackLength != DEFAULT_TBPTT_LENGTH || tbpttFwdLength != DEFAULT_TBPTT_LENGTH) + && backpropType != BackpropType.TruncatedBPTT) { + log.warn("Truncated backpropagation through time lengths have been configured with values " + + tbpttFwdLength + + " and " + tbpttBackLength + " but backprop type is set to " + backpropType + + ". 
TBPTT configuration" +
+        " settings will only take effect if backprop type is set to BackpropType.TruncatedBPTT");
+    }
+
+    if (backpropType == BackpropType.TruncatedBPTT && validateTbpttConfig) {
+      //Check for invalid combination - TBPTT plus LastTimeStep or GlobalPoolingLayer
+      for (int i = 0; i < getFlattenedLayerConfigurations().size(); i++) {
+        LayerConfiguration l = getFlattenedLayerConfigurations().get(i);
+        if (l instanceof LastTimeStep || l instanceof GlobalPoolingLayer) {
+          throw new IllegalStateException(
+              "Invalid network configuration detected: Truncated backpropagation through time (TBPTT)"
+                  + " cannot be used with layer " + i + " of type " + l.getClass().getName()
+                  + ": TBPTT is incompatible with this layer type (which is designed "
+                  + "to process entire sequences at once, and does not support the type of sequence segments that TBPTT uses).\n"
+                  + "This check can be disabled using validateTbpttConfig(false) but this is not recommended.");
+        }
+      }
+    }
+
+    if (inputType == null && inputPreProcessors.get(0) == null) {
+      //User hasn't set the InputType. Sometimes we can infer it...
+      // For example, Dense/RNN layers, where preprocessor isn't set -> user is *probably* going to feed in
+      // standard feedforward or RNN data
+      //This isn't the most elegant implementation, but should avoid breaking backward compatibility here
+      //Can't infer InputType for CNN layers, however (don't know image dimensions/depth)
+      LayerConfiguration firstLayer = getFlattenedLayerConfigurations().get(0);
+      if (firstLayer instanceof BaseRecurrentLayer) {
+        BaseRecurrentLayer brl = (BaseRecurrentLayer) firstLayer;
+        val nIn = brl.getNIn();
+        if (nIn > 0) {
+          inputType = InputType.recurrent(nIn, brl.getRnnDataFormat());
+        }
+      } else if (firstLayer instanceof DenseLayer || firstLayer instanceof EmbeddingLayer
+          || firstLayer instanceof OutputLayer) {
+        //Can't just use "instanceof FeedForwardLayer" here. ConvolutionLayer is also a FeedForwardLayer
+        FeedForwardLayer ffl = (FeedForwardLayer) firstLayer;
+        val nIn = ffl.getNIn();
+        if (nIn > 0) {
+          inputType = InputType.feedForward(nIn);
+        }
+      }
+    }
+
+    //Add preprocessors and set nIns, if InputType has been set
+    // Builder.inputType field can be set in 1 of 4 ways:
+    // 1. User calls setInputType directly
+    // 2. Via ConvolutionLayerSetup -> internally calls setInputType(InputType.convolutional(...))
+    // 3. Via the above code: i.e., assume input is as expected by the RNN or dense layer -> sets the inputType field
+    if(inputPreProcessors == null) {
+      inputPreProcessors = new HashMap<>();
+    }
+    if (inputType != null) {
+      InputType currentInputType = inputType;
+      for (int i = 0; i < getFlattenedLayerConfigurations().size(); i++) {
+        LayerConfiguration l = getFlattenedLayerConfigurations().get(i);
+        if (inputPreProcessors.get(i) == null) {
+          //Don't override preprocessor setting, but set preprocessor if required...
+ @NonNull + InputPreProcessor inputPreProcessor = l.getPreProcessorForInputType(currentInputType); + if (inputPreProcessor != null) { + inputPreProcessors.put(i, inputPreProcessor); + } } - public ListBuilder(Builder globalConfig) { - this(globalConfig, new HashMap()); + InputPreProcessor inputPreProcessor = inputPreProcessors.get(i); + if (inputPreProcessor != null) { + currentInputType = inputPreProcessor.getOutputType(currentInputType); } - - public ListBuilder layer(int ind, @NonNull Layer layer) { - if (layerwise.containsKey(ind)) { - log.info("Layer index {} already exists, layer of type {} will be replace by layer type {}", - ind, layerwise.get(ind).getClass().getSimpleName(), layer.getClass().getSimpleName()); - layerwise.get(ind).layer(layer); + if (i > 0) { + LayerConfiguration layer = getFlattenedLayerConfigurations().get(i - 1); + //convolution 1d is an edge case where it has rnn input type but the filters + //should be the output + if (layer instanceof Convolution1DLayer) { + if (l instanceof DenseLayer && inputType instanceof InputType.InputTypeRecurrent) { + FeedForwardLayer feedForwardLayer = (FeedForwardLayer) l; + if (inputType instanceof InputType.InputTypeRecurrent) { + InputType.InputTypeRecurrent recurrent = (InputType.InputTypeRecurrent) inputType; + feedForwardLayer.setNIn(recurrent.getTimeSeriesLength()); + } } else { - layerwise.put(ind, globalConfig.clone().layer(layer)); + l.setNIn(currentInputType, + overrideNinUponBuild); //Don't override the nIn setting, if it's manually set by the user } - if(layerCounter < ind){ - //Edge case: user is mixing .layer(Layer) and .layer(int, Layer) calls - //This should allow a .layer(A, X) and .layer(Y) to work such that layer Y is index (A+1) - layerCounter = ind; - } - return this; + } else { + l.setNIn(currentInputType, + overrideNinUponBuild); //Don't override the nIn setting, if it's manually set by the user + } + + } else { + l.setNIn(currentInputType, + overrideNinUponBuild); //Don't override the nIn setting, if it's manually set by the user } - public ListBuilder layer(Layer layer){ - return layer(++layerCounter, layer); - } + currentInputType = l.getOutputType(i, currentInputType); + } - public Map getLayerwise() { - return layerwise; - } - - @Override - public ListBuilder setInputType(InputType inputType){ - return (ListBuilder)super.setInputType(inputType); - } - - /** - * A convenience method for setting input types: note that for example .inputType().convolutional(h,w,d) - * is equivalent to .setInputType(InputType.convolutional(h,w,d)) - */ - public ListBuilder.InputTypeBuilder inputType(){ - return new InputTypeBuilder(); - } - - /** - * For the (perhaps partially constructed) network configuration, return a list of activation sizes for each - * layer in the network.
- * Note: To use this method, the network input type must have been set using {@link #setInputType(InputType)} first - * @return A list of activation types for the network, indexed by layer number - */ - public List getLayerActivationTypes(){ - Preconditions.checkState(inputType != null, "Can only calculate activation types if input type has" + - "been set. Use setInputType(InputType)"); - - MultiLayerConfiguration conf; - try{ - conf = build(); - } catch (Exception e){ - throw new RuntimeException("Error calculating layer activation types: error instantiating MultiLayerConfiguration", e); - } - - return conf.getLayerActivationTypes(inputType); - } - - /** - * Build the multi layer network - * based on this neural network and - * overr ridden parameters - * - * @return the configuration to build - */ - public MultiLayerConfiguration build() { - List list = new ArrayList<>(); - if (layerwise.isEmpty()) - throw new IllegalStateException("Invalid configuration: no layers defined"); - for (int i = 0; i < layerwise.size(); i++) { - if (layerwise.get(i) == null) { - throw new IllegalStateException("Invalid configuration: layer number " + i - + " not specified. Expect layer " + "numbers to be 0 to " + (layerwise.size() - 1) - + " inclusive (number of layers defined: " + layerwise.size() + ")"); - } - if (layerwise.get(i).getLayer() == null) - throw new IllegalStateException("Cannot construct network: Layer config for" + "layer with index " - + i + " is not defined)"); - - //Layer names: set to default, if not set - if (layerwise.get(i).getLayer().getLayerName() == null) { - layerwise.get(i).getLayer().setLayerName("layer" + i); - } - - list.add(layerwise.get(i).build()); - } - - WorkspaceMode wsmTrain = (globalConfig.setTWM ? globalConfig.trainingWorkspaceMode : trainingWorkspaceMode); - WorkspaceMode wsmTest = (globalConfig.setIWM ? 
globalConfig.inferenceWorkspaceMode : inferenceWorkspaceMode); - - - return new MultiLayerConfiguration.Builder().inputPreProcessors(inputPreProcessors) - .backpropType(backpropType).tBPTTForwardLength(tbpttFwdLength) - .tBPTTBackwardLength(tbpttBackLength).setInputType(this.inputType) - .trainingWorkspaceMode(wsmTrain).cacheMode(globalConfig.cacheMode) - .inferenceWorkspaceMode(wsmTest).confs(list).validateOutputLayerConfig(validateOutputConfig) - .dataType(globalConfig.dataType) - .build(); - } - - /** Helper class for setting input types */ - public class InputTypeBuilder { - /** - * See {@link InputType#convolutional(long, long, long)} - */ - public ListBuilder convolutional(int height, int width, int depth){ - return ListBuilder.this.setInputType(InputType.convolutional(height, width, depth)); - } - - /** - * * See {@link InputType#convolutionalFlat(long, long, long)} - */ - public ListBuilder convolutionalFlat(int height, int width, int depth){ - return ListBuilder.this.setInputType(InputType.convolutionalFlat(height, width, depth)); - } - - /** - * See {@link InputType#feedForward(long)} - */ - public ListBuilder feedForward(int size){ - return ListBuilder.this.setInputType(InputType.feedForward(size)); - } - - /** - * See {@link InputType#recurrent(long)}} - */ - public ListBuilder recurrent(int size){ - return ListBuilder.this.setInputType(InputType.recurrent(size)); - } - } } - /** - * Return this configuration as json - * - * @return this configuration represented as json - */ - public String toYaml() { - ObjectMapper mapper = mapperYaml(); + Nd4j.getRandom().setSeed(getNetConfigurations().get(0).getSeed()); - try { - String ret = mapper.writeValueAsString(this); - return ret; + //Validate output layer configuration + if (isValidateOutputLayerConfig()) { + //Validate output layer configurations... + for (LayerConfiguration n : getFlattenedLayerConfigurations()) { + OutputLayerUtil.validateOutputLayer(n.getLayerName(), n); //No-op for non output/loss layers + } + } + } - } catch (com.fasterxml.jackson.core.JsonProcessingException e) { - throw new RuntimeException(e); - } + public InputPreProcessor getInputPreProcess(int curr) { + return inputPreProcessors.get(curr); + } + + /** + * Get a {@link MemoryReport} for the given NeuralNetConfiguration. 
This is used to estimate the + * memory requirements for the given network configuration and input + * + * @param inputType Input types for the network + * @return Memory report for the network + */ + public NetworkMemoryReport getMemoryReport(InputType inputType) { + + Map memoryReportMap = new LinkedHashMap<>(); + int nLayers = getFlattenedLayerConfigurations().size(); + for (int i = 0; i < nLayers; i++) { + String layerName = getFlattenedLayerConfigurations().get(i).getLayerName(); + if (layerName == null) { + layerName = String.valueOf(i); + } + + //Pass input type through preprocessor, if necessary + InputPreProcessor preproc = getInputPreProcess(i); + //TODO memory requirements for preprocessor + if (preproc != null) { + inputType = preproc.getOutputType(inputType); + } + + LayerMemoryReport report = getFlattenedLayerConfigurations().get(i).getMemoryReport(inputType); + memoryReportMap.put(layerName, report); + + inputType = getFlattenedLayerConfigurations().get(i).getOutputType(i, inputType); } - /** - * Create a neural net configuration from json - * - * @param json the neural net configuration from json - * @return - */ - public static NeuralNetConfiguration fromYaml(String json) { - ObjectMapper mapper = mapperYaml(); - try { - NeuralNetConfiguration ret = mapper.readValue(json, NeuralNetConfiguration.class); - return ret; - } catch (IOException e) { - throw new RuntimeException(e); - } + return new NetworkMemoryReport(memoryReportMap, NeuralNetConfiguration.class, + "MultiLayerNetwork", inputType); + } + + /** + * For the given input shape/type for the network, return a list of activation sizes for each + * layer in the network.
i.e., list.get(i) is the output activation sizes for layer i
+   *
+   * @param inputType Input type for the network
+   * @return A list of activation types for the network, indexed by layer number
+   */
+  public List<InputType> getLayerActivationTypes(@NonNull InputType inputType) {
+    List<InputType> out = new ArrayList<>();
+    int nLayers = getFlattenedLayerConfigurations().size();
+    for (int i = 0; i < nLayers; i++) {
+      InputPreProcessor preproc = getInputPreProcess(i);
+      if (preproc != null) {
+        inputType = preproc.getOutputType(inputType);
+      }
+
+      inputType = getFlattenedLayerConfigurations().get(i).getOutputType(i, inputType);
+      out.add(inputType);
+    }
+    return out;
+  }
+
+
+  public List<String> netWideVariables() {
+
+    return netWideVariables;
+  }
+
+  public List<String> netWideVariables(boolean copy) {
+    if (copy) {
+      return netWideVariables();
+    }
+    return netWideVariables;
+  }
+
+  public void addNetWideVariable(String variable) {
+    if (!netWideVariables.contains(variable)) {
+      netWideVariables.add(variable);
+      log.trace("Adding neural network wide variable '{}' to the list of variables. New length is {}.", variable, netWideVariables.size());
+    } else {
+      log.trace("Skipped adding neural network wide variable '{}' to the list of variables. It was already present. Length remains {}.", variable, netWideVariables.size());
+    }
+  }
+
+  public void clearNetWideVariable() {
+
+    netWideVariables.clear();
+    log.trace("Neural network wide variables have been cleared. New length is {}.", netWideVariables.size());
+  }
+
+
+
+  /**
+   * From the list of layers and neural net configurations, only return the Layer Configurations that
+   * are defined in this neural network (it does not include embedded neural network configuration
+   * layers)
+   * @return list with layer configurations
+   */
+  public List<LayerConfiguration> getLayerConfigurations() {
+    return innerConfigurations.stream()
+        .filter(obj -> (obj instanceof LayerConfiguration))
+        .map( obj -> (LayerConfiguration)obj )
+        .collect( Collectors.toList());
+  }
+
+  /**
+   * From the list of layers and neural net configurations, only return the neural net configurations
+   * @return list with neural net configurations
+   */
+  public List<NeuralNetConfiguration> getNetConfigurations() {
+    return innerConfigurations.stream()
+        .filter(obj -> (obj instanceof NeuralNetConfiguration))
+        .map( obj -> (NeuralNetConfiguration)obj )
+        .collect( Collectors.toList());
+  }
+
+  /**
+   * From the list of layer configurations and inner neural net configurations, create a single,
+   * flattened list of layer configurations with inheritance parameters resolved
+   *
+   * @return list of layer configurations
+   */
+  public List<LayerConfiguration> getFlattenedLayerConfigurations(NeuralNetConfiguration conf) {
+    List<LayerConfiguration> ret = new ArrayList<>(); //create the final return list
+    //When properly initialized, _this_ configuration is set first in the list. However, there are
+    //cases where this is not true and the first entry is another net or layer configuration,
+    //in which case it must not be skipped. In essence: skip the first configuration only if it is "this".
+    int iSkip = 0;
+    if(conf.getInnerConfigurations().size()>0 && conf.getInnerConfigurations().get(0).equals(this)) { iSkip=1;}
+    conf.getInnerConfigurations().stream().skip(iSkip)
+        .forEach(obj -> {
+          //if this is a layer configuration, include it in the list and inherit parameters from this conf;
+          //if it is a nested neural net configuration, recurse to resolve its layer configurations
+          if (obj instanceof LayerConfiguration) {
+            ((LayerConfiguration) obj).setNetConfiguration(conf);
+            ret.add((LayerConfiguration) obj);
+          } else if (obj instanceof NeuralNetConfiguration)
+            ret.addAll(getFlattenedLayerConfigurations(
+                (NeuralNetConfiguration) obj));
+          else {
+            log.error(
+                "The list of layers and neural network configurations contains an object of type {}, which is not supported. Element will be ignored.",
+                obj.getClass().getSimpleName());
+          }
+        });
+    return ret;
+  }
+
+  /**
+   * Same as {@link #getFlattenedLayerConfigurations(NeuralNetConfiguration)}, but uses this
+   * configuration's own list of configurations
+   * @return list of layer configurations
+   */
+  public List<LayerConfiguration> getFlattenedLayerConfigurations() {
+    return getFlattenedLayerConfigurations(this);
+  }
+
+  /**
+   * Add a new layer to the first position
+   * @param layer the layer configuration to add
+   */
+  public void setLayer(@NonNull LayerConfiguration layer) {
+    innerConfigurations.add(0, layer);
+  }
+
+  @Deprecated
+  public LayerConfiguration getConf(int index) {
+    return getFlattenedLayerConfigurations().get(index);
+  }
+
+  /**
+   * Deprecated, do not use. Workaround for old tests; equivalent to
+   * getFlattenedLayerConfigurations().get(0).
+   * @return the first layer configuration
+   */
+  @Deprecated @JsonIgnore
+  public LayerConfiguration getFirstLayer() {
+    log.warn("This getFirstLayer method is an ugly workaround and will be removed.");
+    return getFlattenedLayerConfigurations().get(0);
+  }
+
+  public static abstract class NeuralNetConfigurationBuilder> extends
+      NeuralNetBaseBuilderConfigurationBuilder {
+
+    public ComputationGraphConfiguration.GraphBuilder graphBuilder() {
+      return new ComputationGraphConfiguration.GraphBuilder(this);
    }
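// --- Illustrative usage sketch (annotation, not part of the patch): a minimal round trip through
// --- the refactored NeuralNetConfiguration API introduced in this diff. It only uses methods added
// --- above (init(), toJson(), fromJson(), getFlattenedLayerConfigurations(), getLayerName()).
// --- The import paths and the way `conf` itself is built (Lombok-generated builder) are assumptions.
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;          // assumed package location
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;        // assumed package location

public class NeuralNetConfigurationRoundTripSketch {

  /** Serializes an already-built configuration to JSON, restores it, and walks the flattened layer list. */
  public static void roundTrip(NeuralNetConfiguration conf) {
    conf.init();                   // resolve net-wide defaults into the individual layer configurations
    String json = conf.toJson();   // serialization goes through the shared, synchronized mapper
    NeuralNetConfiguration restored = NeuralNetConfiguration.fromJson(json); // legacy formats are handled as well
    for (LayerConfiguration lc : restored.getFlattenedLayerConfigurations()) {
      System.out.println(lc.getLayerName() + " -> " + lc.getClass().getSimpleName());
    }
  }
}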
- * Note that values set here on the layer will be applied to all relevant layers - unless the value is overridden - * on a layer's configuration - */ - @Data - public static class Builder implements Cloneable { - protected IActivation activationFn = new ActivationSigmoid(); - protected IWeightInit weightInitFn = new WeightInitXavier(); - protected double biasInit = 0.0; - protected double gainInit = 1.0; - protected List regularization = new ArrayList<>(); - protected List regularizationBias = new ArrayList<>(); - protected IDropout idropOut; - protected IWeightNoise weightNoise; - protected IUpdater iUpdater = new Sgd(); - protected IUpdater biasUpdater = null; - protected Layer layer; - protected boolean miniBatch = true; - protected int maxNumLineSearchIterations = 5; - protected long seed = System.currentTimeMillis(); - protected OptimizationAlgorithm optimizationAlgo = OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT; - protected StepFunction stepFunction = null; - protected boolean minimize = true; - protected GradientNormalization gradientNormalization = GradientNormalization.None; - protected double gradientNormalizationThreshold = 1.0; - protected List allParamConstraints; - protected List weightConstraints; - protected List biasConstraints; - - protected WorkspaceMode trainingWorkspaceMode = WorkspaceMode.ENABLED; - protected WorkspaceMode inferenceWorkspaceMode = WorkspaceMode.ENABLED; - protected boolean setTWM = false; - protected boolean setIWM = false; - protected CacheMode cacheMode = CacheMode.NONE; - protected DataType dataType = DataType.FLOAT; - - protected ConvolutionMode convolutionMode = ConvolutionMode.Truncate; - protected ConvolutionLayer.AlgoMode cudnnAlgoMode = ConvolutionLayer.AlgoMode.PREFER_FASTEST; - - public Builder() { - // - } - - public Builder(NeuralNetConfiguration newConf) { - if (newConf != null) { - minimize = newConf.minimize; - maxNumLineSearchIterations = newConf.maxNumLineSearchIterations; - layer = newConf.layer; - optimizationAlgo = newConf.optimizationAlgo; - seed = newConf.seed; - stepFunction = newConf.stepFunction; - miniBatch = newConf.miniBatch; - } - } - - /** - * Process input as minibatch vs full dataset. - * Default set to true. - */ - public Builder miniBatch(boolean miniBatch) { - this.miniBatch = miniBatch; - return this; - } - - /** - * This method defines Workspace mode being used during training:
- * NONE: workspace won't be used
- * ENABLED: workspaces will be used for training (reduced memory and better performance) - * - * @param workspaceMode Workspace mode for training - * @return Builder - */ - public Builder trainingWorkspaceMode(@NonNull WorkspaceMode workspaceMode) { - this.trainingWorkspaceMode = workspaceMode; - this.setTWM = true; - return this; - } - - /** - * This method defines Workspace mode being used during inference:
- * NONE: workspace won't be used
- * ENABLED: workspaces will be used for inference (reduced memory and better performance) - * - * @param workspaceMode Workspace mode for inference - * @return Builder - */ - public Builder inferenceWorkspaceMode(@NonNull WorkspaceMode workspaceMode) { - this.inferenceWorkspaceMode = workspaceMode; - this.setIWM = true; - return this; - } - - /** - * This method defines how/if preOutput cache is handled: - * NONE: cache disabled (default value) - * HOST: Host memory will be used - * DEVICE: GPU memory will be used (on CPU backends effect will be the same as for HOST) - * - * @param cacheMode Cache mode to use - * @return Builder - */ - public Builder cacheMode(@NonNull CacheMode cacheMode) { - this.cacheMode = cacheMode; - return this; - } - - /** - * Objective function to minimize or maximize cost function - * Default set to minimize true. - */ - public Builder minimize(boolean minimize) { - this.minimize = minimize; - return this; - } - - /** - * Maximum number of line search iterations. - * Only applies for line search optimizers: Line Search SGD, Conjugate Gradient, LBFGS - * is NOT applicable for standard SGD - * - * @param maxNumLineSearchIterations > 0 - * @return - */ - public Builder maxNumLineSearchIterations(int maxNumLineSearchIterations) { - this.maxNumLineSearchIterations = maxNumLineSearchIterations; - return this; - } - - - /** - * Layer class. - */ - public Builder layer(Layer layer) { - this.layer = layer; - return this; - } - - /** - * Step function to apply for back track line search. - * Only applies for line search optimizers: Line Search SGD, Conjugate Gradient, LBFGS - * Options: DefaultStepFunction (default), NegativeDefaultStepFunction - * GradientStepFunction (for SGD), NegativeGradientStepFunction - */ - @Deprecated - public Builder stepFunction(StepFunction stepFunction) { - this.stepFunction = stepFunction; - return this; - } - - /** - * Create a ListBuilder (for creating a MultiLayerConfiguration)
- * Usage:
- *
-         * {@code .list()
-         * .layer(new DenseLayer.Builder()...build())
-         * ...
-         * .layer(new OutputLayer.Builder()...build())
-         * }
-         * 
- */ - public ListBuilder list() { - return new ListBuilder(this); - } - - /** - * Create a ListBuilder (for creating a MultiLayerConfiguration) with the specified layers
- * Usage:
- *
-         * {@code .list(
-         *      new DenseLayer.Builder()...build(),
-         *      ...,
-         *      new OutputLayer.Builder()...build())
-         * }
-         * 
- * - * @param layers The layer configurations for the network - */ - public ListBuilder list(Layer... layers) { - if (layers == null || layers.length == 0) - throw new IllegalArgumentException("Cannot create network with no layers"); - Map layerMap = new HashMap<>(); - for (int i = 0; i < layers.length; i++) { - Builder b = this.clone(); - b.layer(layers[i]); - layerMap.put(i, b); - } - return new ListBuilder(this, layerMap); - - } - - /** - * Create a GraphBuilder (for creating a ComputationGraphConfiguration). - */ - public ComputationGraphConfiguration.GraphBuilder graphBuilder() { - return new ComputationGraphConfiguration.GraphBuilder(this); - } - - /** - * Random number generator seed. Used for reproducability between runs - */ - public Builder seed(long seed) { - this.seed = seed; - Nd4j.getRandom().setSeed(seed); - return this; - } - - /** - * Optimization algorithm to use. Most common: OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT - * - * @param optimizationAlgo Optimization algorithm to use when training - */ - public Builder optimizationAlgo(OptimizationAlgorithm optimizationAlgo) { - this.optimizationAlgo = optimizationAlgo; - return this; - } - - @Override - public Builder clone() { - try { - Builder clone = (Builder) super.clone(); - if (clone.layer != null) - clone.layer = clone.layer.clone(); - if (clone.stepFunction != null) - clone.stepFunction = clone.stepFunction.clone(); - - return clone; - - } catch (CloneNotSupportedException e) { - throw new RuntimeException(e); - } - } - - /** - * Activation function / neuron non-linearity
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @see #activation(Activation) - */ - public Builder activation(IActivation activationFunction) { - this.activationFn = activationFunction; - return this; - } - - /** - * Activation function / neuron non-linearity
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - */ - public Builder activation(Activation activation) { - return activation(activation.getActivationFunction()); - } - - - /** - * Weight initialization scheme to use, for initial weight values - * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @see IWeightInit - */ - public Builder weightInit(IWeightInit weightInit) { - this.weightInitFn = weightInit; - return this; - } - - /** - * Weight initialization scheme to use, for initial weight values - * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @see WeightInit - */ - public Builder weightInit(WeightInit weightInit) { - if(weightInit == WeightInit.DISTRIBUTION) { - // throw new UnsupportedOperationException("Not supported!, Use weightInit(Distribution distribution) instead!"); - } - - this.weightInitFn = weightInit.getWeightInitFunction(); - return this; - } - - /** - * Set weight initialization scheme to random sampling via the specified distribution. - * Equivalent to: {@code .weightInit(new WeightInitDistribution(distribution))} - * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param distribution Distribution to use for weight initialization - */ - public Builder weightInit(Distribution distribution){ - return weightInit(new WeightInitDistribution(distribution)); - } - - /** - * Constant for bias initialization. Default: 0.0
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param biasInit Constant for bias initialization - */ - public Builder biasInit(double biasInit) { - this.biasInit = biasInit; - return this; - } - - /** - * Distribution to sample initial weights from. - * Equivalent to: {@code .weightInit(new WeightInitDistribution(distribution))}.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @see #weightInit(Distribution) - * @deprecated Use {@link #weightInit(Distribution)} - */ - @Deprecated - public Builder dist(Distribution dist) { - return weightInit(dist); - } - - /** - * L1 regularization coefficient for the weights (excluding biases).
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - */ - public Builder l1(double l1) { - //Check if existing L1 exists; if so, replace it - NetworkUtils.removeInstances(this.regularization, L1Regularization.class); - if(l1 > 0.0) { - this.regularization.add(new L1Regularization(l1)); - } - return this; - } - - /** - * L2 regularization coefficient for the weights (excluding biases).
- * Note: Generally, {@link WeightDecay} (set via {@link #weightDecay(double)} should be preferred to - * L2 regularization. See {@link WeightDecay} javadoc for further details.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * Note: L2 regularization and weight decay usually should not be used together; if any weight decay (or L2) has - * been added for the biases, these will be removed first. - * - * @see #weightDecay(double, boolean) - */ - public Builder l2(double l2) { - //Check if existing L2 exists; if so, replace it. Also remove weight decay - it doesn't make sense to use both - NetworkUtils.removeInstances(this.regularization, L2Regularization.class); - if(l2 > 0.0) { - NetworkUtils.removeInstancesWithWarning(this.regularization, WeightDecay.class, "WeightDecay regularization removed: incompatible with added L2 regularization"); - this.regularization.add(new L2Regularization(l2)); - } - return this; - } - - /** - * L1 regularization coefficient for the bias.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - */ - public Builder l1Bias(double l1Bias) { - NetworkUtils.removeInstances(this.regularizationBias, L1Regularization.class); - if(l1Bias > 0.0) { - this.regularizationBias.add(new L1Regularization(l1Bias)); - } - return this; - } - - /** - * L2 regularization coefficient for the bias.
- * Note: Generally, {@link WeightDecay} (set via {@link #weightDecayBias(double,boolean)} should be preferred to - * L2 regularization. See {@link WeightDecay} javadoc for further details.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * Note: L2 regularization and weight decay usually should not be used together; if any weight decay (or L2) has - * been added for the biases, these will be removed first. - * - * @see #weightDecayBias(double, boolean) - */ - public Builder l2Bias(double l2Bias) { - NetworkUtils.removeInstances(this.regularizationBias, L2Regularization.class); - if(l2Bias > 0.0) { - NetworkUtils.removeInstancesWithWarning(this.regularizationBias, WeightDecay.class, "L2 bias regularization removed: incompatible with added WeightDecay regularization"); - this.regularizationBias.add(new L2Regularization(l2Bias)); - } - return this; - } - - /** - * Add weight decay regularization for the network parameters (excluding biases).
- * This applies weight decay with multiplying the learning rate - see {@link WeightDecay} for more details.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * - * @param coefficient Weight decay regularization coefficient - * @see #weightDecay(double, boolean) - */ - public Builder weightDecay(double coefficient) { - return weightDecay(coefficient, true); - } - - /** - * Add weight decay regularization for the network parameters (excluding biases). See {@link WeightDecay} for more details.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * - * @param coefficient Weight decay regularization coefficient - * @param applyLR Whether the learning rate should be multiplied in when performing weight decay updates. See {@link WeightDecay} for more details. - * @see #weightDecay(double, boolean) - */ - public Builder weightDecay(double coefficient, boolean applyLR) { - //Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't make sense to use both - NetworkUtils.removeInstances(this.regularization, WeightDecay.class); - if(coefficient > 0.0) { - NetworkUtils.removeInstancesWithWarning(this.regularization, L2Regularization.class, "L2 regularization removed: incompatible with added WeightDecay regularization"); - this.regularization.add(new WeightDecay(coefficient, applyLR)); - } - return this; - } - - /** - * Weight decay for the biases only - see {@link #weightDecay(double)} for more details. - * This applies weight decay with multiplying the learning rate.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * - * @param coefficient Weight decay regularization coefficient - * @see #weightDecayBias(double, boolean) - */ - public Builder weightDecayBias(double coefficient) { - return weightDecayBias(coefficient, true); - } - - /** - * Weight decay for the biases only - see {@link #weightDecay(double)} for more details
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * - * @param coefficient Weight decay regularization coefficient - */ - public Builder weightDecayBias(double coefficient, boolean applyLR) { - //Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't make sense to use both - NetworkUtils.removeInstances(this.regularizationBias, WeightDecay.class); - if(coefficient > 0) { - NetworkUtils.removeInstancesWithWarning(this.regularizationBias, L2Regularization.class, "L2 bias regularization removed: incompatible with added WeightDecay regularization"); - this.regularizationBias.add(new WeightDecay(coefficient, applyLR)); - } - return this; - } - - /** - * Set the regularization for the parameters (excluding biases) - for example {@link WeightDecay}
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * - * @param regularization Regularization to apply for the network parameters/weights (excluding biases) - */ - public Builder regularization(List regularization) { - this.regularization = regularization; - return this; - } - - /** - * Set the regularization for the biases only - for example {@link WeightDecay}
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis.
- * - * @param regularizationBias Regularization to apply for the network biases only - */ - public Builder regularizationBias(List regularizationBias) { - this.regularizationBias = regularizationBias; - return this; - } - - /** - * Dropout probability. This is the probability of retaining each input activation value for a layer. - * dropOut(x) will keep an input activation with probability x, and set to 0 with probability 1-x.
- * dropOut(0.0) is a special value / special case - when set to 0.0., dropout is disabled (not applied). Note - * that a dropout value of 1.0 is functionally equivalent to no dropout: i.e., 100% probability of retaining - * each input activation.
- *

- * Note 1: Dropout is applied at training time only - and is automatically not applied at test time - * (for evaluation, etc)
- * Note 2: This sets the probability per-layer. Care should be taken when setting lower values for - * complex networks (too much information may be lost with aggressive (very low) dropout values).
- * Note 3: Frequently, dropout is not applied to (or, has higher retain probability for) input (first layer) - * layers. Dropout is also often not applied to output layers. This needs to be handled MANUALLY by the user - * - set .dropout(0) on those layers when using global dropout setting.
- * Note 4: Implementation detail (most users can ignore): DL4J uses inverted dropout, as described here: - * http://cs231n.github.io/neural-networks-2/ - *

- *
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param inputRetainProbability Dropout probability (probability of retaining each input activation value for a layer) - * @see #dropOut(IDropout) - */ - public Builder dropOut(double inputRetainProbability) { - if(inputRetainProbability == 0.0){ - return dropOut(null); - } - return dropOut(new Dropout(inputRetainProbability)); - } - - /** - * Set the dropout for all layers in this network
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param dropout Dropout, such as {@link Dropout}, {@link org.deeplearning4j.nn.conf.dropout.GaussianDropout}, - * {@link org.deeplearning4j.nn.conf.dropout.GaussianNoise} etc - * @return - */ - public Builder dropOut(IDropout dropout){ - //Clone: Dropout is stateful usually - don't want to have the same instance shared in multiple places - this.idropOut = (dropout == null ? null : dropout.clone()); - return this; - } - - /** - * Set the weight noise (such as {@link org.deeplearning4j.nn.conf.weightnoise.DropConnect} and - * {@link org.deeplearning4j.nn.conf.weightnoise.WeightNoise}) for the layers in this network.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param weightNoise Weight noise instance to use - */ - public Builder weightNoise(IWeightNoise weightNoise){ - this.weightNoise = weightNoise; - return this; - } - - - /** - * @deprecated Use {@link #updater(IUpdater)} - */ - @Deprecated - public Builder updater(Updater updater) { - return updater(updater.getIUpdaterWithDefaultConfig()); - } - - /** - * Gradient updater configuration. For example, {@link org.nd4j.linalg.learning.config.Adam} - * or {@link org.nd4j.linalg.learning.config.Nesterovs}
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param updater Updater to use - */ - public Builder updater(IUpdater updater) { - this.iUpdater = updater; - return this; - } - - /** - * Gradient updater configuration, for the biases only. If not set, biases will use the updater as - * set by {@link #updater(IUpdater)}
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param updater Updater to use for bias parameters - */ - public Builder biasUpdater(IUpdater updater){ - this.biasUpdater = updater; - return this; - } - - /** - * Gradient normalization strategy. Used to specify gradient renormalization, gradient clipping etc. - * See {@link GradientNormalization} for details
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param gradientNormalization Type of normalization to use. Defaults to None. - * @see GradientNormalization - */ - public Builder gradientNormalization(GradientNormalization gradientNormalization) { - this.gradientNormalization = gradientNormalization; - return this; - } - - /** - * Threshold for gradient normalization, only used for GradientNormalization.ClipL2PerLayer, - * GradientNormalization.ClipL2PerParamType, and GradientNormalization.ClipElementWiseAbsoluteValue
- * Not used otherwise.
- * L2 threshold for first two types of clipping, or absolute value threshold for last type of clipping.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - */ - public Builder gradientNormalizationThreshold(double threshold) { - this.gradientNormalizationThreshold = threshold; - return this; - } - - /** - * Sets the convolution mode for convolutional layers, which impacts padding and output sizes. - * See {@link ConvolutionMode} for details. Defaults to ConvolutionMode.TRUNCATE
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * @param convolutionMode Convolution mode to use - */ - public Builder convolutionMode(ConvolutionMode convolutionMode) { - this.convolutionMode = convolutionMode; - return this; - } - - /** - * Sets the cuDNN algo mode for convolutional layers, which impacts performance and memory usage of cuDNN. - * See {@link ConvolutionLayer.AlgoMode} for details. Defaults to "PREFER_FASTEST", but "NO_WORKSPACE" uses less memory. - *
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * @param cudnnAlgoMode cuDNN algo mode to use - */ - public Builder cudnnAlgoMode(ConvolutionLayer.AlgoMode cudnnAlgoMode) { - this.cudnnAlgoMode = cudnnAlgoMode; - return this; - } - - /** - * Set constraints to be applied to all layers. Default: no constraints.
- * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm regularization, - * etc). These constraints are applied at each iteration, after the parameters have been updated.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param constraints Constraints to apply to all parameters of all layers - */ - public Builder constrainAllParameters(LayerConstraint... constraints){ - this.allParamConstraints = Arrays.asList(constraints); - return this; - } - - /** - * Set constraints to be applied to all layers. Default: no constraints.
- * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm regularization, - * etc). These constraints are applied at each iteration, after the parameters have been updated.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param constraints Constraints to apply to all bias parameters of all layers - */ - public Builder constrainBias(LayerConstraint... constraints) { - this.biasConstraints = Arrays.asList(constraints); - return this; - } - - /** - * Set constraints to be applied to all layers. Default: no constraints.
- * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm regularization, - * etc). These constraints are applied at each iteration, after the parameters have been updated.
- * Note: values set by this method will be applied to all applicable layers in the network, unless a different - * value is explicitly set on a given layer. In other words: values set via this method are used as the default - * value, and can be overridden on a per-layer basis. - * - * @param constraints Constraints to apply to all weight parameters of all layers - */ - public Builder constrainWeights(LayerConstraint... constraints) { - this.weightConstraints = Arrays.asList(constraints); - return this; - } - - - /** - * Set the DataType for the network parameters and activations. Must be a floating point type: {@link DataType#DOUBLE}, - * {@link DataType#FLOAT} or {@link DataType#HALF}.
- */ - public Builder dataType(@NonNull DataType dataType){ - Preconditions.checkState(dataType == DataType.DOUBLE || dataType == DataType.FLOAT || dataType == DataType.HALF, - "Data type must be a floating point type: one of DOUBLE, FLOAT, or HALF. Got datatype: %s", dataType); - this.dataType = dataType; - return this; - } - - /** - * Return a configuration based on this builder - * - * @return - */ - public NeuralNetConfiguration build() { - - NeuralNetConfiguration conf = new NeuralNetConfiguration(); - conf.minimize = minimize; - conf.maxNumLineSearchIterations = maxNumLineSearchIterations; - conf.layer = layer; - conf.optimizationAlgo = optimizationAlgo; - conf.seed = seed; - conf.stepFunction = stepFunction; - conf.miniBatch = miniBatch; - conf.cacheMode = this.cacheMode; - conf.dataType = this.dataType; - - configureLayer(layer); - if (layer instanceof FrozenLayer) { - configureLayer(((FrozenLayer) layer).getLayer()); - } - - if (layer instanceof FrozenLayerWithBackprop) { - configureLayer(((FrozenLayerWithBackprop) layer).getUnderlying()); - } - - return conf; - } - - private void configureLayer(Layer layer) { - String layerName; - if (layer == null || layer.getLayerName() == null) - layerName = "Layer not named"; - else - layerName = layer.getLayerName(); - - if(layer instanceof AbstractSameDiffLayer){ - AbstractSameDiffLayer sdl = (AbstractSameDiffLayer)layer; - sdl.applyGlobalConfig(this); - } - - if (layer != null) { - copyConfigToLayer(layerName, layer); - } - - if (layer instanceof FrozenLayer) { - copyConfigToLayer(layerName, ((FrozenLayer) layer).getLayer()); - } - - if (layer instanceof FrozenLayerWithBackprop) { - copyConfigToLayer(layerName, ((FrozenLayerWithBackprop) layer).getUnderlying()); - } - - if (layer instanceof Bidirectional) { - Bidirectional b = (Bidirectional)layer; - copyConfigToLayer(b.getFwd().getLayerName(), b.getFwd()); - copyConfigToLayer(b.getBwd().getLayerName(), b.getBwd()); - } - - if(layer instanceof BaseWrapperLayer){ - BaseWrapperLayer bwr = (BaseWrapperLayer)layer; - configureLayer(bwr.getUnderlying()); - } - - if (layer instanceof ConvolutionLayer) { - ConvolutionLayer cl = (ConvolutionLayer) layer; - if (cl.getConvolutionMode() == null) { - cl.setConvolutionMode(convolutionMode); - } - if (cl.getCudnnAlgoMode() == null) { - cl.setCudnnAlgoMode(cudnnAlgoMode); - } - } - if (layer instanceof SubsamplingLayer) { - SubsamplingLayer sl = (SubsamplingLayer) layer; - if (sl.getConvolutionMode() == null) { - sl.setConvolutionMode(convolutionMode); - } - } - LayerValidation.generalValidation(layerName, layer, idropOut, regularization, regularizationBias, - allParamConstraints, weightConstraints, biasConstraints); - } - - private void copyConfigToLayer(String layerName, Layer layer) { - - if (layer.getIDropout() == null) { - //Dropout is stateful usually - don't want to have the same instance shared by multiple layers - layer.setIDropout(idropOut == null ? 
null : idropOut.clone()); - } - - if (layer instanceof BaseLayer) { - BaseLayer bLayer = (BaseLayer) layer; - if (bLayer.getRegularization() == null || bLayer.getRegularization().isEmpty()) - bLayer.setRegularization(regularization); - if (bLayer.getRegularizationBias() == null || bLayer.getRegularizationBias().isEmpty()) - bLayer.setRegularizationBias(regularizationBias); - if (bLayer.getActivationFn() == null) - bLayer.setActivationFn(activationFn); - if (bLayer.getWeightInitFn() == null) - bLayer.setWeightInitFn(weightInitFn); - if (Double.isNaN(bLayer.getBiasInit())) - bLayer.setBiasInit(biasInit); - if (Double.isNaN(bLayer.getGainInit())) - bLayer.setGainInit(gainInit); - - //Configure weight noise: - if(weightNoise != null && ((BaseLayer) layer).getWeightNoise() == null){ - ((BaseLayer) layer).setWeightNoise(weightNoise.clone()); - } - - //Configure updaters: - if(iUpdater != null && bLayer.getIUpdater() == null){ - bLayer.setIUpdater(iUpdater.clone()); //Clone the updater to avoid shared instances - in case of setLearningRate calls later - } - if(biasUpdater != null && bLayer.getBiasUpdater() == null){ - bLayer.setBiasUpdater(biasUpdater.clone()); //Clone the updater to avoid shared instances - in case of setLearningRate calls later - } - - if(bLayer.getIUpdater() == null && iUpdater == null && bLayer.initializer().numParams(bLayer) > 0){ - //No updater set anywhere - IUpdater u = new Sgd(); - bLayer.setIUpdater(u); - log.warn("*** No updater configuration is set for layer {} - defaulting to {} ***", layerName, u); - } - - if (bLayer.getGradientNormalization() == null) - bLayer.setGradientNormalization(gradientNormalization); - if (Double.isNaN(bLayer.getGradientNormalizationThreshold())) - bLayer.setGradientNormalizationThreshold(gradientNormalizationThreshold); - } - - if (layer instanceof ActivationLayer){ - ActivationLayer al = (ActivationLayer)layer; - if(al.getActivationFn() == null) - al.setActivationFn(activationFn); - } - } - } + public void setNeuralNet(IModel model) { + this.net = model; + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/BaseConstraint.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/BaseConstraint.java index fafb7a78e..f9a3e81f0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/BaseConstraint.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/BaseConstraint.java @@ -53,12 +53,12 @@ public abstract class BaseConstraint implements LayerConstraint { @Override public void applyConstraint(Layer layer, int iteration, int epoch) { - Map paramTable = layer.paramTable(); + Map paramTable = layer.getParamTable(); if(paramTable == null || paramTable.isEmpty() ){ return; } - ParamInitializer i = layer.conf().getLayer().initializer(); + ParamInitializer i = layer.getLayerConfiguration().initializer(); for(Map.Entry e : paramTable.entrySet()){ if(params.contains(e.getKey())){ apply(e.getValue()); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MaxNormConstraint.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MaxNormConstraint.java index 43fdc4254..a38e6dfcf 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MaxNormConstraint.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MaxNormConstraint.java @@ -43,7 +43,7 @@ public class MaxNormConstraint extends BaseConstraint { /** * 
@param maxNorm Maximum L2 value * @param paramNames Which parameter names to apply constraint to - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ @@ -56,7 +56,7 @@ public class MaxNormConstraint extends BaseConstraint { * Apply to weights but not biases by default * * @param maxNorm Maximum L2 value - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MinMaxNormConstraint.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MinMaxNormConstraint.java index 6449a9abd..ca43d4ca0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MinMaxNormConstraint.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/MinMaxNormConstraint.java @@ -51,7 +51,7 @@ public class MinMaxNormConstraint extends BaseConstraint { * * @param max Maximum L2 value * @param min Minimum L2 value - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ @@ -65,7 +65,7 @@ public class MinMaxNormConstraint extends BaseConstraint { * @param max Maximum L2 value * @param min Minimum L2 value * @param rate Constraint rate - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ @@ -79,7 +79,7 @@ public class MinMaxNormConstraint extends BaseConstraint { * @param min Minimum L2 value * @param rate Constraint rate * @param paramNames Which parameter names to apply constraint to - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. 
For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/UnitNormConstraint.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/UnitNormConstraint.java index a082056a7..3e80f341b 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/UnitNormConstraint.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/constraint/UnitNormConstraint.java @@ -39,7 +39,7 @@ public class UnitNormConstraint extends BaseConstraint { /** * Apply to weights but not biases by default * - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ @@ -49,7 +49,7 @@ public class UnitNormConstraint extends BaseConstraint { /** - * @param dimensions Dimensions to apply to. For DenseLayer, OutputLayer, RnnOutputLayer, LSTM, etc: this should + * @param dimensions Dimensions to apply to. For DenseLayerConfiguration, OutputLayer, RnnOutputLayer, LSTM, etc: this should * be dimension 1. For CNNs, this should be dimensions [1,2,3] corresponding to last 3 of * parameters which have order [depthOut, depthIn, kH, kW] */ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/graph/LayerVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/graph/LayerVertex.java index b1734682d..67f6ee365 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/graph/LayerVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/graph/LayerVertex.java @@ -21,12 +21,14 @@ package org.deeplearning4j.nn.conf.graph; import lombok.Data; -import lombok.EqualsAndHashCode; +import lombok.Getter; import lombok.NoArgsConstructor; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.inputs.InvalidInputTypeException; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.MemoryReport; import org.deeplearning4j.nn.graph.ComputationGraph; import org.nd4j.linalg.api.buffer.DataType; @@ -38,16 +40,18 @@ import java.util.Arrays; @Data public class LayerVertex extends GraphVertex { - private NeuralNetConfiguration layerConf; + private NeuralNetConfiguration netConfiguration; + @Getter + private LayerConfiguration layerConfiguration; private InputPreProcessor preProcessor; - //Set outputVertex to true when Layer is an OutputLayer, OR For use in specialized situations like reinforcement learning - // For RL situations, this Layer insn't an OutputLayer, but is the last layer in a graph, that gets its error/epsilon + //Set outputVertex to true when ILayer is an OutputLayer, OR For use in specialized situations like reinforcement learning + // For RL situations, this ILayer isn't an OutputLayer, but is the last layer in a graph, that gets its error/epsilon // passed in externally private boolean outputVertex; - public LayerVertex(NeuralNetConfiguration 
layerConf, InputPreProcessor preProcessor) { - this.layerConf = layerConf; + public LayerVertex(NeuralNetConfiguration netConfiguration, InputPreProcessor preProcessor) { + this.netConfiguration = netConfiguration; this.preProcessor = preProcessor; } @@ -57,7 +61,8 @@ public class LayerVertex extends GraphVertex { @Override public GraphVertex clone() { - return new LayerVertex(layerConf.clone(), (preProcessor != null ? preProcessor.clone() : null)); + return new LayerVertex( + netConfiguration.clone(), (preProcessor != null ? preProcessor.clone() : null)); } @Override @@ -65,10 +70,11 @@ public class LayerVertex extends GraphVertex { if (!(o instanceof LayerVertex)) return false; LayerVertex lv = (LayerVertex) o; - if ((layerConf == null && lv.layerConf != null) || (layerConf != null && lv.layerConf == null)) { + if ((netConfiguration == null && lv.netConfiguration != null) || (netConfiguration != null && lv.netConfiguration + == null)) { return false; } - if (layerConf != null && !layerConf.equals(lv.layerConf)) + if (netConfiguration != null && !netConfiguration.equals(lv.netConfiguration)) return false; if (preProcessor == null && lv.preProcessor != null || preProcessor != null && lv.preProcessor == null) return false; @@ -77,12 +83,12 @@ public class LayerVertex extends GraphVertex { @Override public int hashCode() { - return layerConf.hashCode() ^ (preProcessor != null ? preProcessor.hashCode() : 0); + return netConfiguration.hashCode() ^ (preProcessor != null ? preProcessor.hashCode() : 0); } @Override public long numParams(boolean backprop) { - return layerConf.getLayer().initializer().numParams(layerConf); + return layerConfiguration.initializer().numParams(layerConfiguration); } @Override @@ -99,14 +105,14 @@ public class LayerVertex extends GraphVertex { public org.deeplearning4j.nn.graph.vertex.GraphVertex instantiate(ComputationGraph graph, String name, int idx, INDArray paramsView, boolean initializeParams, DataType networkDatatype) { //Now, we need to work out if this vertex is an output vertex or not... 
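[Editor's orientation note, not part of the change set.] The LayerVertex hunks here replace the single wrapped layerConf with an explicit pair: the owning netConfiguration (a NeuralNetConfiguration) and a per-layer layerConfiguration. The sketch below shows how calling code can read both sides after this split. It is a hedged illustration that uses only accessors named in this patch (getLayer, getLayerConfiguration, getComputationGraphConfiguration, getLayerName, initializer().numParams(...)); the class and method names in the sketch itself are hypothetical.

import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.deeplearning4j.nn.graph.ComputationGraph;

class VertexConfigSketch {
    // Hedged illustration of the IModel / Layer separation; not part of the patch.
    static void describe(ComputationGraph graph, int layerIdx, String outputName) {
        org.deeplearning4j.nn.api.Layer runtimeLayer = graph.getLayer(layerIdx);   // runtime (IModel) side
        LayerConfiguration lconf = runtimeLayer.getLayerConfiguration();           // configuration side
        long numParams = lconf.initializer().numParams(lconf);                     // params are now counted per layer configuration
        boolean isOutput = graph.getComputationGraphConfiguration()                // network-level data stays on the graph configuration
                .getNetworkOutputs().contains(outputName);
        System.out.println(lconf.getLayerName() + ": " + numParams + " params, output vertex = " + isOutput);
    }
}
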
- boolean isOutput = graph.getConfiguration().getNetworkOutputs().contains(name); - + boolean isOutput = graph.getComputationGraphConfiguration().getNetworkOutputs().contains(name); + this.layerConfiguration = graph.getLayer(idx).getLayerConfiguration(); org.deeplearning4j.nn.api.Layer layer = - layerConf.getLayer().instantiate(layerConf, null, idx, paramsView, initializeParams, networkDatatype); + layerConfiguration.instantiate(netConfiguration, null, idx, paramsView, initializeParams, networkDatatype); if(layer == null) { throw new IllegalStateException("Encountered null layer during initialization for layer:" + - layerConf.getLayer().getClass().getSimpleName() + " initialization returned null layer?"); + layerConfiguration.getClass().getSimpleName() + " initialization returned null layer?"); } return new org.deeplearning4j.nn.graph.vertex.impl.LayerVertex(graph, name, idx, layer, preProcessor, isOutput, networkDatatype); @@ -126,7 +132,7 @@ public class LayerVertex extends GraphVertex { else afterPreprocessor = preProcessor.getOutputType(vertexInputs[0]); - InputType ret = layerConf.getLayer().getOutputType(layerIndex, afterPreprocessor); + InputType ret = layerConfiguration.getOutputType(layerIndex, afterPreprocessor); return ret; } @@ -143,11 +149,14 @@ public class LayerVertex extends GraphVertex { it = inputTypes[0]; } //TODO preprocessor memory - return layerConf.getLayer().getMemoryReport(it); + return layerConfiguration.getMemoryReport(it); } @Override public void setDataType(DataType dataType){ - layerConf.getLayer().setDataType(dataType); + if(layerConfiguration instanceof BaseLayerConfiguration) + ((BaseLayerConfiguration)layerConfiguration).setDataType(dataType); } + + } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ActivationLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ActivationLayer.java index 0fb559c74..d7ee4b8ef 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ActivationLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ActivationLayer.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.conf.layers; import lombok.*; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.InputPreProcessor; @@ -34,6 +35,7 @@ import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; +import org.nd4j.linalg.learning.config.IUpdater; import java.util.Collection; import java.util.Map; @@ -48,6 +50,7 @@ public class ActivationLayer extends NoParamLayer { protected ActivationLayer(Builder builder) { super(builder); + setType(LayerType.ACT); this.activationFn = builder.activationFn; initializeConstraints(builder); } @@ -72,16 +75,24 @@ public class ActivationLayer extends NoParamLayer { return clone; } + @Override + public IUpdater getIUpdater() { + return null; + } + @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - org.deeplearning4j.nn.layers.ActivationLayer ret = new org.deeplearning4j.nn.layers.ActivationLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + this.setNetConfiguration(conf); + LayerConfiguration lconf = 
conf.getFlattenedLayerConfigurations().get(layerIndex); + + org.deeplearning4j.nn.layers.ActivationLayer ret = new org.deeplearning4j.nn.layers.ActivationLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -126,7 +137,7 @@ public class ActivationLayer extends NoParamLayer { @NoArgsConstructor @Getter @Setter - public static class Builder extends org.deeplearning4j.nn.conf.layers.Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Activation function for the layer @@ -134,7 +145,7 @@ public class ActivationLayer extends NoParamLayer { private IActivation activationFn = null; /** - * Layer activation function. Typical values include:
"relu" (rectified linear), "tanh", "sigmoid", + * ILayer activation function. Typical values include:
"relu" (rectified linear), "tanh", "sigmoid", * "softmax", "hardtanh", "leakyrelu", "maxout", "softsign", "softplus" * * @deprecated Use {@link #activation(Activation)} or {@link @activation(IActivation)} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/AutoEncoder.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/AutoEncoder.java index 09f14e034..c0dbc4f56 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/AutoEncoder.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/AutoEncoder.java @@ -55,14 +55,18 @@ public class AutoEncoder extends BasePretrainNetwork { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + this.setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + runInheritance(); org.deeplearning4j.nn.layers.feedforward.autoencoder.AutoEncoder ret = - new org.deeplearning4j.nn.layers.feedforward.autoencoder.AutoEncoder(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.feedforward.autoencoder.AutoEncoder(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(conf.getFlattenedLayerConfigurations().get(layerIndex)); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseLayerConfiguration.java similarity index 89% rename from cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseLayer.java rename to cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseLayerConfiguration.java index fc751e91b..b16ecb768 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseLayerConfiguration.java @@ -21,7 +21,9 @@ package org.deeplearning4j.nn.conf.layers; import lombok.*; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.distribution.Distribution; import org.deeplearning4j.nn.conf.weightnoise.IWeightNoise; @@ -29,8 +31,10 @@ import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.nn.weights.WeightInitDistribution; import org.deeplearning4j.util.NetworkUtils; +import org.jetbrains.annotations.NotNull; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.IActivation; +import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.learning.config.IUpdater; import org.nd4j.linalg.learning.regularization.L1Regularization; import org.nd4j.linalg.learning.regularization.L2Regularization; @@ -46,27 +50,27 @@ import java.util.List; */ @Data @EqualsAndHashCode(callSuper = true) -@NoArgsConstructor -public abstract class 
BaseLayer extends Layer implements Serializable, Cloneable { +@NoArgsConstructor(force = true) +public abstract class BaseLayerConfiguration extends LayerConfiguration implements ITraininableLayerConfiguration, Serializable, Cloneable { - protected IActivation activationFn; - protected IWeightInit weightInitFn; - protected double biasInit; - protected double gainInit; + @NonNull + protected IWeightInit weightInit; + protected double biasInit = 0.0; + protected double gainInit = 0.0; protected List regularization; protected List regularizationBias; protected IUpdater iUpdater; protected IUpdater biasUpdater; - protected IWeightNoise weightNoise; + private DataType dataType; + protected GradientNormalization gradientNormalization = GradientNormalization.None; //Clipping, rescale based on l2 norm, etc protected double gradientNormalizationThreshold = 1.0; //Threshold for l2 and element-wise gradient clipping - public BaseLayer(Builder builder) { + public BaseLayerConfiguration(Builder builder) { super(builder); this.layerName = builder.layerName; - this.activationFn = builder.activationFn; - this.weightInitFn = builder.weightInitFn; + this.weightInit = builder.weightInit; this.biasInit = builder.biasInit; this.gainInit = builder.gainInit; this.regularization = builder.regularization; @@ -76,6 +80,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable this.gradientNormalization = builder.gradientNormalization; this.gradientNormalizationThreshold = builder.gradientNormalizationThreshold; this.weightNoise = builder.weightNoise; + super.setActivationFn(builder.activationFn); } /** @@ -86,7 +91,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable public void resetLayerDefaultConfig() { //clear the learning related params for all layers in the origConf and set to defaults this.setIUpdater(null); - this.setWeightInitFn(null); + this.setWeightInit(null); this.setBiasInit(Double.NaN); this.setGainInit(Double.NaN); this.regularization = null; @@ -98,11 +103,8 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable } @Override - public BaseLayer clone() { - BaseLayer clone = (BaseLayer) super.clone(); - if (clone.iDropout != null) { - clone.iDropout = clone.iDropout.clone(); - } + public BaseLayerConfiguration clone() { + BaseLayerConfiguration clone = (BaseLayerConfiguration) super.clone(); if(regularization != null){ //Regularization fields are _usually_ thread safe and immutable, but let's clone to be sure clone.regularization = new ArrayList<>(regularization.size()); @@ -120,7 +122,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable } /** - * Get the updater for the given parameter. Typically the same updater will be used for all updaters, but this is + * Get the updater for the given parameter. Typically the same updater will be used for all parameters, but this is * not necessarily the case * * @param paramName Parameter name @@ -153,7 +155,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable @SuppressWarnings("unchecked") @Getter @Setter - public abstract static class Builder> extends Layer.Builder { + public abstract static class Builder> extends LayerConfiguration.Builder { /** * Set the activation function for the layer. 
This overload can be used for custom {@link IActivation} @@ -167,19 +169,19 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable * * @see IWeightInit */ - protected IWeightInit weightInitFn = null; + protected IWeightInit weightInit = null; /** * Bias initialization value, for layers with biases. Defaults to 0 * */ - protected double biasInit = Double.NaN; + protected double biasInit = 0.0; /** - * Gain initialization value, for layers with Layer Normalization. Defaults to 1 + * Gain initialization value, for layers with ILayer Normalization. Defaults to 1 * */ - protected double gainInit = Double.NaN; + protected double gainInit = 1.0; /** * Regularization for the parameters (excluding biases). @@ -252,7 +254,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable * @see IWeightInit */ public T weightInit(IWeightInit weightInit) { - this.setWeightInitFn(weightInit); + this.setWeightInit(weightInit); return (T) this; } @@ -267,7 +269,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable "Not supported!, Use weightInit(Distribution distribution) instead!"); } - this.setWeightInitFn(weightInit.getWeightInitFunction()); + this.setWeightInit(weightInit.getWeightInitFunction()); return (T) this; } @@ -292,7 +294,7 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable } /** - * Gain initialization value, for layers with Layer Normalization. Defaults to 1 + * Gain initialization value, for layers with ILayer Normalization. Defaults to 1 * * @param gainInit Value to use for initializing gain */ @@ -503,7 +505,22 @@ public abstract class BaseLayer extends Layer implements Serializable, Cloneable this.setWeightNoise(weightNoise); return (T) this; } - - } -} + + /** + * Inherit setting from neural network for those settings, that are not already set or do have + * a layer(type) specific default. 
+ * @param conf the neural net configration to inherit parameters from + */ + @Override + public void runInheritance(@NotNull NeuralNetConfiguration conf) { + super.runInheritance(conf); + if(this.biasUpdater == null ) this.biasUpdater = conf.getBiasUpdater(); + if(this.iUpdater == null ) this.iUpdater = conf.getUpdater(); + if(this.regularizationBias == null) this.regularizationBias = conf.getRegularizationBias(); + if(this.regularization == null ) this.regularization = conf.getRegularization(); + if(this.gradientNormalization == null) this.gradientNormalization = conf.getGradientNormalization(); + if(this.weightInit == null) this.weightInit = conf.getWeightInit(); + } + +} \ No newline at end of file diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseUpsamplingLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseUpsamplingLayer.java index b92ad390f..07220f89e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseUpsamplingLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BaseUpsamplingLayer.java @@ -21,10 +21,8 @@ package org.deeplearning4j.nn.conf.layers; import lombok.*; -import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.params.EmptyParamInitializer; /** * Upsampling base layer @@ -64,7 +62,7 @@ public abstract class BaseUpsamplingLayer extends NoParamLayer { @NoArgsConstructor @Getter @Setter - protected static abstract class UpsamplingBuilder> extends Layer.Builder { + protected static abstract class UpsamplingBuilder> extends LayerConfiguration.Builder { /** * An int array to specify upsampling dimensions, the length of which has to equal to the number of spatial diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java index 2dd228b0e..5e266afb2 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/BatchNormalization.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.conf.layers; import lombok.*; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.api.layers.LayerConstraint; @@ -64,6 +65,7 @@ public class BatchNormalization extends FeedForwardLayer { private BatchNormalization(Builder builder) { super(builder); + this.setType(LayerType.BN); this.decay = builder.decay; this.eps = builder.eps; this.isMinibatch = builder.isMinibatch; @@ -89,16 +91,19 @@ public class BatchNormalization extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + this.setNetConfiguration(conf); LayerValidation.assertNOutSet("BatchNormalization", getLayerName(), layerIndex, getNOut()); + runInheritance(); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.normalization.BatchNormalization ret = - new org.deeplearning4j.nn.layers.normalization.BatchNormalization(conf, networkDataType); - ret.setListeners(trainingListeners); + new 
org.deeplearning4j.nn.layers.normalization.BatchNormalization(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleLayer.java index c6f31faf3..05d32dc56 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleLayer.java @@ -63,14 +63,14 @@ public class CapsuleLayer extends SameDiffLayer { this.routings = builder.routings; if(capsules <= 0 || capsuleDimensions <= 0 || routings <= 0){ - throw new IllegalArgumentException("Invalid configuration for Capsule Layer (layer name = \"" + throw new IllegalArgumentException("Invalid configuration for Capsule ILayer (layer name = \"" + layerName + "\"):" + " capsules, capsuleDimensions, and routings must be > 0. Got: " + capsules + ", " + capsuleDimensions + ", " + routings); } if(inputCapsules < 0 || inputCapsuleDimensions < 0){ - throw new IllegalArgumentException("Invalid configuration for Capsule Layer (layer name = \"" + throw new IllegalArgumentException("Invalid configuration for Capsule ILayer (layer name = \"" + layerName + "\"):" + " inputCapsules and inputCapsuleDimensions must be >= 0 if set. Got: " + inputCapsules + ", " + inputCapsuleDimensions); @@ -211,7 +211,7 @@ public class CapsuleLayer extends SameDiffLayer { } @Override - public E build() { + public E build() { return (E) new CapsuleLayer(this); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleStrengthLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleStrengthLayer.java index bd75b863e..e702b2de1 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleStrengthLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CapsuleStrengthLayer.java @@ -59,7 +59,7 @@ public class CapsuleStrengthLayer extends SameDiffLambdaLayer { public static class Builder extends SameDiffLambdaLayer.Builder{ @Override - public E build() { + public E build() { return (E) new CapsuleStrengthLayer(this); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CenterLossOutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CenterLossOutputLayer.java index 820d73d5d..afe3fcc48 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CenterLossOutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CenterLossOutputLayer.java @@ -61,15 +61,17 @@ public class CenterLossOutputLayer extends BaseOutputLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); LayerValidation.assertNInNOutSet("CenterLossOutputLayer", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = 
conf.getFlattenedLayerConfigurations().get(layerIndex); - Layer ret = new org.deeplearning4j.nn.layers.training.CenterLossOutputLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + Layer ret = new org.deeplearning4j.nn.layers.training.CenterLossOutputLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners.toArray(new TrainingListener[]{})); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Cnn3DLossLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Cnn3DLossLayer.java index 774397ede..8ae76bd41 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Cnn3DLossLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Cnn3DLossLayer.java @@ -56,14 +56,18 @@ public class Cnn3DLossLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + runInheritance(); + org.deeplearning4j.nn.layers.convolution.Cnn3DLossLayer ret = - new org.deeplearning4j.nn.layers.convolution.Cnn3DLossLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.Cnn3DLossLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CnnLossLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CnnLossLayer.java index 0b31dd703..50e917dac 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CnnLossLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/CnnLossLayer.java @@ -61,14 +61,18 @@ public class CnnLossLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + runInheritance(); + org.deeplearning4j.nn.layers.convolution.CnnLossLayer ret = - new org.deeplearning4j.nn.layers.convolution.CnnLossLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.CnnLossLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); 
ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution1DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution1DLayer.java index 1bd0e5172..cf4fb5a1a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution1DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution1DLayer.java @@ -64,16 +64,17 @@ public class Convolution1DLayer extends ConvolutionLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); LayerValidation.assertNInNOutSet("Convolution1DLayer", getLayerName(), layerIndex, getNIn(), getNOut()); - + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.convolution.Convolution1DLayer ret = - new org.deeplearning4j.nn.layers.convolution.Convolution1DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.Convolution1DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution3D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution3D.java index f012b0008..99992463a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution3D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Convolution3D.java @@ -97,13 +97,15 @@ public class Convolution3D extends ConvolutionLayer { int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("Convolution3D", getLayerName(), layerIndex, getNIn(), getNOut()); - Convolution3DLayer ret = new Convolution3DLayer(conf, networkDataType); - ret.setListeners(iterationListeners); + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + Convolution3DLayer ret = new Convolution3DLayer(lconf, networkDataType); + ret.addTrainingListeners(iterationListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ConvolutionLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ConvolutionLayer.java index ae26e62f0..9ef539ae9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ConvolutionLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ConvolutionLayer.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.conf.layers; 
import lombok.*; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.*; @@ -113,6 +114,7 @@ public class ConvolutionLayer extends FeedForwardLayer { */ protected ConvolutionLayer(BaseConvBuilder builder) { super(builder); + this.setType(LayerType.CONV); int dim = builder.convolutionDim; this.hasBias = builder.hasBias; @@ -168,16 +170,20 @@ public class ConvolutionLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + lconf.runInheritance(); + LayerValidation.assertNInNOutSet("ConvolutionLayer", getLayerName(), layerIndex, getNIn(), getNOut()); org.deeplearning4j.nn.layers.convolution.ConvolutionLayer ret = - new org.deeplearning4j.nn.layers.convolution.ConvolutionLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.ConvolutionLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -399,9 +405,10 @@ public class ConvolutionLayer extends FeedForwardLayer { /** * Set the convolution mode for the Convolution layer. See {@link ConvolutionMode} for more details + * Default is {@link ConvolutionMode}.Truncate. * */ - protected ConvolutionMode convolutionMode; + protected ConvolutionMode convolutionMode = ConvolutionMode.Truncate; /** * Kernel dilation. Default: {1, 1}, which is standard convolutions. 
Used for implementing dilated convolutions, diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution2D.java index e4f789ab7..d805561d0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution2D.java @@ -81,16 +81,19 @@ public class Deconvolution2D extends ConvolutionLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); LayerValidation.assertNInNOutSet("Deconvolution2D", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.convolution.Deconvolution2DLayer ret = - new org.deeplearning4j.nn.layers.convolution.Deconvolution2DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.Deconvolution2DLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution3D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution3D.java index 9f96b25da..ea19c1148 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution3D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Deconvolution3D.java @@ -30,10 +30,8 @@ import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.layers.convolution.Deconvolution2DLayer; import org.deeplearning4j.nn.layers.convolution.Deconvolution3DLayer; import org.deeplearning4j.nn.params.Deconvolution3DParamInitializer; -import org.deeplearning4j.nn.params.DeconvolutionParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.util.ValidationUtils; import org.nd4j.linalg.api.buffer.DataType; @@ -84,15 +82,15 @@ public class Deconvolution3D extends ConvolutionLayer { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("Deconvolution2D", getLayerName(), layerIndex, getNIn(), getNOut()); - + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); Deconvolution3DLayer ret = - new Deconvolution3DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new Deconvolution3DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = 
initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DenseLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DenseLayer.java index d77f13e5c..b1dd9856a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DenseLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DenseLayer.java @@ -28,6 +28,7 @@ import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.memory.MemoryReport; import org.deeplearning4j.nn.params.DefaultParamInitializer; +import org.deeplearning4j.nn.weights.WeightInitXavier; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; @@ -35,6 +36,10 @@ import org.nd4j.linalg.api.ndarray.INDArray; import java.util.Collection; import java.util.Map; +/** + * Dense Layer + * Uses WeightInitXavier as default + */ @Data @NoArgsConstructor @ToString(callSuper = true) @@ -55,16 +60,20 @@ public class DenseLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - LayerValidation.assertNInNOutSet("DenseLayer", getLayerName(), layerIndex, getNIn(), getNOut()); + + LayerValidation.assertNInNOutSet("DenseLayerConfiguration", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + lconf.runInheritance(); org.deeplearning4j.nn.layers.feedforward.dense.DenseLayer ret = - new org.deeplearning4j.nn.layers.feedforward.dense.DenseLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.feedforward.dense.DenseLayer(lconf, networkDataType); + + if(getWeightInit() == null) setWeightInit(new WeightInitXavier()); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); return ret; } @@ -101,7 +110,7 @@ public class DenseLayer extends FeedForwardLayer { return new LayerMemoryReport.Builder(layerName, DenseLayer.class, inputType, outputType) .standardMemory(numParams, updaterStateSize) .workingMemory(0, 0, trainSizeFixed, trainSizeVariable) //No additional memory (beyond activations) for inference - .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayer + .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayerConfiguration .build(); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DepthwiseConvolution2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DepthwiseConvolution2D.java index d412c7158..307604ce0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DepthwiseConvolution2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DepthwiseConvolution2D.java @@ 
-68,13 +68,15 @@ public class DepthwiseConvolution2D extends ConvolutionLayer { int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("DepthwiseConvolution2D", getLayerName(), layerIndex, getNIn(), getNOut()); - DepthwiseConvolution2DLayer ret = new DepthwiseConvolution2DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + DepthwiseConvolution2DLayer ret = new DepthwiseConvolution2DLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DropoutLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DropoutLayer.java index fa20692be..521dacd23 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DropoutLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/DropoutLayer.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.conf.layers; import lombok.*; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; @@ -46,7 +47,9 @@ import java.util.Map; public class DropoutLayer extends FeedForwardLayer { private DropoutLayer(Builder builder) { + super(builder); + setType(LayerType.DO); } public DropoutLayer(double activationRetainProb){ @@ -66,13 +69,17 @@ public class DropoutLayer extends FeedForwardLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - org.deeplearning4j.nn.layers.DropoutLayer ret = new org.deeplearning4j.nn.layers.DropoutLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + setNetConfiguration(conf); + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.DropoutLayer ret = new org.deeplearning4j.nn.layers.DropoutLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingLayer.java index 67199aa64..36d719ddc 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingLayer.java @@ -27,7 +27,6 @@ import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import 
org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.memory.MemoryReport; -import org.deeplearning4j.nn.params.DefaultParamInitializer; import org.deeplearning4j.nn.params.EmbeddingLayerParamInitializer; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.embeddings.ArrayEmbeddingInitializer; @@ -58,14 +57,16 @@ public class EmbeddingLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingLayer ret = - new org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingSequenceLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingSequenceLayer.java index ea7b4e6bf..16aeb1acd 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingSequenceLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/EmbeddingSequenceLayer.java @@ -65,14 +65,16 @@ public class EmbeddingSequenceLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingSequenceLayer ret = - new org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingSequenceLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.feedforward.embedding.EmbeddingSequenceLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -215,14 +217,14 @@ public class EmbeddingSequenceLayer extends FeedForwardLayer { return this; } - @Override + public void setWeightInitFn(IWeightInit weightInit){ if(weightInit instanceof WeightInitEmbedding){ long[] shape = ((WeightInitEmbedding) weightInit).shape(); nIn(shape[0]); nOut(shape[1]); } - this.weightInitFn = weightInit; + this.weightInit = weightInit; } /** diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/FeedForwardLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/FeedForwardLayer.java index 8e8fd62a3..de733add8 100644 --- 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/FeedForwardLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/FeedForwardLayer.java @@ -21,19 +21,19 @@ package org.deeplearning4j.nn.conf.layers; import lombok.*; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.conf.DataFormat; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.preprocessor.Cnn3DToFeedForwardPreProcessor; import org.deeplearning4j.nn.conf.preprocessor.CnnToFeedForwardPreProcessor; import org.deeplearning4j.nn.conf.preprocessor.RnnToFeedForwardPreProcessor; -import org.deeplearning4j.nn.params.DefaultParamInitializer; @Data @NoArgsConstructor @ToString(callSuper = true) @EqualsAndHashCode(callSuper = true) -public abstract class FeedForwardLayer extends BaseLayer { +public abstract class FeedForwardLayer extends BaseLayerConfiguration { protected long nIn; protected long nOut; @@ -43,9 +43,11 @@ public abstract class FeedForwardLayer extends BaseLayer { super(builder); this.nIn = builder.nIn; this.nOut = builder.nOut; + setType(LayerType.FC); } + @Override public InputType getOutputType(int layerIndex, InputType inputType) { if (inputType == null || (inputType.getType() != InputType.Type.FF @@ -120,7 +122,7 @@ public abstract class FeedForwardLayer extends BaseLayer { @Getter @Setter - public abstract static class Builder> extends BaseLayer.Builder { + public abstract static class Builder> extends BaseLayerConfiguration.Builder { /** * Number of inputs for the layer (usually the size of the last layer).
Note that for Convolutional layers, diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GlobalPoolingLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GlobalPoolingLayer.java index cdf92720a..6d95ae93b 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GlobalPoolingLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GlobalPoolingLayer.java @@ -69,14 +69,16 @@ public class GlobalPoolingLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.pooling.GlobalPoolingLayer ret = - new org.deeplearning4j.nn.layers.pooling.GlobalPoolingLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.pooling.GlobalPoolingLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -189,7 +191,7 @@ public class GlobalPoolingLayer extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Pooling type for global pooling diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesBidirectionalLSTM.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesBidirectionalLSTM.java index 76a943509..792d735c3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesBidirectionalLSTM.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesBidirectionalLSTM.java @@ -59,7 +59,7 @@ public class GravesBidirectionalLSTM extends BaseRecurrentLayer { } @Override - protected void initializeConstraints(org.deeplearning4j.nn.conf.layers.Layer.Builder builder) { + protected void initializeConstraints(LayerConfiguration.Builder builder) { super.initializeConstraints(builder); if (((Builder) builder).recurrentConstraints != null) { if (constraints == null) { @@ -79,14 +79,16 @@ public class GravesBidirectionalLSTM extends BaseRecurrentLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.recurrent.GravesBidirectionalLSTM ret = - new org.deeplearning4j.nn.layers.recurrent.GravesBidirectionalLSTM(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.recurrent.GravesBidirectionalLSTM(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); 
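The instantiate() rewrite above recurs, with small variations, in nearly every layer configuration touched by this patch. The sketch below condenses that recurring pattern for a hypothetical MyLayerConfiguration backed by a hypothetical runtime class MyLayer; both names are illustrative only, while every call in the body is taken from the hunks in this patch.

    @Override
    public Layer instantiate(NeuralNetConfiguration conf, Collection<TrainingListener> trainingListeners,
            int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) {
        // Per-layer settings now come from the flattened configuration list, not from conf directly
        LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex);
        // The runtime layer is constructed from the LayerConfiguration (MyLayer is hypothetical)
        MyLayer ret = new MyLayer(lconf, networkDataType);
        ret.addTrainingListeners(trainingListeners);   // replaces setListeners(...)
        ret.setIndex(layerIndex);
        ret.setParamsViewArray(layerParamsView);
        // The ParamInitializer is now driven by the configuration object itself
        Map<String, INDArray> paramTable = initializer().init(this, layerParamsView, initializeParams);
        ret.setParamTable(paramTable);
        ret.setLayerConfiguration(lconf);              // replaces setConf(conf)
        return ret;
    }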
ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesLSTM.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesLSTM.java index e12d6df22..9c50ccba4 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesLSTM.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/GravesLSTM.java @@ -59,7 +59,7 @@ public class GravesLSTM extends AbstractLSTM { } @Override - protected void initializeConstraints(org.deeplearning4j.nn.conf.layers.Layer.Builder builder) { + protected void initializeConstraints(LayerConfiguration.Builder builder) { super.initializeConstraints(builder); if (((Builder) builder).recurrentConstraints != null) { if (constraints == null) { @@ -77,14 +77,20 @@ public class GravesLSTM extends AbstractLSTM { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("GravesLSTM", getLayerName(), layerIndex, getNIn(), getNOut()); + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + lconf.setNetConfiguration(conf); + runInheritance(); + org.deeplearning4j.nn.layers.recurrent.GravesLSTM ret = - new org.deeplearning4j.nn.layers.recurrent.GravesLSTM(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.recurrent.GravesLSTM(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LSTM.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LSTM.java index 0f0d61fc3..85c440c18 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LSTM.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LSTM.java @@ -57,7 +57,7 @@ public class LSTM extends AbstractLSTM { } @Override - protected void initializeConstraints(org.deeplearning4j.nn.conf.layers.Layer.Builder builder) { + protected void initializeConstraints(LayerConfiguration.Builder builder) { super.initializeConstraints(builder); if (((Builder) builder).recurrentConstraints != null) { if (constraints == null) { @@ -75,13 +75,14 @@ public class LSTM extends AbstractLSTM { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("LSTM", getLayerName(), layerIndex, getNIn(), getNOut()); - org.deeplearning4j.nn.layers.recurrent.LSTM ret = new org.deeplearning4j.nn.layers.recurrent.LSTM(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.recurrent.LSTM ret = new org.deeplearning4j.nn.layers.recurrent.LSTM(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); 
ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Layer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerConfiguration.java similarity index 79% rename from cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Layer.java rename to cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerConfiguration.java index a96ec6db7..394012c4f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Layer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerConfiguration.java @@ -20,12 +20,25 @@ package org.deeplearning4j.nn.conf.layers; +import com.fasterxml.jackson.annotation.JsonIdentityInfo; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.annotation.ObjectIdGenerators; +import java.io.Serializable; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; import lombok.Data; import lombok.Getter; import lombok.NoArgsConstructor; +import lombok.NonNull; import lombok.Setter; +import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.ILayerConfiguration; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.api.TrainingConfig; import org.deeplearning4j.nn.api.layers.LayerConstraint; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; @@ -33,36 +46,53 @@ import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.dropout.IDropout; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; +import org.deeplearning4j.nn.conf.weightnoise.IWeightNoise; import org.deeplearning4j.optimize.api.TrainingListener; +import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.learning.config.IUpdater; import org.nd4j.linalg.learning.regularization.Regularization; -import com.fasterxml.jackson.annotation.JsonTypeInfo; - -import java.io.Serializable; -import java.lang.reflect.Field; -import java.util.*; /** * A neural network layer. 
+ * */ @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@class") @Data @NoArgsConstructor -public abstract class Layer implements TrainingConfig, Serializable, Cloneable { +@JsonIdentityInfo(generator= ObjectIdGenerators.IntSequenceGenerator.class, property="@id") +@Slf4j +public abstract class LayerConfiguration implements ILayerConfiguration, Serializable, Cloneable { // ITraininableLayerConfiguration protected String layerName; - protected IDropout iDropout; + @Getter + protected List variables = new ArrayList<>(); protected List constraints; + protected IWeightNoise weightNoise; + private IDropout iDropout; + /** + * The type of the layer, basically defines the base class and its properties + */ + @Getter @Setter @NonNull + private LayerType type = LayerType.UNKNOWN; + @Getter @Setter + private NeuralNetConfiguration netConfiguration; + @Getter @Setter + private IActivation activationFn; - - public Layer(Builder builder) { + public LayerConfiguration(Builder builder) { this.layerName = builder.layerName; this.iDropout = builder.iDropout; } + public void addVariable(String s) {variables.add(s);} + + public String toJson() { + throw new RuntimeException("toJson is not implemented for LayerConfiguration"); + } + /** * Initialize the weight constraints. Should be called last, in the outer-most constructor */ @@ -113,10 +143,20 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { this.constraints = null; } + /** + * Migration workaround //TODO To be removed + * + * @return a layer configuration + */ + @Deprecated + public LayerConfiguration getLayer() { + return this; + } + @Override - public Layer clone() { + public LayerConfiguration clone() { try { - Layer ret = (Layer) super.clone(); + LayerConfiguration ret = (LayerConfiguration) super.clone(); //Let's check for any INDArray fields and dup them (in case cloned layer will be used in different threads on CUDA... // we don't want it being relocated contantly between devices) Class c = getClass(); @@ -150,7 +190,7 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { } } - public abstract org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, + public abstract org.deeplearning4j.nn.api.Layer instantiate( @NonNull NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType); @@ -180,7 +220,6 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { */ public abstract void setNIn(InputType inputType, boolean override); - /** * For the given type of input to this layer, what preprocessor (if any) is required?
* Returns null if no preprocessor is required, otherwise returns an appropriate {@link @@ -205,7 +244,7 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { /** * Is the specified parameter a layerwise pretraining only parameter?
For example, visible * bias params in an autoencoder (or, decoder params in a variational autoencoder) aren't used - * during supervised backprop.
Layers (like DenseLayer, etc) with no pretrainable parameters + * during supervised backprop.
Layers (like DenseLayerConfiguration, etc) with no pretrainable parameters * will return false for all (valid) inputs. * * @param paramName Parameter name/key @@ -214,7 +253,7 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { public abstract boolean isPretrainParam(String paramName); /** - * Get the updater for the given parameter. Typically the same updater will be used for all + * Get the updater for the given parameter. Typically, the same updater will be used for all * parameters, but this is not necessarily the case * * @param paramName Parameter name @@ -225,9 +264,13 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { "Not supported: all layers with parameters should override this method"); } - @Override - public void setDataType(DataType dataType) { - //No-op for most layers + public IUpdater getIUpdater() { + throw new UnsupportedOperationException( + "Not supported: all layers with parameters should override this method"); + } + + public void setIUpdater(IUpdater iUpdater) { + log.warn("Setting an IUpdater on {} with name {} has no effect.", getClass().getSimpleName(), getLayerName()); } /** @@ -239,12 +282,37 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { */ public abstract LayerMemoryReport getMemoryReport(InputType inputType); - @SuppressWarnings("unchecked") + public void clearVariables() { + this.variables.clear(); + } + + /** + * Inherit setting from neural network for those settings, that are not already set or do have + * a layer(type) specific default. This implementation does not require the neural network configuration to be + * the same as the one returned from this layers {@link #getNetConfiguration()}. + * + * @param conf a neural net configration to inherit parameters from + * + */ + public void runInheritance(@NonNull NeuralNetConfiguration conf) { + if(this.activationFn == null ) this.activationFn = conf.getActivation(); + if(this.iDropout == null ) this.iDropout = conf.getIdropOut(); + if(this.weightNoise == null) this.weightNoise = conf.getWeightNoise(); + } + + /** Runs {@link #runInheritance(NeuralNetConfiguration)} using the layers configurations embedded neural net + * configuration (the one returned from {@link #getNetConfiguration()}. + */ + public void runInheritance() { + runInheritance(getNetConfiguration()); + } + + @SuppressWarnings("unchecked") @Getter @Setter public abstract static class Builder> { - protected String layerName = null; + protected String layerName; protected List allParamConstraints; @@ -255,7 +323,7 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { protected IDropout iDropout; /** - * Layer name assigns layer string name. Allows easier differentiation between layers. + * ILayer name assigns layer string name. Allows easier differentiation between layers. 
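The runInheritance() hook introduced above is what several of the rewritten instantiate() methods in this patch (GravesLSTM, OutputLayer, RnnLossLayer, SubsamplingLayer) rely on: the layer configuration is attached to its owning network configuration and then fills in any settings it has not defined itself. A minimal sketch of that wiring; the helper method and its name are illustrative, while the calls on the two configuration objects come from this patch.

    // Sketch only: lets a layer configuration fall back to network-level defaults.
    static void inheritNetworkDefaults(LayerConfiguration layerConf, NeuralNetConfiguration netConf) {
        layerConf.setNetConfiguration(netConf);
        // Copies activation, dropout and weight noise from netConf, but only where the
        // layer has not set its own value (see the runInheritance implementation above).
        layerConf.runInheritance();
    }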
*/ public T name(String layerName) { this.setLayerName(layerName); @@ -344,6 +412,6 @@ public abstract class Layer implements TrainingConfig, Serializable, Cloneable { return (T) this; } - public abstract E build(); + public abstract E build(); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerValidation.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerValidation.java index 2a5f16be6..fcde1b127 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerValidation.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LayerValidation.java @@ -42,7 +42,7 @@ public class LayerValidation { /** * Asserts that the layer nIn and nOut values are set for the layer * - * @param layerType Type of layer ("DenseLayer", etc) + * @param layerType Type of layer ("DenseLayerConfiguration", etc) * @param layerName Name of the layer (may be null if not set) * @param layerIndex Index of the layer * @param nIn nIn value @@ -60,7 +60,7 @@ public class LayerValidation { /** * Asserts that the layer nOut value is set for the layer * - * @param layerType Type of layer ("DenseLayer", etc) + * @param layerType Type of layer ("DenseLayerConfiguration", etc) * @param layerName Name of the layer (may be null if not set) * @param layerIndex Index of the layer * @param nOut nOut value @@ -74,16 +74,16 @@ public class LayerValidation { } } - public static void generalValidation(String layerName, Layer layer, IDropout iDropout, List regularization, + public static void generalValidation(String layerName, LayerConfiguration layer, IDropout iDropout, List regularization, List regularizationBias, List allParamConstraints, List weightConstraints, List biasConstraints) { if (layer != null) { - if (layer instanceof BaseLayer) { - BaseLayer bLayer = (BaseLayer) layer; + if (layer instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bLayer = (BaseLayerConfiguration) layer; configureBaseLayer(layerName, bLayer, iDropout, regularization, regularizationBias); - } else if (layer instanceof FrozenLayer && ((FrozenLayer) layer).getLayer() instanceof BaseLayer) { - BaseLayer bLayer = (BaseLayer) ((FrozenLayer) layer).getLayer(); + } else if (layer instanceof FrozenLayer && ((FrozenLayer) layer).getInnerConfiguration() instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bLayer = (BaseLayerConfiguration) ((FrozenLayer) layer).getInnerConfiguration(); configureBaseLayer(layerName, bLayer, iDropout, regularization, regularizationBias); } else if (layer instanceof Bidirectional) { Bidirectional l = (Bidirectional) layer; @@ -128,7 +128,7 @@ public class LayerValidation { } } - private static void configureBaseLayer(String layerName, BaseLayer bLayer, IDropout iDropout, + private static void configureBaseLayer(String layerName, BaseLayerConfiguration bLayer, IDropout iDropout, List regularization, List regularizationBias) { if (regularization != null && !regularization.isEmpty()) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocalResponseNormalization.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocalResponseNormalization.java index 8648a2814..75397400b 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocalResponseNormalization.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocalResponseNormalization.java @@ -23,7 +23,6 @@ package 
org.deeplearning4j.nn.conf.layers; import lombok.*; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.GradientNormalization; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -44,7 +43,7 @@ import java.util.Map; @NoArgsConstructor @ToString(callSuper = true) @EqualsAndHashCode(callSuper = true) -public class LocalResponseNormalization extends Layer { +public class LocalResponseNormalization extends LayerConfiguration { // Defaults as per http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf //Set defaults here as well as in builder, in case users use no-arg constructor instead of builder @@ -75,14 +74,16 @@ public class LocalResponseNormalization extends Layer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization ret = - new org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.normalization.LocalResponseNormalization(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -128,16 +129,6 @@ public class LocalResponseNormalization extends Layer { return false; //No params in LRN } - @Override - public GradientNormalization getGradientNormalization() { - return GradientNormalization.None; - } - - @Override - public double getGradientNormalizationThreshold() { - return 0; - } - @Override public LayerMemoryReport getMemoryReport(InputType inputType) { val actElementsPerEx = inputType.arrayElementsPerExample(); @@ -147,14 +138,14 @@ public class LocalResponseNormalization extends Layer { return new LayerMemoryReport.Builder(layerName, DenseLayer.class, inputType, inputType).standardMemory(0, 0) .workingMemory(0, 2 * actElementsPerEx, 0, 3 * actElementsPerEx) - .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayer + .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayerConfiguration .build(); } @AllArgsConstructor @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { // defaults based on AlexNet model @@ -275,7 +266,7 @@ public class LocalResponseNormalization extends Layer { * Set the data format for the CNN activations - NCHW (channels first) or NHWC (channels last). * See {@link CNN2DFormat} for more details.
* Default: NCHW - * @param format Format for activations (in and out) + * @param dataFormat Format for activations (in and out) */ public Builder dataFormat(CNN2DFormat dataFormat){ this.dataFormat = dataFormat; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected1D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected1D.java index 921d0f9ea..ea679c9d4 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected1D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected1D.java @@ -212,12 +212,13 @@ public class LocallyConnected1D extends SameDiffLayer { } @Override - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) { + NeuralNetConfiguration global_conf = globalConfig.build(); if (activation == null) { - activation = SameDiffLayerUtils.fromIActivation(globalConfig.getActivationFn()); + activation = SameDiffLayerUtils.fromIActivation(global_conf.getActivation()); } if (cm == null) { - cm = globalConfig.getConvolutionMode(); + cm = global_conf.getConvolutionMode(); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected2D.java index b44055332..5dd5ec62e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LocallyConnected2D.java @@ -229,12 +229,13 @@ public class LocallyConnected2D extends SameDiffLayer { } @Override - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) { + NeuralNetConfiguration gconf = globalConfig.build(); if (activation == null) { - activation = SameDiffLayerUtils.fromIActivation(globalConfig.getActivationFn()); + activation = SameDiffLayerUtils.fromIActivation(gconf.getActivation()); } if (cm == null) { - cm = globalConfig.getConvolutionMode(); + cm = gconf.getConvolutionMode(); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LossLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LossLayer.java index e88a66298..226d3255d 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LossLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/LossLayer.java @@ -57,13 +57,15 @@ public class LossLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - org.deeplearning4j.nn.layers.LossLayer ret = new org.deeplearning4j.nn.layers.LossLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.LossLayer ret = new org.deeplearning4j.nn.layers.LossLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, 
initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/NoParamLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/NoParamLayer.java index 227650a5f..57a58f42c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/NoParamLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/NoParamLayer.java @@ -21,19 +21,24 @@ package org.deeplearning4j.nn.conf.layers; import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.LayerType; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.params.EmptyParamInitializer; +import org.nd4j.linalg.learning.config.IUpdater; import org.nd4j.linalg.learning.regularization.Regularization; import java.util.List; @NoArgsConstructor -public abstract class NoParamLayer extends Layer { +public abstract class NoParamLayer extends LayerConfiguration { protected NoParamLayer(Builder builder) { + super(builder); + setType(LayerType.POOL); } @Override @@ -52,18 +57,17 @@ public abstract class NoParamLayer extends Layer { return null; } - @Override - public GradientNormalization getGradientNormalization() { - return GradientNormalization.None; - } - - @Override - public double getGradientNormalizationThreshold() { - return 0; - } - @Override public boolean isPretrainParam(String paramName) { throw new UnsupportedOperationException(getClass().getSimpleName() + " does not contain parameters"); } + +/** +* + * @return +*/ + @Override + public IUpdater getIUpdater() { + return Updater.NONE.getIUpdaterWithDefaultConfig(); + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/OutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/OutputLayer.java index d31ff854a..2884ac424 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/OutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/OutputLayer.java @@ -53,14 +53,16 @@ public class OutputLayer extends BaseOutputLayer { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("OutputLayer", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + runInheritance(); - org.deeplearning4j.nn.layers.OutputLayer ret = new org.deeplearning4j.nn.layers.OutputLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + org.deeplearning4j.nn.layers.OutputLayer ret = new org.deeplearning4j.nn.layers.OutputLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PReLULayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PReLULayer.java index 289009ad7..249339df9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PReLULayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PReLULayer.java @@ -41,7 +41,7 @@ import java.util.Map; @NoArgsConstructor @ToString(callSuper = true) @EqualsAndHashCode(callSuper = true) -public class PReLULayer extends BaseLayer { +public class PReLULayer extends BaseLayerConfiguration { private long[] inputShape = null; private long[] sharedAxes = null; @@ -59,13 +59,14 @@ public class PReLULayer extends BaseLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - org.deeplearning4j.nn.layers.feedforward.PReLU ret = new org.deeplearning4j.nn.layers.feedforward.PReLU(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.feedforward.PReLU ret = new org.deeplearning4j.nn.layers.feedforward.PReLU(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -116,7 +117,7 @@ public class PReLULayer extends BaseLayer { public Builder(){ //Default to 0s, and don't inherit global default - this.weightInitFn = new WeightInitConstant(0); + this.weightInit = new WeightInitConstant(0); } /** diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PrimaryCapsules.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PrimaryCapsules.java index 2107bdede..fc0c256f7 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PrimaryCapsules.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/PrimaryCapsules.java @@ -87,7 +87,7 @@ public class PrimaryCapsules extends SameDiffLayer { } if(capsules < 0){ - throw new IllegalArgumentException("Invalid configuration for Capsule Layer (layer name = \"" + throw new IllegalArgumentException("Invalid configuration for Capsule ILayer (layer name = \"" + layerName + "\"):" + " capsules must be >= 0 if set. 
Got: " + capsules); @@ -417,7 +417,7 @@ public class PrimaryCapsules extends SameDiffLayer { } @Override - public E build() { + public E build() { return (E) new PrimaryCapsules(this); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RecurrentAttentionLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RecurrentAttentionLayer.java index 161acc44e..a1bbd9f83 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RecurrentAttentionLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RecurrentAttentionLayer.java @@ -150,9 +150,9 @@ public class RecurrentAttentionLayer extends SameDiffLayer { } @Override - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) { if (activation == null) { - activation = SameDiffLayerUtils.fromIActivation(globalConfig.getActivationFn()); + activation = SameDiffLayerUtils.fromIActivation(globalConfig.build().getActivation()); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnLossLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnLossLayer.java index 376886cc4..e7db009ed 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnLossLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnLossLayer.java @@ -59,14 +59,19 @@ public class RnnLossLayer extends FeedForwardLayer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + lconf.setNetConfiguration(conf); + runInheritance(); + org.deeplearning4j.nn.layers.recurrent.RnnLossLayer ret = - new org.deeplearning4j.nn.layers.recurrent.RnnLossLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.recurrent.RnnLossLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnOutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnOutputLayer.java index 9f17c2cee..5b59c5399 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnOutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/RnnOutputLayer.java @@ -59,15 +59,16 @@ public class RnnOutputLayer extends BaseOutputLayer { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("RnnOutputLayer", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer ret = - new 
org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.recurrent.RnnOutputLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SeparableConvolution2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SeparableConvolution2D.java index a6efb86b1..924c4cc2a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SeparableConvolution2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SeparableConvolution2D.java @@ -79,7 +79,7 @@ public class SeparableConvolution2D extends ConvolutionLayer { } @Override - protected void initializeConstraints(org.deeplearning4j.nn.conf.layers.Layer.Builder builder) { + protected void initializeConstraints(LayerConfiguration.Builder builder) { super.initializeConstraints(builder); if (((Builder) builder).pointWiseConstraints != null) { if (constraints == null) { @@ -117,15 +117,16 @@ public class SeparableConvolution2D extends ConvolutionLayer { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("SeparableConvolution2D", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.convolution.SeparableConvolution2DLayer ret = - new org.deeplearning4j.nn.layers.convolution.SeparableConvolution2DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.SeparableConvolution2DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToBatchLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToBatchLayer.java index b8de7a4e4..50f91781b 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToBatchLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToBatchLayer.java @@ -67,14 +67,16 @@ public class SpaceToBatchLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.SpaceToBatch ret = - new org.deeplearning4j.nn.layers.convolution.SpaceToBatch(conf, networkDataType); - 
ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.SpaceToBatch(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -130,7 +132,7 @@ public class SpaceToBatchLayer extends NoParamLayer { @NoArgsConstructor @Getter @Setter - public static class Builder> extends Layer.Builder { + public static class Builder> extends LayerConfiguration.Builder { /** * Block size for SpaceToBatch layer. Should be a length 2 array for the height and width diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToDepthLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToDepthLayer.java index b35092359..462f3ab5e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToDepthLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SpaceToDepthLayer.java @@ -73,14 +73,16 @@ public class SpaceToDepthLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.SpaceToDepth ret = - new org.deeplearning4j.nn.layers.convolution.SpaceToDepth(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.SpaceToDepth(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -134,7 +136,7 @@ public class SpaceToDepthLayer extends NoParamLayer { @NoArgsConstructor @Getter @Setter - public static class Builder> extends Layer.Builder { + public static class Builder> extends LayerConfiguration.Builder { protected int blockSize; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling1DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling1DLayer.java index 267e67005..be544fb2f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling1DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling1DLayer.java @@ -61,14 +61,16 @@ public class Subsampling1DLayer extends SubsamplingLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.subsampling.Subsampling1DLayer ret = - new org.deeplearning4j.nn.layers.convolution.subsampling.Subsampling1DLayer(conf, networkDataType); - 
ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.subsampling.Subsampling1DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling3DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling3DLayer.java index cb643cd7b..123df419b 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling3DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Subsampling3DLayer.java @@ -113,14 +113,16 @@ public class Subsampling3DLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection iterationListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.subsampling.Subsampling3DLayer ret = - new org.deeplearning4j.nn.layers.convolution.subsampling.Subsampling3DLayer(conf, networkDataType); - ret.setListeners(iterationListeners); + new org.deeplearning4j.nn.layers.convolution.subsampling.Subsampling3DLayer(lconf, networkDataType); + ret.addTrainingListeners(iterationListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -338,7 +340,7 @@ public class Subsampling3DLayer extends NoParamLayer { @Setter @NoArgsConstructor protected static abstract class BaseSubsamplingBuilder> - extends Layer.Builder { + extends LayerConfiguration.Builder { protected org.deeplearning4j.nn.conf.layers.PoolingType poolingType = org.deeplearning4j.nn.conf.layers.PoolingType.MAX; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SubsamplingLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SubsamplingLayer.java index f1d546234..55e766133 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SubsamplingLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/SubsamplingLayer.java @@ -134,14 +134,17 @@ public class SubsamplingLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + runInheritance(); + org.deeplearning4j.nn.layers.convolution.subsampling.SubsamplingLayer ret = - new org.deeplearning4j.nn.layers.convolution.subsampling.SubsamplingLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.subsampling.SubsamplingLayer(lconf, networkDataType); + 
ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -390,7 +393,7 @@ public class SubsamplingLayer extends NoParamLayer { @Getter @Setter protected static abstract class BaseSubsamplingBuilder> - extends Layer.Builder { + extends LayerConfiguration.Builder { protected org.deeplearning4j.nn.conf.layers.PoolingType poolingType = org.deeplearning4j.nn.conf.layers.PoolingType.MAX; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling1D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling1D.java index 6a012ed15..a2d3c4fb8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling1D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling1D.java @@ -56,14 +56,17 @@ public class Upsampling1D extends BaseUpsamplingLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling1D ret = - new org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling1D(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling1D(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling2D.java index bdbbb0c73..48e86c848 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling2D.java @@ -63,14 +63,16 @@ public class Upsampling2D extends BaseUpsamplingLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling2D ret = - new org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling2D(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling2D(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); 
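For the SameDiff-based configurations earlier in this patch (LocallyConnected1D, LocallyConnected2D, RecurrentAttentionLayer), applyGlobalConfigToLayer() now receives the new NeuralNetConfiguration.NeuralNetConfigurationBuilder instead of the old NeuralNetConfiguration.Builder, and the global values are read from a built configuration. A sketch of the new shape, following the LocallyConnected2D hunk; the surrounding class and its activation/cm fields are assumed to exist as in that layer.

    @Override
    public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) {
        NeuralNetConfiguration globalConf = globalConfig.build();
        if (activation == null) {
            // The global activation is now read from the built configuration via getActivation()
            activation = SameDiffLayerUtils.fromIActivation(globalConf.getActivation());
        }
        if (cm == null) {
            cm = globalConf.getConvolutionMode();
        }
    }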
ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling3D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling3D.java index ef5d832b4..4d629e2fd 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling3D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/Upsampling3D.java @@ -61,14 +61,18 @@ public class Upsampling3D extends BaseUpsamplingLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection iterationListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling3D ret = - new org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling3D(conf, networkDataType); - ret.setListeners(iterationListeners); + new org.deeplearning4j.nn.layers.convolution.upsampling.Upsampling3D(lconf, networkDataType); + + + ret.addTrainingListeners(iterationListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding1DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding1DLayer.java index 98f6f8077..43f6e4ed1 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding1DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding1DLayer.java @@ -66,13 +66,15 @@ public class ZeroPadding1DLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.ZeroPadding1DLayer ret = - new org.deeplearning4j.nn.layers.convolution.ZeroPadding1DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.ZeroPadding1DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -125,7 +127,7 @@ public class ZeroPadding1DLayer extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Padding value for left and right. 
Must be length 2 array diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding3DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding3DLayer.java index f6b97cfcc..cdabe2788 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding3DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPadding3DLayer.java @@ -53,13 +53,15 @@ public class ZeroPadding3DLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection iterationListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.ZeroPadding3DLayer ret = - new org.deeplearning4j.nn.layers.convolution.ZeroPadding3DLayer(conf, networkDataType); - ret.setListeners(iterationListeners); + new org.deeplearning4j.nn.layers.convolution.ZeroPadding3DLayer(lconf, networkDataType); + ret.addTrainingListeners(iterationListeners); ret.setIndex(layerIndex); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -114,7 +116,7 @@ public class ZeroPadding3DLayer extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * [padLeftD, padRightD, padLeftH, padRightH, padLeftW, padRightW] diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPaddingLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPaddingLayer.java index 459205609..4582f42c5 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPaddingLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/ZeroPaddingLayer.java @@ -70,13 +70,15 @@ public class ZeroPaddingLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer ret = - new org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -118,7 +120,7 @@ public class ZeroPaddingLayer extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Padding value for top, bottom, left, and right. 
Must be length 4 array diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping1D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping1D.java index fd2546019..ef3cedabe 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping1D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping1D.java @@ -25,7 +25,7 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.InputTypeUtil; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.NoParamLayer; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.layers.convolution.Cropping1DLayer; @@ -76,12 +76,14 @@ public class Cropping1D extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - Cropping1DLayer ret = new Cropping1DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + Cropping1DLayer ret = new Cropping1DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -113,7 +115,7 @@ public class Cropping1D extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Cropping amount for top/bottom (in that order). Must be length 1 or 2 array. 
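The cropping and zero-padding configurations in this last group follow the same instantiate() pattern, with two small differences visible in the hunks here: Cropping1D/2D/3D first attach the network configuration via setNetConfiguration(conf) before resolving the flattened LayerConfiguration, and, being parameterless, these layers do not set a parameter view array. Their builders also move from Layer.Builder to LayerConfiguration.Builder. A condensed sketch of the method body, based on the Cropping1D hunk above.

    setNetConfiguration(conf);   // new: attach the owning network configuration first
    LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex);
    Cropping1DLayer ret = new Cropping1DLayer(lconf, networkDataType);
    ret.addTrainingListeners(trainingListeners);
    ret.setIndex(layerIndex);
    // No parameter view here: cropping layers have no parameters, so the initializer
    // of these NoParamLayer subclasses simply yields an empty parameter table.
    Map<String, INDArray> paramTable = initializer().init(this, layerParamsView, initializeParams);
    ret.setParamTable(paramTable);
    ret.setLayerConfiguration(lconf);
    return ret;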
*/ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping2D.java index 29aad71bd..d73d33950 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping2D.java @@ -26,7 +26,7 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.InputTypeUtil; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.NoParamLayer; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.layers.convolution.Cropping2DLayer; @@ -92,12 +92,14 @@ public class Cropping2D extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - Cropping2DLayer ret = new Cropping2DLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + Cropping2DLayer ret = new Cropping2DLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -131,7 +133,7 @@ public class Cropping2D extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Cropping amount for top/bottom/left/right (in that order). A length 4 array. 
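Editorial note: the Cropping1D and Cropping2D hunks above additionally call setNetConfiguration(conf) before resolving the layer configuration, so a LayerConfiguration now carries a back-reference to the NeuralNetConfiguration that owns it. A small sketch of the resulting two-way navigation; only getFlattenedLayerConfigurations(), getNetConfiguration() and setNetConfiguration() are taken from this patch, the helper method itself is illustrative:

    static NeuralNetConfiguration ownerOfFirstLayer(NeuralNetConfiguration netConf) {
        // Network configuration -> per-layer configuration, by flattened index.
        LayerConfiguration lconf = netConf.getFlattenedLayerConfigurations().get(0);
        // Per-layer configuration -> owning network configuration; instantiate()
        // establishes this back-reference via setNetConfiguration(conf), so it may
        // still be null on a configuration that has not been instantiated yet.
        return lconf.getNetConfiguration();
    }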
diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping3D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping3D.java index 1ab34b17b..a950ed633 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping3D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/convolutional/Cropping3D.java @@ -25,7 +25,7 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.InputTypeUtil; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.NoParamLayer; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.layers.convolution.Cropping3DLayer; @@ -84,12 +84,14 @@ public class Cropping3D extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection iterationListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - Cropping3DLayer ret = new Cropping3DLayer(conf, networkDataType); - ret.setListeners(iterationListeners); + setNetConfiguration(conf); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + Cropping3DLayer ret = new Cropping3DLayer(lconf, networkDataType); + ret.addTrainingListeners(iterationListeners); ret.setIndex(layerIndex); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -121,7 +123,7 @@ public class Cropping3D extends NoParamLayer { @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Cropping amount, a length 6 array, i.e. 
crop left depth, crop right depth, crop left height, crop right height, crop left width, crop right width diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/ElementWiseMultiplicationLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/ElementWiseMultiplicationLayer.java index 79ab2ca54..703d95cea 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/ElementWiseMultiplicationLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/ElementWiseMultiplicationLayer.java @@ -26,6 +26,7 @@ import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.memory.MemoryReport; import org.deeplearning4j.nn.params.ElementWiseParamInitializer; @@ -58,18 +59,21 @@ public class ElementWiseMultiplicationLayer extends org.deeplearning4j.nn.conf.l @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + setNetConfiguration(conf); if (this.nIn != this.nOut) { throw new IllegalStateException("Element wise layer must have the same input and output size. Got nIn=" + nIn + ", nOut=" + nOut); } + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.feedforward.elementwise.ElementWiseMultiplicationLayer ret = - new org.deeplearning4j.nn.layers.feedforward.elementwise.ElementWiseMultiplicationLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.feedforward.elementwise.ElementWiseMultiplicationLayer(lconf, networkDataType); + + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -113,7 +117,7 @@ public class ElementWiseMultiplicationLayer extends org.deeplearning4j.nn.conf.l return new LayerMemoryReport.Builder(layerName, ElementWiseMultiplicationLayer.class, inputType, outputType) .standardMemory(numParams, updaterStateSize) .workingMemory(0, 0, trainSizeFixed, trainSizeVariable) //No additional memory (beyond activations) for inference - .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayer + .cacheMemory(MemoryReport.CACHE_MODE_ALL_ZEROS, MemoryReport.CACHE_MODE_ALL_ZEROS) //No caching in DenseLayerConfiguration .build(); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayer.java index ba5674bbb..eb15350dc 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayer.java @@ -29,7 +29,7 @@ import org.deeplearning4j.nn.conf.GradientNormalization; import 
org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.params.FrozenLayerParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; @@ -38,36 +38,32 @@ import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.learning.config.IUpdater; import org.nd4j.linalg.learning.regularization.Regularization; import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import java.util.Collection; import java.util.List; @EqualsAndHashCode(callSuper = false) -public class FrozenLayer extends Layer { +public class FrozenLayer extends LayerConfiguration { - @Getter - protected Layer layer; + /** + * A layer configuration, only if this layer config has been created from another one + */ + @Getter @Setter + private LayerConfiguration innerConfiguration; private FrozenLayer(Builder builder) { super(builder); - this.layer = builder.layer; + this.innerConfiguration = builder.layer; } - public FrozenLayer(@JsonProperty("layer") Layer layer) { - this.layer = layer; - } - - public NeuralNetConfiguration getInnerConf(NeuralNetConfiguration conf) { - NeuralNetConfiguration nnc = conf.clone(); - nnc.setLayer(layer); - return nnc; + public FrozenLayer(@JsonProperty("layer") LayerConfiguration layer) { + this.innerConfiguration = layer; } @Override - public Layer clone() { + public LayerConfiguration clone() { FrozenLayer l = (FrozenLayer) super.clone(); - l.layer = layer.clone(); + l.innerConfiguration = innerConfiguration.clone(); return l; } @@ -77,17 +73,17 @@ public class FrozenLayer extends Layer { boolean initializeParams, DataType networkDataType) { //Need to be able to instantiate a layer, from a config - for JSON -> net type situations - org.deeplearning4j.nn.api.Layer underlying = layer.instantiate(getInnerConf(conf), trainingListeners, + org.deeplearning4j.nn.api.Layer underlying = innerConfiguration.instantiate(getNetConfiguration(), trainingListeners, layerIndex, layerParamsView, initializeParams, networkDataType); - NeuralNetConfiguration nncUnderlying = underlying.conf(); - if (nncUnderlying.variables() != null) { - List vars = nncUnderlying.variables(true); - nncUnderlying.clearVariables(); - conf.clearVariables(); + NeuralNetConfiguration nncUnderlying = underlying.getNetConfiguration(); + if (nncUnderlying.netWideVariables() != null) { + List vars = nncUnderlying.netWideVariables(true); + nncUnderlying.clearNetWideVariable(); + conf.clearNetWideVariable(); for (String s : vars) { - conf.variables(false).add(s); - nncUnderlying.variables(false).add(s); + conf.netWideVariables(false).add(s); + nncUnderlying.netWideVariables(false).add(s); } } @@ -101,17 +97,17 @@ public class FrozenLayer extends Layer { @Override public InputType getOutputType(int layerIndex, InputType inputType) { - return layer.getOutputType(layerIndex, inputType); + return innerConfiguration.getOutputType(layerIndex, inputType); } @Override public void setNIn(InputType inputType, boolean override) { - layer.setNIn(inputType, override); + innerConfiguration.setNIn(inputType, override); } @Override public InputPreProcessor getPreProcessorForInputType(InputType inputType) { - return layer.getPreProcessorForInputType(inputType); + return 
innerConfiguration.getPreProcessorForInputType(inputType); } @Override @@ -129,40 +125,30 @@ public class FrozenLayer extends Layer { return null; } - @Override - public GradientNormalization getGradientNormalization() { - return layer.getGradientNormalization(); - } - - @Override - public double getGradientNormalizationThreshold() { - return layer.getGradientNormalizationThreshold(); - } - @Override public LayerMemoryReport getMemoryReport(InputType inputType) { - return layer.getMemoryReport(inputType); + return innerConfiguration.getMemoryReport(inputType); } @Override public void setLayerName(String layerName) { super.setLayerName(layerName); - layer.setLayerName(layerName); + innerConfiguration.setLayerName(layerName); } @Override public void setConstraints(List constraints) { this.constraints = constraints; - this.layer.setConstraints(constraints); + this.innerConfiguration.setConstraints(constraints); } @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { - private Layer layer; + private LayerConfiguration layer; - public Builder layer(Layer layer) { + public Builder layer(LayerConfiguration layer) { this.setLayer(layer); return this; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayerWithBackprop.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayerWithBackprop.java index 53d7ff914..6abf467d3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayerWithBackprop.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/FrozenLayerWithBackprop.java @@ -25,8 +25,8 @@ import lombok.EqualsAndHashCode; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.api.layers.LayerConstraint; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.params.FrozenLayerWithBackpropParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; @@ -40,20 +40,25 @@ import java.util.List; @Data @EqualsAndHashCode(callSuper = false) -public class FrozenLayerWithBackprop extends BaseWrapperLayer { +public class FrozenLayerWithBackprop extends BaseWrapperLayerConfiguration { - public FrozenLayerWithBackprop(@JsonProperty("layer") Layer layer) { + /** + * Create a new Frozen Layer, that wraps another layer with backpropagation enabled. 
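Editorial note: FrozenLayer above now extends LayerConfiguration and stores the wrapped configuration in the new innerConfiguration field, delegating output type, preprocessors, memory report and constraints to it (FrozenLayerWithBackprop, continued below, follows the same direction). A hedged usage sketch; the DenseLayer configuration and its builder are assumed from stock DL4J and do not appear in this hunk:

    // Freeze an existing layer configuration, e.g. for transfer learning.
    LayerConfiguration dense = new DenseLayer.Builder()   // assumed API, not shown in this patch
            .nIn(128)
            .nOut(64)
            .build();

    FrozenLayer frozen = new FrozenLayer(dense);          // stored as innerConfiguration
    LayerConfiguration inner = frozen.getInnerConfiguration(); // Lombok @Getter from the hunk

    // Delegated queries go straight to the wrapped configuration, e.g.
    // frozen.getOutputType(i, inputType) forwards to dense.getOutputType(i, inputType).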
+ * + * @param layer configuration of the layer to wrap + */ + public FrozenLayerWithBackprop(@JsonProperty("layer") LayerConfiguration layer) { super(layer); } public NeuralNetConfiguration getInnerConf(NeuralNetConfiguration conf) { NeuralNetConfiguration nnc = conf.clone(); - nnc.setLayer(underlying); + nnc.getLayerConfigurations().add(0, underlying); return nnc; } @Override - public Layer clone() { + public LayerConfiguration clone() { FrozenLayerWithBackprop l = (FrozenLayerWithBackprop) super.clone(); l.underlying = underlying.clone(); return l; @@ -65,22 +70,23 @@ public class FrozenLayerWithBackprop extends BaseWrapperLayer { boolean initializeParams, DataType networkDataType) { //Need to be able to instantiate a layer, from a config - for JSON -> net type situations - org.deeplearning4j.nn.api.Layer underlying = getUnderlying().instantiate(getInnerConf(conf), trainingListeners, + org.deeplearning4j.nn.api.Layer newUnderlyingLayer = underlying.instantiate(conf, trainingListeners, layerIndex, layerParamsView, initializeParams, networkDataType); - NeuralNetConfiguration nncUnderlying = underlying.conf(); + newUnderlyingLayer.setLayerConfiguration(underlying); //Fix a problem, where the embedded layer gets the conf of the frozen layer, rather than its own + NeuralNetConfiguration nncUnderlying = underlying.getNetConfiguration(); - if (nncUnderlying.variables() != null) { - List vars = nncUnderlying.variables(true); - nncUnderlying.clearVariables(); - conf.clearVariables(); + if (nncUnderlying.netWideVariables() != null) { + List vars = nncUnderlying.netWideVariables(true); + nncUnderlying.clearNetWideVariable(); + conf.clearNetWideVariable(); for (String s : vars) { - conf.variables(false).add(s); - nncUnderlying.variables(false).add(s); + conf.netWideVariables(false).add(s); + nncUnderlying.netWideVariables(false).add(s); } } - return new org.deeplearning4j.nn.layers.FrozenLayerWithBackprop(underlying); + return new org.deeplearning4j.nn.layers.FrozenLayerWithBackprop(newUnderlyingLayer); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/RepeatVector.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/RepeatVector.java index 127502b68..541c26914 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/RepeatVector.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/misc/RepeatVector.java @@ -26,6 +26,7 @@ import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.memory.MemoryReport; import org.deeplearning4j.nn.params.EmptyParamInitializer; @@ -65,13 +66,15 @@ public class RepeatVector extends FeedForwardLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - org.deeplearning4j.nn.layers.RepeatVector ret = new org.deeplearning4j.nn.layers.RepeatVector(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + + org.deeplearning4j.nn.layers.RepeatVector ret = new 
org.deeplearning4j.nn.layers.RepeatVector(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/objdetect/Yolo2OutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/objdetect/Yolo2OutputLayer.java index 1229e8cfd..70bd048e6 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/objdetect/Yolo2OutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/objdetect/Yolo2OutputLayer.java @@ -27,10 +27,10 @@ import lombok.Setter; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.GradientNormalization; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.preprocessor.FeedForwardToCnnPreProcessor; import org.deeplearning4j.nn.params.EmptyParamInitializer; @@ -51,7 +51,7 @@ import java.util.Map; @Data @EqualsAndHashCode(callSuper = false) -public class Yolo2OutputLayer extends org.deeplearning4j.nn.conf.layers.Layer { +public class Yolo2OutputLayer extends LayerConfiguration { private double lambdaCoord; private double lambdaNoObj; @@ -79,14 +79,16 @@ public class Yolo2OutputLayer extends org.deeplearning4j.nn.conf.layers.Layer { @Override public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer ret = - new org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.objdetect.Yolo2OutputLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -132,17 +134,6 @@ public class Yolo2OutputLayer extends org.deeplearning4j.nn.conf.layers.Layer { public boolean isPretrainParam(String paramName) { return false; //No params } - - @Override - public GradientNormalization getGradientNormalization() { - return GradientNormalization.None; - } - - @Override - public double getGradientNormalizationThreshold() { - return 1.0; - } - @Override public LayerMemoryReport getMemoryReport(InputType inputType) { long numValues = inputType.arrayElementsPerExample(); @@ -156,7 +147,7 @@ public class Yolo2OutputLayer extends org.deeplearning4j.nn.conf.layers.Layer { @Getter @Setter - public static class Builder extends 
org.deeplearning4j.nn.conf.layers.Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { /** * Loss function coefficient for position and size/scale components of the loss function. Default (as per diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/Bidirectional.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/Bidirectional.java index 388e131cd..573492f3a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/Bidirectional.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/Bidirectional.java @@ -29,8 +29,8 @@ import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.BaseRecurrentLayer; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.layers.recurrent.BidirectionalLayer; import org.deeplearning4j.nn.params.BidirectionalParamInitializer; @@ -47,13 +47,12 @@ import java.util.List; import java.util.Map; import static org.nd4j.linalg.indexing.NDArrayIndex.interval; -import static org.nd4j.linalg.indexing.NDArrayIndex.point; @NoArgsConstructor @Data @EqualsAndHashCode(callSuper = true, exclude = {"initializer"}) @JsonIgnoreProperties({"initializer"}) -public class Bidirectional extends Layer { +public class Bidirectional extends LayerConfiguration { /** * This Mode enumeration defines how the activations for the forward and backward networks should be combined.
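Editorial note: the Bidirectional changes that continue below replace Layer with LayerConfiguration in both constructors and in the Builder, and keep the guard that only recurrent configurations (or LastTimeStep / BaseWrapperLayerConfiguration wrappers) may be wrapped. A usage sketch under that API; the LSTM configuration class is assumed from stock DL4J and is not part of this diff:

    // Wrap a recurrent layer configuration in both directions; Mode.CONCAT is also
    // what the single-argument constructor defaults to.
    LayerConfiguration lstm = new LSTM.Builder()           // assumed API, not shown here
            .nIn(32)
            .nOut(64)
            .build();

    Bidirectional bi = new Bidirectional(Bidirectional.Mode.CONCAT, lstm);

    // A non-recurrent configuration such as a plain DenseLayer would be rejected
    // with the IllegalArgumentException shown in the hunk below.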
@@ -68,8 +67,8 @@ public class Bidirectional extends Layer { ADD, MUL, AVERAGE, CONCAT } - private Layer fwd; - private Layer bwd; + private LayerConfiguration fwd; + private LayerConfiguration bwd; private Mode mode; private transient BidirectionalParamInitializer initializer; @@ -82,7 +81,7 @@ public class Bidirectional extends Layer { * * @param layer layer to wrap */ - public Bidirectional(@NonNull Layer layer) { + public Bidirectional(@NonNull LayerConfiguration layer) { this(Mode.CONCAT, layer); } @@ -92,9 +91,9 @@ public class Bidirectional extends Layer { * @param mode Mode to use to combine activations. See {@link Mode} for details * @param layer layer to wrap */ - public Bidirectional(@NonNull Mode mode, @NonNull Layer layer) { + public Bidirectional(@NonNull Mode mode, @NonNull LayerConfiguration layer) { if (!(layer instanceof BaseRecurrentLayer || layer instanceof LastTimeStep - || layer instanceof BaseWrapperLayer)) { + || layer instanceof BaseWrapperLayerConfiguration)) { throw new IllegalArgumentException("Cannot wrap a non-recurrent layer: " + "config must extend BaseRecurrentLayer or LastTimeStep " + "Got class: " + layer.getClass()); @@ -128,6 +127,7 @@ public class Bidirectional extends Layer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); NeuralNetConfiguration c1 = conf.clone(); NeuralNetConfiguration c2 = conf.clone(); c1.setLayer(fwd); @@ -140,10 +140,10 @@ public class Bidirectional extends Layer { org.deeplearning4j.nn.api.Layer b = bwd.instantiate(c2, trainingListeners, layerIndex, bp, initializeParams, networkDataType); - BidirectionalLayer ret = new BidirectionalLayer(conf, f, b, layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + BidirectionalLayer ret = new BidirectionalLayer(lconf, f, b, layerParamsView); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } @@ -211,16 +211,6 @@ public class Bidirectional extends Layer { return fwd.getUpdaterByParam(sub); } - @Override - public GradientNormalization getGradientNormalization() { - return fwd.getGradientNormalization(); - } - - @Override - public double getGradientNormalizationThreshold() { - return fwd.getGradientNormalizationThreshold(); - } - @Override public void setLayerName(String layerName) { this.layerName = layerName; @@ -238,12 +228,12 @@ public class Bidirectional extends Layer { @AllArgsConstructor @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { private Mode mode; - private Layer layer; + private LayerConfiguration layer; - public void setLayer(Layer layer) { + public void setLayer(LayerConfiguration layer) { rnnLayer(layer); } @@ -252,9 +242,9 @@ public class Bidirectional extends Layer { return this; } - public Builder rnnLayer(Layer layer) { + public Builder rnnLayer(LayerConfiguration layer) { if (!(layer instanceof BaseRecurrentLayer || layer instanceof LastTimeStep - || layer instanceof BaseWrapperLayer)) { + || layer instanceof BaseWrapperLayerConfiguration)) { throw new IllegalArgumentException("Cannot wrap a non-recurrent layer: " + "config must extend BaseRecurrentLayer or LastTimeStep " 
+ "Got class: " + layer.getClass()); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/LastTimeStep.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/LastTimeStep.java index ce87b8051..a5dff218f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/LastTimeStep.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/LastTimeStep.java @@ -22,8 +22,8 @@ package org.deeplearning4j.nn.conf.layers.recurrent; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.layers.recurrent.LastTimeStepLayer; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; @@ -31,16 +31,16 @@ import org.nd4j.linalg.api.ndarray.INDArray; import java.util.Collection; -public class LastTimeStep extends BaseWrapperLayer { +public class LastTimeStep extends BaseWrapperLayerConfiguration { private LastTimeStep() {} - public LastTimeStep(Layer underlying) { + public LastTimeStep(LayerConfiguration underlying) { super(underlying); this.layerName = underlying.getLayerName(); // needed for keras import to match names } - public Layer getUnderlying() { + public LayerConfiguration getUnderlying() { return underlying; } @@ -49,8 +49,9 @@ public class LastTimeStep extends BaseWrapperLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); NeuralNetConfiguration conf2 = conf.clone(); - conf2.setLayer(((LastTimeStep) conf2.getLayer()).getUnderlying()); + conf2.setLayer(((LastTimeStep) lconf).getUnderlying()); return new LastTimeStepLayer(underlying.instantiate(conf2, trainingListeners, layerIndex, layerParamsView, initializeParams, networkDataType)); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/SimpleRnn.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/SimpleRnn.java index 7cbebeaf2..1d4c182aa 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/SimpleRnn.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/SimpleRnn.java @@ -26,6 +26,7 @@ import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.BaseRecurrentLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.LayerValidation; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.params.SimpleRnnParamInitializer; @@ -55,15 +56,16 @@ public class SimpleRnn extends BaseRecurrentLayer { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("SimpleRnn", 
getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.recurrent.SimpleRnn ret = - new org.deeplearning4j.nn.layers.recurrent.SimpleRnn(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.recurrent.SimpleRnn(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/TimeDistributed.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/TimeDistributed.java index d6004f6bb..73cddbf14 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/TimeDistributed.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/recurrent/TimeDistributed.java @@ -27,8 +27,8 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.layers.recurrent.TimeDistributedLayer; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; @@ -39,27 +39,29 @@ import java.util.Collection; @Data @EqualsAndHashCode(callSuper = true) -public class TimeDistributed extends BaseWrapperLayer { +public class TimeDistributed extends BaseWrapperLayerConfiguration { private RNNFormat rnnDataFormat = RNNFormat.NCW; /** - * @param underlying Underlying (internal) layer - should be a feed forward type such as DenseLayer + * @param underlying Underlying (internal) layer - should be a feed forward type such as DenseLayerConfiguration */ - public TimeDistributed(@JsonProperty("underlying") @NonNull Layer underlying, @JsonProperty("rnnDataFormat") RNNFormat rnnDataFormat) { + public TimeDistributed(@JsonProperty("underlying") @NonNull LayerConfiguration underlying, @JsonProperty("rnnDataFormat") RNNFormat rnnDataFormat) { super(underlying); this.rnnDataFormat = rnnDataFormat; } - public TimeDistributed(Layer underlying){ + public TimeDistributed(LayerConfiguration underlying){ super(underlying); } @Override public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + NeuralNetConfiguration conf2 = conf.clone(); - conf2.setLayer(((TimeDistributed) conf2.getLayer()).getUnderlying()); + conf2.setLayer(((TimeDistributed) lconf).getUnderlying()); return new TimeDistributedLayer(underlying.instantiate(conf2, trainingListeners, layerIndex, layerParamsView, initializeParams, networkDataType), rnnDataFormat); } diff --git 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/AbstractSameDiffLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/AbstractSameDiffLayer.java index 71bb2a95a..0d05a9486 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/AbstractSameDiffLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/AbstractSameDiffLayer.java @@ -30,7 +30,7 @@ import org.deeplearning4j.nn.conf.GradientNormalization; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.params.SameDiffParamInitializer; import org.deeplearning4j.nn.weights.WeightInit; @@ -54,7 +54,7 @@ import java.util.Map; @Slf4j @Data @EqualsAndHashCode(callSuper = true, doNotUseGetters = true) -public abstract class AbstractSameDiffLayer extends Layer { +public abstract class AbstractSameDiffLayer extends LayerConfiguration { protected List regularization; protected List regularizationBias; @@ -121,7 +121,7 @@ public abstract class AbstractSameDiffLayer extends Layer { } - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void applyGlobalConfigToLayer(NeuralNetConfiguration.NeuralNetConfigurationBuilder globalConfig) { //Default implementation: no op } @@ -187,24 +187,25 @@ public abstract class AbstractSameDiffLayer extends Layer { WeightInitUtil.initWeights(fanIn, fanOut, array.shape(), weightInit, null, paramReshapeOrder(null), array); } - public void applyGlobalConfig(NeuralNetConfiguration.Builder b) { + public void applyGlobalConfig(NeuralNetConfiguration.NeuralNetConfigurationBuilder b) { + NeuralNetConfiguration bConf = b.build(); if (regularization == null || regularization.isEmpty()) { - regularization = b.getRegularization(); + regularization = bConf.getRegularization(); } if (regularizationBias == null || regularizationBias.isEmpty()) { - regularizationBias = b.getRegularizationBias(); + regularizationBias = bConf.getRegularizationBias(); } if (updater == null) { - updater = b.getIUpdater(); + updater = bConf.getUpdater(); } if (biasUpdater == null) { - biasUpdater = b.getBiasUpdater(); + biasUpdater = bConf.getBiasUpdater(); } if (gradientNormalization == null) { - gradientNormalization = b.getGradientNormalization(); + gradientNormalization = bConf.getGradientNormalization(); } if (Double.isNaN(gradientNormalizationThreshold)) { - gradientNormalizationThreshold = b.getGradientNormalizationThreshold(); + gradientNormalizationThreshold = bConf.getGradientNormalizationThreshold(); } applyGlobalConfigToLayer(b); @@ -234,7 +235,7 @@ public abstract class AbstractSameDiffLayer extends Layer { @Getter @Setter - public static abstract class Builder> extends Layer.Builder { + public static abstract class Builder> extends LayerConfiguration.Builder { protected List regularization = new ArrayList<>(); protected List regularizationBias = new ArrayList<>(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaLayer.java index 51cdb3b6f..0b68bf649 100644 --- 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaLayer.java @@ -33,7 +33,7 @@ public abstract class SameDiffLambdaLayer extends SameDiffLayer { * The defineLayer method is used to define the forward pass for the layer * * @param sameDiff SameDiff instance to use to define the vertex - * @param layerInput Layer input variable + * @param layerInput ILayer input variable * @return The output variable (corresponding to the output activations for the layer) */ public abstract SDVariable defineLayer(SameDiff sameDiff, SDVariable layerInput); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaVertex.java index d3c10ec2f..7ec4fb2d5 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLambdaVertex.java @@ -37,7 +37,7 @@ public abstract class SameDiffLambdaVertex extends SameDiffVertex { * The defineVertex method is used to define the foward pass for the vertex * * @param sameDiff SameDiff instance to use to define the vertex - * @param inputs Layer input variable + * @param inputs ILayer input variable * @return The output variable (orresponding to the output activations for the vertex) */ public abstract SDVariable defineVertex(SameDiff sameDiff, VertexInputs inputs); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLayer.java index ea8fc2b09..cb16d2f26 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffLayer.java @@ -24,6 +24,7 @@ import lombok.*; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.optimize.api.TrainingListener; @@ -85,16 +86,19 @@ public abstract class SameDiffLayer extends AbstractSameDiffLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.samediff.SameDiffLayer ret = - new org.deeplearning4j.nn.layers.samediff.SameDiffLayer(conf, networkDataType); + new org.deeplearning4j.nn.layers.samediff.SameDiffLayer(lconf, networkDataType); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } + @SuppressWarnings("unchecked") @Getter @Setter diff --git 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffOutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffOutputLayer.java index d781dd244..8fa7fd4d0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffOutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffOutputLayer.java @@ -21,6 +21,7 @@ package org.deeplearning4j.nn.conf.layers.samediff; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.autodiff.samediff.SDVariable; import org.nd4j.autodiff.samediff.SameDiff; @@ -74,13 +75,15 @@ public abstract class SameDiffOutputLayer extends AbstractSameDiffLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.samediff.SameDiffOutputLayer ret = - new org.deeplearning4j.nn.layers.samediff.SameDiffOutputLayer(conf, networkDataType); + new org.deeplearning4j.nn.layers.samediff.SameDiffOutputLayer(lconf, networkDataType); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffVertex.java index 94a13ffec..accc675d0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/samediff/SameDiffVertex.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.conf.layers.samediff; import lombok.Data; import lombok.EqualsAndHashCode; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.TrainingConfig; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.conf.GradientNormalization; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; @@ -46,7 +46,7 @@ import java.util.Map; @Data @EqualsAndHashCode(callSuper = false) -public abstract class SameDiffVertex extends GraphVertex implements TrainingConfig { +public abstract class SameDiffVertex extends GraphVertex implements ITraininableLayerConfiguration { private SDVertexParams vertexParams; private String name; @@ -147,30 +147,31 @@ public abstract class SameDiffVertex extends GraphVertex implements TrainingConf } - public void applyGlobalConfig(NeuralNetConfiguration.Builder b) { + public void applyGlobalConfig(NeuralNetConfiguration b_conf) { + if(regularization == null || regularization.isEmpty()){ - regularization = b.getRegularization(); + regularization = b_conf.getRegularization(); } if(regularizationBias == null || regularizationBias.isEmpty()){ - regularizationBias = b.getRegularizationBias(); + regularizationBias = 
b_conf.getRegularizationBias(); } if (updater == null) { - updater = b.getIUpdater(); + updater = b_conf.getUpdater(); } if (biasUpdater == null) { - biasUpdater = b.getBiasUpdater(); + biasUpdater = b_conf.getBiasUpdater(); } if (gradientNormalization == null) { - gradientNormalization = b.getGradientNormalization(); + gradientNormalization = b_conf.getGradientNormalization(); } if (Double.isNaN(gradientNormalizationThreshold)) { - gradientNormalizationThreshold = b.getGradientNormalizationThreshold(); + gradientNormalizationThreshold = b_conf.getGradientNormalizationThreshold(); } - applyGlobalConfigToLayer(b); + applyGlobalConfigToLayer(b_conf); } - public void applyGlobalConfigToLayer(NeuralNetConfiguration.Builder globalConfig) { + public void applyGlobalConfigToLayer(NeuralNetConfiguration globalConfig) { //Default implementation: no op } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskLayer.java index 181d32b4c..bd39eb828 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskLayer.java @@ -25,6 +25,7 @@ import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.NoParamLayer; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.params.EmptyParamInitializer; @@ -43,12 +44,13 @@ public class MaskLayer extends NoParamLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - org.deeplearning4j.nn.layers.util.MaskLayer ret = new org.deeplearning4j.nn.layers.util.MaskLayer(conf, networkDataType); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + org.deeplearning4j.nn.layers.util.MaskLayer ret = new org.deeplearning4j.nn.layers.util.MaskLayer(lconf, networkDataType); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskZeroLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskZeroLayer.java index 8a3d309a5..18f9cadc1 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskZeroLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/util/MaskZeroLayer.java @@ -24,8 +24,8 @@ import lombok.*; import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import 
org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.optimize.api.TrainingListener; import org.nd4j.linalg.api.buffer.DataType; @@ -36,7 +36,7 @@ import java.util.Collection; @Data @EqualsAndHashCode(callSuper = false) -public class MaskZeroLayer extends BaseWrapperLayer { +public class MaskZeroLayer extends BaseWrapperLayerConfiguration { private double maskingValue = 0.0; @@ -49,7 +49,7 @@ public class MaskZeroLayer extends BaseWrapperLayer { } - public MaskZeroLayer(@JsonProperty("underlying") Layer underlying, @JsonProperty("maskingValue") double maskingValue) { + public MaskZeroLayer(@JsonProperty("underlying") LayerConfiguration underlying, @JsonProperty("maskingValue") double maskingValue) { this.underlying = underlying; this.maskingValue = maskingValue; } @@ -61,7 +61,7 @@ public class MaskZeroLayer extends BaseWrapperLayer { boolean initializeParams, DataType networkDataType) { NeuralNetConfiguration conf2 = conf.clone(); - conf2.setLayer(((BaseWrapperLayer) conf2.getLayer()).getUnderlying()); + conf2.setLayer(((BaseWrapperLayerConfiguration) this).getUnderlying()); org.deeplearning4j.nn.api.Layer underlyingLayer = underlying.instantiate(conf2, trainingListeners, layerIndex, layerParamsView, initializeParams, networkDataType); @@ -102,12 +102,12 @@ public class MaskZeroLayer extends BaseWrapperLayer { @NoArgsConstructor @Getter @Setter - public static class Builder extends Layer.Builder { + public static class Builder extends LayerConfiguration.Builder { - private Layer underlying; + private LayerConfiguration underlying; private double maskValue; - public Builder setUnderlying(Layer underlying) { + public Builder setUnderlying(LayerConfiguration underlying) { this.underlying = underlying; return this; } @@ -117,7 +117,7 @@ public class MaskZeroLayer extends BaseWrapperLayer { return this; } - public Builder underlying(Layer underlying){ + public Builder underlying(LayerConfiguration underlying){ setUnderlying(underlying); return this; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/variational/VariationalAutoencoder.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/variational/VariationalAutoencoder.java index ca1f10bd0..a4cf67c79 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/variational/VariationalAutoencoder.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/variational/VariationalAutoencoder.java @@ -26,6 +26,7 @@ import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.BasePretrainNetwork; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.LayerValidation; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.conf.memory.MemoryReport; @@ -68,15 +69,17 @@ public class VariationalAutoencoder extends BasePretrainNetwork { int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("VariationalAutoencoder", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.variational.VariationalAutoencoder ret = - new 
org.deeplearning4j.nn.layers.variational.VariationalAutoencoder(conf, networkDataType); + new org.deeplearning4j.nn.layers.variational.VariationalAutoencoder(lconf, networkDataType); + lconf.runInheritance(); - ret.setListeners(trainingListeners); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayer.java deleted file mode 100644 index ca90ee7a1..000000000 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayer.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * ****************************************************************************** - * * - * * - * * This program and the accompanying materials are made available under the - * * terms of the Apache License, Version 2.0 which is available at - * * https://www.apache.org/licenses/LICENSE-2.0. - * * - * * See the NOTICE file distributed with this work for additional - * * information regarding copyright ownership. - * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * * License for the specific language governing permissions and limitations - * * under the License. 
- * * - * * SPDX-License-Identifier: Apache-2.0 - * ***************************************************************************** - */ - -package org.deeplearning4j.nn.conf.layers.wrapper; - -import lombok.Data; -import lombok.EqualsAndHashCode; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; -import org.deeplearning4j.nn.params.WrapperLayerParamInitializer; -import org.nd4j.linalg.learning.regularization.Regularization; - -import java.util.List; - -@Data -@EqualsAndHashCode(callSuper = false) -public abstract class BaseWrapperLayer extends Layer { - - protected Layer underlying; - - protected BaseWrapperLayer(Builder builder) { - super(builder); - } - - protected BaseWrapperLayer() {} - - public BaseWrapperLayer(Layer underlying) { - this.underlying = underlying; - } - - @Override - public ParamInitializer initializer() { - return WrapperLayerParamInitializer.getInstance(); - } - - @Override - public InputType getOutputType(int layerIndex, InputType inputType) { - return underlying.getOutputType(layerIndex, inputType); - } - - @Override - public void setNIn(InputType inputType, boolean override) { - underlying.setNIn(inputType, override); - } - - @Override - public InputPreProcessor getPreProcessorForInputType(InputType inputType) { - return underlying.getPreProcessorForInputType(inputType); - } - - @Override - public List getRegularizationByParam(String paramName){ - return underlying.getRegularizationByParam(paramName); - } - - @Override - public GradientNormalization getGradientNormalization() { - return underlying.getGradientNormalization(); - } - - @Override - public double getGradientNormalizationThreshold() { - return underlying.getGradientNormalizationThreshold(); - } - - @Override - public boolean isPretrainParam(String paramName) { - return underlying.isPretrainParam(paramName); - } - - @Override - public LayerMemoryReport getMemoryReport(InputType inputType) { - return underlying.getMemoryReport(inputType); - } - - @Override - public void setLayerName(String layerName) { - super.setLayerName(layerName); - if (underlying != null) { - //May be null at some points during JSON deserialization - underlying.setLayerName(layerName); - } - } -} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayerConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayerConfiguration.java new file mode 100644 index 000000000..74b71de1f --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/layers/wrapper/BaseWrapperLayerConfiguration.java @@ -0,0 +1,196 @@ +/* + * ****************************************************************************** + * * + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + */ + +package org.deeplearning4j.nn.conf.layers.wrapper; + +import java.util.List; +import lombok.Data; +import lombok.EqualsAndHashCode; +import org.deeplearning4j.nn.api.ParamInitializer; +import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.conf.InputPreProcessor; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.dropout.IDropout; +import org.deeplearning4j.nn.conf.inputs.InputType; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; +import org.deeplearning4j.nn.conf.weightnoise.IWeightNoise; +import org.deeplearning4j.nn.params.WrapperLayerParamInitializer; +import org.nd4j.linalg.activations.IActivation; +import org.nd4j.linalg.learning.config.IUpdater; +import org.nd4j.linalg.learning.regularization.Regularization; + +@Data +@EqualsAndHashCode(callSuper = false) +public abstract class BaseWrapperLayerConfiguration extends LayerConfiguration { + + /** + * The configuration to of another layer to wrap + */ + protected LayerConfiguration underlying; + + protected BaseWrapperLayerConfiguration(Builder builder) { + super(builder); + } + + protected BaseWrapperLayerConfiguration() { + } + + public BaseWrapperLayerConfiguration(LayerConfiguration underlying) { + this.underlying = underlying; + this.setNetConfiguration(underlying.getNetConfiguration()); + } + + /** + * Set the net configuration for this configuration as well as for the underlying layer (if not + * null there) + * + * @param netConfiguration the neural net configuration + */ + @Override + public void setNetConfiguration(NeuralNetConfiguration netConfiguration) { + super.setNetConfiguration(netConfiguration); + if (underlying.getNetConfiguration() == null) { + underlying.setNetConfiguration( + netConfiguration); //also set netconf for underlying if not set + } + } + + /** + * @return + */ + @Override + public IActivation getActivationFn() { + return underlying.getActivationFn(); + } + + /** + * @return + */ + @Override + public IDropout getIDropout() { + return underlying.getIDropout(); + } + + /** + * @param activationFn + */ + @Override + public void setActivationFn(IActivation activationFn) { + underlying.setActivationFn(activationFn); + } + + /** + * @param iDropout + */ + @Override + public void setIDropout(IDropout iDropout) { + underlying.setIDropout(iDropout); + } + + /** + * @param weightNoise + */ + @Override + public void setWeightNoise(IWeightNoise weightNoise) { + underlying.setWeightNoise(weightNoise); + } + + /** + * @param s + */ + @Override + public void addVariable(String s) { + underlying.addVariable(s); + } + + /** + * Get the updater for the given parameter. 
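Editorial note: the new BaseWrapperLayerConfiguration above replaces the deleted BaseWrapperLayer and forwards per-layer settings (activation, dropout, weight noise, updaters, regularization, parameter initializer, memory report, layer name) to the wrapped configuration. A sketch of that delegation, using MaskZeroLayer from earlier in this patch as a concrete wrapper; the DenseLayer configuration is assumed from stock DL4J:

    // Wrapping a configuration: reads through the wrapper resolve to the wrapped layer.
    LayerConfiguration inner = new DenseLayer.Builder()    // assumed API
            .nIn(16)
            .nOut(8)
            .build();

    BaseWrapperLayerConfiguration wrapper = new MaskZeroLayer(inner, 0.0);

    // All of these delegate to "inner", per the overrides in the new class:
    IActivation act  = wrapper.getActivationFn();
    IUpdater updater = wrapper.getIUpdater();
    boolean pretrain = wrapper.isPretrainParam("W");       // "W" is just an example key

    // Setting the layer name through the wrapper also renames the wrapped configuration.
    wrapper.setLayerName("masked-dense");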
Typically, the same updater will be used for all + * parameters, but this is not necessarily the case + * + * @param paramName Parameter name + * @return IUpdater for the parameter + */ + @Override + public IUpdater getUpdaterByParam(String paramName) { + return underlying.getUpdaterByParam(paramName); + } + + /** + * @param iUpdater + */ + @Override + public void setIUpdater(IUpdater iUpdater) { + underlying.setIUpdater(iUpdater); + } + + /** + * @return + */ + @Override + public IUpdater getIUpdater() { + return underlying.getIUpdater(); + } + + @Override + public ParamInitializer initializer() { + return WrapperLayerParamInitializer.getInstance(); + } + + @Override + public InputType getOutputType(int layerIndex, InputType inputType) { + return underlying.getOutputType(layerIndex, inputType); + } + + @Override + public void setNIn(InputType inputType, boolean override) { + underlying.setNIn(inputType, override); + } + + @Override + public InputPreProcessor getPreProcessorForInputType(InputType inputType) { + return underlying.getPreProcessorForInputType(inputType); + } + + @Override + public List getRegularizationByParam(String paramName) { + return underlying.getRegularizationByParam(paramName); + } + + @Override + public boolean isPretrainParam(String paramName) { + return underlying.isPretrainParam(paramName); + } + + @Override + public LayerMemoryReport getMemoryReport(InputType inputType) { + return underlying.getMemoryReport(inputType); + } + + @Override + public void setLayerName(String layerName) { + super.setLayerName(layerName); + if (underlying != null) { + //May be null at some points during JSON deserialization + underlying.setLayerName(layerName); + } + } + +} diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/memory/NetworkMemoryReport.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/memory/NetworkMemoryReport.java index 9182ccfb9..d3f7b1955 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/memory/NetworkMemoryReport.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/memory/NetworkMemoryReport.java @@ -153,7 +153,7 @@ public class NetworkMemoryReport extends MemoryReport { .append(modelName).append("\n").append(" Network Input: ") .append(Arrays.toString(networkInputTypes)).append("\n") .append(" # Layers: ").append(layerAndVertexReports.size()) - .append("\n").append(" Layer Types: ").append(sbLayerCounts) + .append("\n").append(" ILayer Types: ").append(sbLayerCounts) .append("\n"); appendFixedPlusVariable(sb, " Inference Memory (FP32) ", fixedMemBytes, perEx); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/misc/DummyConfig.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/misc/DummyConfig.java index 8cc5e6e20..c2e149c25 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/misc/DummyConfig.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/misc/DummyConfig.java @@ -21,7 +21,7 @@ package org.deeplearning4j.nn.conf.misc; import lombok.AllArgsConstructor; -import org.deeplearning4j.nn.api.TrainingConfig; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.conf.GradientNormalization; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.learning.config.IUpdater; @@ -31,7 +31,7 @@ import org.nd4j.linalg.learning.regularization.Regularization; import java.util.List; @AllArgsConstructor -public class DummyConfig implements 
TrainingConfig { +public class DummyConfig implements ITraininableLayerConfiguration { private final String name; @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ocnn/OCNNOutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ocnn/OCNNOutputLayer.java index 696d63f5d..34a888303 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ocnn/OCNNOutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/ocnn/OCNNOutputLayer.java @@ -25,6 +25,7 @@ import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.BaseOutputLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.LayerValidation; import org.deeplearning4j.nn.layers.ocnn.OCNNParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; @@ -72,7 +73,7 @@ public class OCNNOutputLayer extends BaseOutputLayer { super(builder); this.hiddenSize = builder.hiddenLayerSize; this.nu = builder.nu; - this.activationFn = builder.activation; + setActivationFn( builder.activation) ; this.windowSize = builder.windowSize; this.initialRValue = builder.initialRValue; this.configureR = builder.configureR; @@ -87,7 +88,7 @@ public class OCNNOutputLayer extends BaseOutputLayer { @JsonProperty("configureR") boolean configureR) { this.hiddenSize = hiddenSize; this.nu = nu; - this.activationFn = activation; + setActivationFn( activation); this.windowSize = windowSize; this.initialRValue = initialRValue; this.configureR = configureR; @@ -102,16 +103,17 @@ public class OCNNOutputLayer extends BaseOutputLayer { public Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { LayerValidation.assertNInNOutSet("OCNNOutputLayer", getLayerName(), layerIndex, getNIn(), getNOut()); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); org.deeplearning4j.nn.layers.ocnn.OCNNOutputLayer ret = - new org.deeplearning4j.nn.layers.ocnn.OCNNOutputLayer(conf, networkDataType); - ret.setListeners(trainingListeners); + new org.deeplearning4j.nn.layers.ocnn.OCNNOutputLayer(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); - ret.setActivation(activationFn); + ret.setLayerConfiguration(lconf); + ret.setActivation(getActivationFn()); if (lastEpochSinceRUpdated == 0 && configureR) { paramTable.get(OCNNParamInitializer.R_KEY).putScalar(0, initialRValue); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java index abd52c0c3..24a17c263 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/BaseNetConfigDeserializer.java @@ -24,13 +24,12 @@ import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.nn.conf.distribution.Distribution; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.BaseOutputLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.*; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.IActivation; -import org.nd4j.linalg.activations.impl.*; import org.nd4j.linalg.learning.config.*; import org.nd4j.linalg.learning.regularization.L1Regularization; import org.nd4j.linalg.learning.regularization.Regularization; @@ -38,7 +37,6 @@ import org.nd4j.linalg.learning.regularization.WeightDecay; import org.nd4j.linalg.lossfunctions.ILossFunction; import org.nd4j.linalg.lossfunctions.impl.*; import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonMappingException; @@ -66,10 +64,10 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im public abstract T deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException; - protected boolean requiresIUpdaterFromLegacy(Layer[] layers){ - for(Layer l : layers){ - if(l instanceof BaseLayer){ - BaseLayer bl = (BaseLayer)l; + protected boolean requiresIUpdaterFromLegacy(LayerConfiguration[] layers){ + for(LayerConfiguration l : layers){ + if(l instanceof BaseLayerConfiguration){ + BaseLayerConfiguration bl = (BaseLayerConfiguration)l; if(bl.getIUpdater() == null && bl.initializer().numParams(bl) > 0){ return true; } @@ -78,8 +76,8 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im return false; } - protected boolean requiresDropoutFromLegacy(Layer[] layers){ - for(Layer l : layers){ + protected boolean requiresDropoutFromLegacy(LayerConfiguration[] layers){ + for(LayerConfiguration l : layers){ if(l.getIDropout() != null){ return false; } @@ -87,35 +85,38 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im return true; } - protected boolean requiresRegularizationFromLegacy(Layer[] layers){ - for(Layer l : layers){ - if(l instanceof BaseLayer && ((BaseLayer)l).getRegularization() == null){ + protected boolean requiresRegularizationFromLegacy(LayerConfiguration[] layers){ + for(LayerConfiguration l : layers){ + if(l instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)l).getRegularization() == null){ return true; } } return false; } - protected boolean requiresWeightInitFromLegacy(Layer[] layers){ - for(Layer l : layers){ - if(l instanceof BaseLayer && ((BaseLayer)l).getWeightInitFn() == null){ + protected boolean requiresWeightInitFromLegacy(LayerConfiguration[] layers){ + for(LayerConfiguration l : layers){ + if(l instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)l).getWeightInit() == null){ return true; } } return false; } - protected boolean requiresActivationFromLegacy(Layer[] layers){ - for(Layer l : layers){ - if(l instanceof BaseLayer && ((BaseLayer)l).getActivationFn() == null){ + protected boolean requiresActivationFromLegacy(LayerConfiguration[] layers){ + for(LayerConfiguration l : layers){ + if(l instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)l).getActivationFn() == null){ return true; } } return false; 
} - protected boolean requiresLegacyLossHandling(Layer[] layers){ - for(Layer l : layers){ + protected boolean requiresLegacyLossHandling(LayerConfiguration[] layers){ + for(LayerConfiguration l : layers){ if(l instanceof BaseOutputLayer && ((BaseOutputLayer)l).getLossFn() == null){ return true; } @@ -123,7 +124,7 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im return false; } - protected void handleUpdaterBackwardCompatibility(BaseLayer layer, ObjectNode on){ + protected void handleUpdaterBackwardCompatibility(BaseLayerConfiguration layer, ObjectNode on){ if(on != null && on.has("updater")){ String updaterName = on.get("updater").asText(); if(updaterName != null){ @@ -204,42 +205,43 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im } } - protected void handleL1L2BackwardCompatibility(BaseLayer baseLayer, ObjectNode on){ + protected void handleL1L2BackwardCompatibility(BaseLayerConfiguration baseLayerConfiguration, ObjectNode on){ if(on != null && (on.has("l1") || on.has("l2"))){ //Legacy format JSON - baseLayer.setRegularization(new ArrayList()); - baseLayer.setRegularizationBias(new ArrayList()); + baseLayerConfiguration.setRegularization(new ArrayList()); + baseLayerConfiguration.setRegularizationBias(new ArrayList()); if(on.has("l1")){ double l1 = on.get("l1").doubleValue(); if(l1 > 0.0){ - baseLayer.getRegularization().add(new L1Regularization(l1)); + baseLayerConfiguration.getRegularization().add(new L1Regularization(l1)); } } if(on.has("l2")){ double l2 = on.get("l2").doubleValue(); if(l2 > 0.0){ //Default to non-LR based WeightDecay, to match behaviour in 1.0.0-beta3 - baseLayer.getRegularization().add(new WeightDecay(l2, false)); + baseLayerConfiguration.getRegularization().add(new WeightDecay(l2, false)); } } if(on.has("l1Bias")){ double l1Bias = on.get("l1Bias").doubleValue(); if(l1Bias > 0.0){ - baseLayer.getRegularizationBias().add(new L1Regularization(l1Bias)); + baseLayerConfiguration.getRegularizationBias().add(new L1Regularization(l1Bias)); } } if(on.has("l2Bias")){ double l2Bias = on.get("l2Bias").doubleValue(); if(l2Bias > 0.0){ //Default to non-LR based WeightDecay, to match behaviour in 1.0.0-beta3 - baseLayer.getRegularizationBias().add(new WeightDecay(l2Bias, false)); + baseLayerConfiguration.getRegularizationBias().add(new WeightDecay(l2Bias, false)); } } } } - protected void handleWeightInitBackwardCompatibility(BaseLayer baseLayer, ObjectNode on){ + protected void handleWeightInitBackwardCompatibility( + BaseLayerConfiguration baseLayerConfiguration, ObjectNode on){ if(on != null && on.has("weightInit") ){ //Legacy format JSON if(on.has("weightInit")){ @@ -252,7 +254,7 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im d = NeuralNetConfiguration.mapper().readValue(dist, Distribution.class); } IWeightInit iwi = w.getWeightInitFunction(d); - baseLayer.setWeightInitFn(iwi); + baseLayerConfiguration.setWeightInit(iwi); } catch (Throwable t){ log.warn("Failed to infer weight initialization from legacy JSON format",t); } @@ -261,8 +263,9 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im } //Changed after 0.7.1 from "activationFunction" : "softmax" to "activationFn" : - protected void handleActivationBackwardCompatibility(BaseLayer baseLayer, ObjectNode on){ - if(baseLayer.getActivationFn() == null && on.has("activationFunction")){ + protected void handleActivationBackwardCompatibility( + BaseLayerConfiguration baseLayerConfiguration, ObjectNode on){ + 
if(baseLayerConfiguration.getActivationFn() == null && on.has("activationFunction")){ String afn = on.get("activationFunction").asText(); IActivation a = null; try { @@ -274,7 +277,7 @@ public abstract class BaseNetConfigDeserializer extends StdDeserializer im | InvocationTargetException instantiationException){ log.error(instantiationException.getMessage()); } - baseLayer.setActivationFn(a); + baseLayerConfiguration.setActivationFn(a); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/ComputationGraphConfigurationDeserializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/ComputationGraphConfigurationDeserializer.java index edd9cbef8..9f93c43e0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/ComputationGraphConfigurationDeserializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/ComputationGraphConfigurationDeserializer.java @@ -26,10 +26,10 @@ import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.BaseOutputLayer; import org.deeplearning4j.nn.conf.layers.BatchNormalization; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.weightnoise.DropConnect; import org.deeplearning4j.nn.params.BatchNormalizationParamInitializer; import com.fasterxml.jackson.core.JsonLocation; @@ -65,16 +65,16 @@ public class ComputationGraphConfigurationDeserializer //Previously: enumerations and fields. 
Now: classes //Here, we manually create the appropriate Updater instances, if the IUpdater field is empty - List layerList = new ArrayList<>(); + List layerList = new ArrayList<>(); Map vertices = conf.getVertices(); for (Map.Entry entry : vertices.entrySet()) { if (entry.getValue() instanceof LayerVertex) { LayerVertex lv = (LayerVertex) entry.getValue(); - layerList.add(lv.getLayerConf().getLayer()); + layerList.add(lv.getLayerConfiguration()); } } - Layer[] layers = layerList.toArray(new Layer[layerList.size()]); + LayerConfiguration[] layers = layerList.toArray(new LayerConfiguration[layerList.size()]); //Now, check if we need to manually handle IUpdater deserialization from legacy format boolean attemptIUpdaterFromLegacy = requiresIUpdaterFromLegacy(layers); boolean requireLegacyRegularizationHandling = requiresRegularizationFromLegacy(layers); @@ -118,20 +118,24 @@ public class ComputationGraphConfigurationDeserializer continue; } - if(attemptIUpdaterFromLegacy && layers[layerIdx] instanceof BaseLayer && ((BaseLayer)layers[layerIdx]).getIUpdater() == null){ - handleUpdaterBackwardCompatibility((BaseLayer)layers[layerIdx], (ObjectNode)next); + if(attemptIUpdaterFromLegacy && layers[layerIdx] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)layers[layerIdx]).getIUpdater() == null){ + handleUpdaterBackwardCompatibility((BaseLayerConfiguration)layers[layerIdx], (ObjectNode)next); } - if(requireLegacyRegularizationHandling && layers[layerIdx] instanceof BaseLayer && ((BaseLayer)layers[layerIdx]).getRegularization() == null){ - handleL1L2BackwardCompatibility((BaseLayer)layers[layerIdx], (ObjectNode)next); + if(requireLegacyRegularizationHandling && layers[layerIdx] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)layers[layerIdx]).getRegularization() == null){ + handleL1L2BackwardCompatibility((BaseLayerConfiguration)layers[layerIdx], (ObjectNode)next); } - if(requiresLegacyWeightInitHandling && layers[layerIdx] instanceof BaseLayer && ((BaseLayer)layers[layerIdx]).getWeightInitFn() == null){ - handleWeightInitBackwardCompatibility((BaseLayer)layers[layerIdx], (ObjectNode)next); + if(requiresLegacyWeightInitHandling && layers[layerIdx] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)layers[layerIdx]).getWeightInit() == null){ + handleWeightInitBackwardCompatibility((BaseLayerConfiguration)layers[layerIdx], (ObjectNode)next); } - if(requiresLegacyActivationHandling && layers[layerIdx] instanceof BaseLayer && ((BaseLayer)layers[layerIdx]).getActivationFn() == null){ - handleActivationBackwardCompatibility((BaseLayer)layers[layerIdx], (ObjectNode)next); + if(requiresLegacyActivationHandling && layers[layerIdx] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)layers[layerIdx]).getActivationFn() == null){ + handleActivationBackwardCompatibility((BaseLayerConfiguration)layers[layerIdx], (ObjectNode)next); } if(requiresLegacyLossHandling && layers[layerIdx] instanceof BaseOutputLayer && ((BaseOutputLayer)layers[layerIdx]).getLossFn() == null){ @@ -144,9 +148,9 @@ public class ComputationGraphConfigurationDeserializer double d = next.get("dropOut").asDouble(); if(!Double.isNaN(d)){ //Might be dropout or dropconnect... 
- if(layers[layerIdx] instanceof BaseLayer && confNode.has("useDropConnect") + if(layers[layerIdx] instanceof BaseLayerConfiguration && confNode.has("useDropConnect") && confNode.get("useDropConnect").asBoolean(false)){ - ((BaseLayer)layers[layerIdx]).setWeightNoise(new DropConnect(d)); + ((BaseLayerConfiguration)layers[layerIdx]).setWeightNoise(new DropConnect(d)); } else { layers[layerIdx].setIDropout(new Dropout(d)); } @@ -155,11 +159,12 @@ public class ComputationGraphConfigurationDeserializer } layerIdx++; } else if("org.deeplearning4j.nn.conf.graph.LayerVertex".equals(cls)){ - if(requiresLegacyWeightInitHandling && layers[layerIdx] instanceof BaseLayer && ((BaseLayer)layers[layerIdx]).getWeightInitFn() == null) { + if(requiresLegacyWeightInitHandling && layers[layerIdx] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)layers[layerIdx]).getWeightInit() == null) { //Post JSON format change for subclasses, but before WeightInit was made a class confNode = (ObjectNode) next.get("layerConf"); next = confNode.get("layer"); - handleWeightInitBackwardCompatibility((BaseLayer) layers[layerIdx], (ObjectNode) next); + handleWeightInitBackwardCompatibility((BaseLayerConfiguration) layers[layerIdx], (ObjectNode) next); } layerIdx++; } @@ -171,9 +176,9 @@ public class ComputationGraphConfigurationDeserializer // but, as there is no useLogStdev=false property for legacy batchnorm JSON, the 'real' value (useLogStdev=false) // is not set to override the default, unless we do it manually here for(GraphVertex gv : conf.getVertices().values()){ - if(gv instanceof LayerVertex && ((LayerVertex) gv).getLayerConf().getLayer() instanceof BatchNormalization){ - BatchNormalization bn = (BatchNormalization) ((LayerVertex) gv).getLayerConf().getLayer(); - List vars = ((LayerVertex) gv).getLayerConf().getVariables(); + if(gv instanceof LayerVertex && ((LayerVertex) gv).getLayerConfiguration() instanceof BatchNormalization){ + BatchNormalization bn = (BatchNormalization) ((LayerVertex) gv).getLayerConfiguration(); + List vars = ((LayerVertex) gv).getNetConfiguration().getNetWideVariables(); boolean isVariance = vars.contains(BatchNormalizationParamInitializer.GLOBAL_VAR); bn.setUseLogStd(!isVariance); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/JsonMappers.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/JsonMappers.java index 8097111d6..0b6871524 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/JsonMappers.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/JsonMappers.java @@ -22,7 +22,7 @@ package org.deeplearning4j.nn.conf.serde; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.serde.legacy.LegacyJsonFormat; import com.fasterxml.jackson.databind.*; import com.fasterxml.jackson.databind.deser.BeanDeserializerModifier; @@ -76,8 +76,8 @@ public class JsonMappers { public JsonDeserializer modifyDeserializer(DeserializationConfig config, BeanDescription beanDesc, JsonDeserializer deserializer) { //Use our custom deserializers to handle backward compatibility for updaters -> IUpdater - if (beanDesc.getBeanClass() == MultiLayerConfiguration.class) { - return new MultiLayerConfigurationDeserializer(deserializer); + if (beanDesc.getBeanClass() == NeuralNetConfiguration.class) 
{ + return new NeuralNetConfigurationDeserializer(deserializer); } else if (beanDesc.getBeanClass() == ComputationGraphConfiguration.class) { return new ComputationGraphConfigurationDeserializer(deserializer); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/MultiLayerConfigurationDeserializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/NeuralNetConfigurationDeserializer.java similarity index 79% rename from cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/MultiLayerConfigurationDeserializer.java rename to cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/NeuralNetConfigurationDeserializer.java index 36f4a9b45..7863aca02 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/MultiLayerConfigurationDeserializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/NeuralNetConfigurationDeserializer.java @@ -21,13 +21,12 @@ package org.deeplearning4j.nn.conf.serde; import org.apache.commons.io.IOUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.dropout.Dropout; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.BaseOutputLayer; import org.deeplearning4j.nn.conf.layers.BatchNormalization; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.weightnoise.DropConnect; import org.deeplearning4j.nn.params.BatchNormalizationParamInitializer; import com.fasterxml.jackson.core.JsonLocation; @@ -43,21 +42,19 @@ import java.io.IOException; import java.io.StringReader; import java.util.List; -public class MultiLayerConfigurationDeserializer extends BaseNetConfigDeserializer { +public class NeuralNetConfigurationDeserializer extends BaseNetConfigDeserializer { - public MultiLayerConfigurationDeserializer(JsonDeserializer defaultDeserializer) { - super(defaultDeserializer, MultiLayerConfiguration.class); + public NeuralNetConfigurationDeserializer(JsonDeserializer defaultDeserializer) { + super(defaultDeserializer, NeuralNetConfiguration.class); } @Override - public MultiLayerConfiguration deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException { + public NeuralNetConfiguration deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException { long charOffsetStart = jp.getCurrentLocation().getCharOffset(); - MultiLayerConfiguration conf = (MultiLayerConfiguration) defaultDeserializer.deserialize(jp, ctxt); - Layer[] layers = new Layer[conf.getConfs().size()]; - for (int i = 0; i < layers.length; i++) { - layers[i] = conf.getConf(i).getLayer(); - } + NeuralNetConfiguration conf = (NeuralNetConfiguration) defaultDeserializer.deserialize(jp, ctxt); + + LayerConfiguration[] layers = conf.getFlattenedLayerConfigurations().toArray(new LayerConfiguration[0]); //Now, check if we need to manually handle IUpdater deserialization from legacy format boolean attemptIUpdaterFromLegacy = requiresIUpdaterFromLegacy(layers); @@ -89,7 +86,8 @@ public class MultiLayerConfigurationDeserializer extends BaseNetConfigDeserializ for( int i=0; i (first/only child) -> updater if(on.has("layer")){ confNode = on; @@ -99,7 +97,7 @@ public class MultiLayerConfigurationDeserializer extends BaseNetConfigDeserializ } on = (ObjectNode) 
on.elements().next(); - handleUpdaterBackwardCompatibility((BaseLayer)layers[i], on); + handleUpdaterBackwardCompatibility((BaseLayerConfiguration)layers[i], on); } if(attemptIUpdaterFromLegacy) { @@ -109,9 +107,10 @@ public class MultiLayerConfigurationDeserializer extends BaseNetConfigDeserializ double d = on.get("dropOut").asDouble(); if (!Double.isNaN(d)) { //Might be dropout or dropconnect... - if (confNode != null && layers[i] instanceof BaseLayer && confNode.has("useDropConnect") + if (confNode != null && layers[i] instanceof BaseLayerConfiguration + && confNode.has("useDropConnect") && confNode.get("useDropConnect").asBoolean(false)) { - ((BaseLayer) layers[i]).setWeightNoise(new DropConnect(d)); + ((BaseLayerConfiguration) layers[i]).setWeightNoise(new DropConnect(d)); } else { if (d > 0.0) { layers[i].setIDropout(new Dropout(d)); @@ -136,16 +135,19 @@ public class MultiLayerConfigurationDeserializer extends BaseNetConfigDeserializ } } - if(requiresLegacyRegularizationHandling && layers[i] instanceof BaseLayer && ((BaseLayer) layers[i]).getRegularization() == null) { - handleL1L2BackwardCompatibility((BaseLayer) layers[i], on); + if(requiresLegacyRegularizationHandling && layers[i] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration) layers[i]).getRegularization() == null) { + handleL1L2BackwardCompatibility((BaseLayerConfiguration) layers[i], on); } - if(requiresLegacyWeightInitHandling && layers[i] instanceof BaseLayer && ((BaseLayer) layers[i]).getWeightInitFn() == null) { - handleWeightInitBackwardCompatibility((BaseLayer) layers[i], on); + if(requiresLegacyWeightInitHandling && layers[i] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration) layers[i]).getWeightInit() == null) { + handleWeightInitBackwardCompatibility((BaseLayerConfiguration) layers[i], on); } - if(requiresLegacyActivationHandling && layers[i] instanceof BaseLayer && ((BaseLayer)layers[i]).getActivationFn() == null){ - handleActivationBackwardCompatibility((BaseLayer) layers[i], on); + if(requiresLegacyActivationHandling && layers[i] instanceof BaseLayerConfiguration + && ((BaseLayerConfiguration)layers[i]).getActivationFn() == null){ + handleActivationBackwardCompatibility((BaseLayerConfiguration) layers[i], on); } if(requiresLegacyLossHandling && layers[i] instanceof BaseOutputLayer && ((BaseOutputLayer)layers[i]).getLossFn() == null){ @@ -162,11 +164,11 @@ public class MultiLayerConfigurationDeserializer extends BaseNetConfigDeserializ //JSON deserialization uses public BatchNormalization() constructor which defaults to log10stdev now // but, as there is no useLogStdev=false property for legacy batchnorm JSON, the 'real' value (useLogStdev=false) // is not set to override the default, unless we do it manually here - for(NeuralNetConfiguration nnc : conf.getConfs()){ - Layer l = nnc.getLayer(); + for(NeuralNetConfiguration nnc : conf.getNetConfigurations()){ + LayerConfiguration l = nnc.getLayerConfigurations().get(0); if(l instanceof BatchNormalization){ BatchNormalization bn = (BatchNormalization)l; - List vars = nnc.getVariables(); + List vars = nnc.getNetWideVariables(); boolean isVariance = vars.contains(BatchNormalizationParamInitializer.GLOBAL_VAR); bn.setUseLogStd(!isVariance); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/legacy/LegacyJsonFormat.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/legacy/LegacyJsonFormat.java index c654b2698..ceb645be7 100644 --- 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/legacy/LegacyJsonFormat.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/serde/legacy/LegacyJsonFormat.java @@ -61,7 +61,7 @@ public class LegacyJsonFormat { om.addMixIn(InputPreProcessor.class, InputPreProcessorMixin.class); om.addMixIn(GraphVertex.class, GraphVertexMixin.class); - om.addMixIn(Layer.class, LayerMixin.class); + om.addMixIn(LayerConfiguration.class, LayerMixin.class); om.addMixIn(ReconstructionDistribution.class, ReconstructionDistributionMixin.class); om.addMixIn(IActivation.class, IActivationMixin.class); om.addMixIn(ILossFunction.class, ILossFunctionMixin.class); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/DropConnect.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/DropConnect.java index cabb01843..926d2017d 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/DropConnect.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/DropConnect.java @@ -78,7 +78,7 @@ public class DropConnect implements IWeightNoise { @Override public INDArray getParameter(Layer layer, String paramKey, int iteration, int epoch, boolean train, LayerWorkspaceMgr workspaceMgr) { - ParamInitializer init = layer.conf().getLayer().initializer(); + ParamInitializer init = layer.getLayerConfiguration().initializer(); INDArray param = layer.getParam(paramKey); double p; @@ -88,8 +88,8 @@ public class DropConnect implements IWeightNoise { p = weightRetainProbSchedule.valueAt(iteration, epoch); } - if (train && init.isWeightParam(layer.conf().getLayer(), paramKey) - || (applyToBiases && init.isBiasParam(layer.conf().getLayer(), paramKey))) { + if (train && init.isWeightParam(layer.getLayerConfiguration(), paramKey) + || (applyToBiases && init.isBiasParam(layer.getLayerConfiguration(), paramKey))) { INDArray out = workspaceMgr.createUninitialized(ArrayType.INPUT, param.dataType(), param.shape(), param.ordering()); Nd4j.getExecutioner().exec(new DropOut(param, out, p)); return out; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/IWeightNoise.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/IWeightNoise.java index 4c45b762f..c6c77d3d2 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/IWeightNoise.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/IWeightNoise.java @@ -33,7 +33,7 @@ public interface IWeightNoise extends Serializable, Cloneable{ /** * Get the parameter, after applying weight noise * - * @param layer Layer to get the parameter for + * @param layer ILayer to get the parameter for * @param paramKey Parameter key * @param iteration Iteration number * @param epoch Epoch number diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/WeightNoise.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/WeightNoise.java index 0e789749b..fdf01ad66 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/WeightNoise.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/conf/weightnoise/WeightNoise.java @@ -71,10 +71,10 @@ public class WeightNoise implements IWeightNoise { @Override public INDArray getParameter(Layer layer, String paramKey, int iteration, int epoch, boolean train, LayerWorkspaceMgr workspaceMgr) 
{ - ParamInitializer init = layer.conf().getLayer().initializer(); + ParamInitializer init = layer.getLayerConfiguration().initializer(); INDArray param = layer.getParam(paramKey); - if (train && init.isWeightParam(layer.conf().getLayer(), paramKey) || - (applyToBias && init.isBiasParam(layer.conf().getLayer(), paramKey))) { + if (train && init.isWeightParam(layer.getLayerConfiguration(), paramKey) || + (applyToBias && init.isBiasParam(layer.getLayerConfiguration(), paramKey))) { org.nd4j.linalg.api.rng.distribution.Distribution dist = Distributions.createDistribution(distribution); INDArray noise = dist.sample(param.ulike()); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/ComputationGraph.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/ComputationGraph.java index ac8a05be4..cc0c13506 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/ComputationGraph.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/ComputationGraph.java @@ -25,10 +25,12 @@ import lombok.NonNull; import lombok.Setter; import lombok.extern.slf4j.Slf4j; import lombok.val; +import net.brutex.ai.dnn.networks.ArtificialNeuralNetwork; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; import org.bytedeco.javacpp.Pointer; import org.deeplearning4j.exception.DL4JInvalidConfigException; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.util.*; import org.nd4j.adapters.OutputAdapter; import org.nd4j.linalg.dataset.AsyncMultiDataSetIterator; @@ -103,9 +105,15 @@ import java.util.*; import java.util.concurrent.atomic.AtomicLong; @Slf4j -public class ComputationGraph implements Serializable, Model, NeuralNetwork { +public class ComputationGraph extends ArtificialNeuralNetwork implements Serializable { - protected ComputationGraphConfiguration configuration; + /** + * This method returns configuration of this ComputationGraph + * + * @return + */ + @Getter + protected ComputationGraphConfiguration computationGraphConfiguration; protected boolean initCalled = false; protected transient Solver solver; //Used to call optimizers during backprop protected INDArray flattenedParams; //Params for all layers are a view/subset of this array @@ -210,17 +218,18 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { private Collection trainingListeners = new ArrayList<>(); - public ComputationGraph(ComputationGraphConfiguration configuration) { - this.configuration = configuration; - this.numInputArrays = configuration.getNetworkInputs().size(); - this.numOutputArrays = configuration.getNetworkOutputs().size(); + public ComputationGraph(ComputationGraphConfiguration computationGraphConfiguration) { + super(computationGraphConfiguration.getDefaultConfiguration()); + this.computationGraphConfiguration = computationGraphConfiguration; + this.numInputArrays = computationGraphConfiguration.getNetworkInputs().size(); + this.numOutputArrays = computationGraphConfiguration.getNetworkOutputs().size(); this.inputs = new INDArray[numInputArrays]; this.labels = new INDArray[numOutputArrays]; - this.defaultConfiguration = configuration.getDefaultConfiguration(); + this.defaultConfiguration = computationGraphConfiguration.getDefaultConfiguration(); //Working memory: should learn over course of: (a) full forward pass, and (b) full backward pass //Working memory should be opened once per vertex, for each of forward and backward passes - int numWorkingMem = 2 
* configuration.getVertices().size(); + int numWorkingMem = 2 * computationGraphConfiguration.getVertices().size(); WS_LAYER_WORKING_MEM_CONFIG = WorkspaceConfiguration.builder() .initialSize(0) .overallocationLimit(0.02) @@ -238,7 +247,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { .initialSize(0) .overallocationLimit(0.02) .policyLearning(LearningPolicy.OVER_TIME) - .cyclesBeforeInitialization(configuration.getVertices().size()) + .cyclesBeforeInitialization(computationGraphConfiguration.getVertices().size()) .policyReset(ResetPolicy.BLOCK_LEFT) .policySpill(SpillPolicy.REALLOCATE) .policyAllocation(AllocationPolicy.OVERALLOCATE) @@ -278,14 +287,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } } - /** - * This method returns configuration of this ComputationGraph - * - * @return - */ - public ComputationGraphConfiguration getConfiguration() { - return configuration; - } + /** * Returns the number of layers in the ComputationGraph @@ -313,7 +315,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * Get a given layer by name. */ public Layer getLayer(String name) { - Preconditions.checkState(verticesMap.containsKey(name), "Layer with name %s does not exist in the network", name); + Preconditions.checkState(verticesMap.containsKey(name), "ILayer with name %s does not exist in the network", name); return verticesMap.get(name).getLayer(); } @@ -449,7 +451,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if (initCalled) return; - DataType netDtype = getConfiguration().getDataType(); + DataType netDtype = this.getComputationGraphConfiguration().getDataType(); if(parameters != null && parameters.dataType() != netDtype){ Preconditions.checkState(parameters.rank() == 2 && parameters.size(0) == 1, "Invalid parameters array: should be rank 2 with shape [1,numParams]. Got %ndShape", parameters); if(cloneParametersArray){ @@ -463,31 +465,31 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } } - if (configuration.getTrainingWorkspaceMode() == null) - configuration.setTrainingWorkspaceMode(WorkspaceMode.NONE); + if (computationGraphConfiguration.getTrainingWorkspaceMode() == null) + computationGraphConfiguration.setTrainingWorkspaceMode(WorkspaceMode.NONE); - if (configuration.getInferenceWorkspaceMode() == null) - configuration.setInferenceWorkspaceMode(WorkspaceMode.NONE); + if (computationGraphConfiguration.getInferenceWorkspaceMode() == null) + computationGraphConfiguration.setInferenceWorkspaceMode(WorkspaceMode.NONE); - if (configuration.getCacheMode() == null) - configuration.setCacheMode(CacheMode.NONE); + if (computationGraphConfiguration.getCacheMode() == null) + computationGraphConfiguration.setCacheMode(CacheMode.NONE); OneTimeLogger.info(log, "Starting ComputationGraph with WorkspaceModes set to [training: {}; inference: {}], cacheMode set to [{}]", - configuration.getTrainingWorkspaceMode(), configuration.getInferenceWorkspaceMode(), configuration.getCacheMode()); + computationGraphConfiguration.getTrainingWorkspaceMode(), computationGraphConfiguration.getInferenceWorkspaceMode(), computationGraphConfiguration.getCacheMode()); //First: build topological ordering, based on configuration. 
Used for forward pass, backprop and order of parameters/gradients GraphIndices indices = calculateIndices(); topologicalOrder = indices.getTopologicalSortOrder(); //Initialization: create the GraphVertex objects, based on configuration structure - Map configVertexMap = configuration.getVertices(); + Map configVertexMap = computationGraphConfiguration.getVertices(); //Names of all of the (data) inputs to the ComputationGraph - List networkInputNames = configuration.getNetworkInputs(); + List networkInputNames = computationGraphConfiguration.getNetworkInputs(); //Inputs for each layer and GraphNode: - Map> vertexInputs = configuration.getVertexInputs(); - this.vertices = new GraphVertex[networkInputNames.size() + configuration.getVertices().size()]; + Map> vertexInputs = computationGraphConfiguration.getVertexInputs(); + this.vertices = new GraphVertex[networkInputNames.size() + computationGraphConfiguration.getVertices().size()]; //All names: inputs, layers and graph nodes (index to name map) Map allNamesReverse = new HashMap<>(); @@ -504,7 +506,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { long numParams = 0; long[] numParamsForVertex = new long[topologicalOrder.length]; int i = 0; - for (; i < configuration.getNetworkInputs().size(); i++) { + for (; i < computationGraphConfiguration.getNetworkInputs().size(); i++) { numParamsForVertex[i] = 0; //No parameters for input vertices } for(; i < topologicalOrder.length; i++) { @@ -513,7 +515,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { n.setDataType(netDtype); numParamsForVertex[i] = n.numParams(true); if(numParamsForVertex[i] < 0) - throw new DL4JInvalidConfigException("Layer " + name + " had parameters < 0 " + numParamsForVertex[i]); + throw new DL4JInvalidConfigException("ILayer " + name + " had parameters < 0 " + numParamsForVertex[i]); numParams += numParamsForVertex[i]; } @@ -541,7 +543,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { //Set RNG seed, for repeatability between initializations when set if (initializeParams) { - Nd4j.getRandom().setSeed(conf().getSeed()); + Nd4j.getRandom().setSeed(getNetConfiguration().getSeed()); } //Given the topological ordering: work out the subset of the parameters array used for each layer @@ -562,9 +564,9 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { int numLayers = 0; List tempLayerList = new ArrayList<>(); - defaultConfiguration.clearVariables(); - List variables = defaultConfiguration.variables(false); - i = configuration.getNetworkInputs().size(); + defaultConfiguration.clearNetWideVariable(); + List variables = defaultConfiguration.netWideVariables(false); + i = computationGraphConfiguration.getNetworkInputs().size(); for(; i layerVariables = l.conf().variables(); + List layerVariables = l.getNetConfiguration().netWideVariables(); if (layerVariables != null) { for (String s : layerVariables) { variables.add(gv.getVertexName() + "_" + s); @@ -679,7 +681,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } //Mark any output vertices as outputs: - for (String s : configuration.getNetworkOutputs()) { + for (String s : computationGraphConfiguration.getNetworkOutputs()) { GraphVertex gv = verticesMap.get(s); gv.setOutputVertex(true); } @@ -687,7 +689,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { // now we init solver & optimizer if (solver == null) { try (MemoryWorkspace wsO = 
Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); + solver = new Solver.Builder().configure(getNetConfiguration()).listeners( + getTrainingListeners()).model(this).build(); solver.initOptimizer(); } } @@ -697,7 +700,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { // Safe when the input is: (a) it's not a graph input, and (b) isn't shared by any other layers/vertices Map> seenAsInputTo = new HashMap<>(); - for(Map.Entry> entry : configuration.getVertexInputs().entrySet()){ + for(Map.Entry> entry : computationGraphConfiguration.getVertexInputs().entrySet()){ for(String s : entry.getValue() ){ if (!seenAsInputTo.containsKey(s)) { seenAsInputTo.put(s, new ArrayList()); @@ -708,11 +711,11 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } for(Layer l : layers){ - String layerName = l.conf().getLayer().getLayerName(); - List inputs = configuration.getVertexInputs().get(layerName); + String layerName = l.getLayerConfiguration().getLayerName(); + List inputs = computationGraphConfiguration.getVertexInputs().get(layerName); String in = inputs.get(0); //For now: layers should have exactly 1 input - if(configuration.getNetworkInputs().contains(in)){ + if(computationGraphConfiguration.getNetworkInputs().contains(in)){ //TODO When is it safe to NOT allow input modifucation? It's not always safe... // For example dropout + iterating over List that is used for multiple epochs... continue; @@ -761,10 +764,10 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { long numParams = 0; long[] numParamsForVertex = new long[topologicalOrder.length]; int i = 0; - for (; i < configuration.getNetworkInputs().size(); i++) { + for (; i < computationGraphConfiguration.getNetworkInputs().size(); i++) { numParamsForVertex[i] = 0; //No parameters for input vertices } - Map configVertexMap = configuration.getVertices(); + Map configVertexMap = computationGraphConfiguration.getVertices(); for (; i < topologicalOrder.length; i++) { String name = indices.getIdxToName().get(i); org.deeplearning4j.nn.conf.graph.GraphVertex n = configVertexMap.get(name); @@ -796,7 +799,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if(outputLayerIdxs == null) { outputLayerIdxs = new int[numOutputArrays]; int i = 0; - for (String s : configuration.getNetworkOutputs()) { + for (String s : computationGraphConfiguration.getNetworkOutputs()) { outputLayerIdxs[i++] = verticesMap.get(s).getVertexIndex(); } } @@ -875,7 +878,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { /** * Pretrain a specified layer with the given DataSetIterator * - * @param layerName Layer name + * @param layerName ILayer name * @param dataSetIterator Data */ public void pretrainLayer(String layerName, DataSetIterator dataSetIterator) { @@ -890,7 +893,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { /** * Pretrain a specified layer with the given MultiDataSetIterator * - * @param layerName Layer name + * @param layerName ILayer name * @param iter Training data */ public void pretrainLayer(String layerName, MultiDataSetIterator iter) { @@ -920,7 +923,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { int idx = toTrain.getVertexIndex(); LayerWorkspaceMgr workspaceMgr; - if(configuration.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ + 
if(computationGraphConfiguration.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); } else { workspaceMgr = LayerWorkspaceMgr.builder() @@ -1133,7 +1136,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { update(TaskUtils.buildTask(inputs, labels)); LayerWorkspaceMgr workspaceMgr; - if(configuration.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ + if(computationGraphConfiguration.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); } else { workspaceMgr = LayerWorkspaceMgr.builder() @@ -1151,12 +1154,13 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - if (configuration.getBackpropType() == BackpropType.TruncatedBPTT) { + if (computationGraphConfiguration.getBackpropType() == BackpropType.TruncatedBPTT) { doTruncatedBPTT(inputs, labels, featureMaskArrays, labelMaskArrays, workspaceMgr); } else { if (solver == null) { try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); + solver = new Solver.Builder().configure(getNetConfiguration()).listeners( + getTrainingListeners()).model(this).build(); } } @@ -1202,9 +1206,9 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { //Get cached topological sort order from config, if present - if(configuration.getTopologicalOrder() != null && configuration.getTopologicalOrderStr() != null){ - int[] t = configuration.getTopologicalOrder(); - List s = configuration.getTopologicalOrderStr(); + if(computationGraphConfiguration.getTopologicalOrder() != null && computationGraphConfiguration.getTopologicalOrderStr() != null){ + int[] t = computationGraphConfiguration.getTopologicalOrder(); + List s = computationGraphConfiguration.getTopologicalOrderStr(); Map m1 = new HashMap<>(); Map m2 = new HashMap<>(); for( int i=0; i nodeMap = configuration.getVertices(); - List networkInputNames = configuration.getNetworkInputs(); - int numVertices = networkInputNames.size() + configuration.getVertices().size(); + Map nodeMap = computationGraphConfiguration.getVertices(); + List networkInputNames = computationGraphConfiguration.getNetworkInputs(); + int numVertices = networkInputNames.size() + computationGraphConfiguration.getVertices().size(); int[] out = new int[numVertices]; int outCounter = 0; @@ -1233,7 +1237,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { Map vertexNamesMap = new HashMap<>(); Map vertexNamesMap2 = new HashMap<>(); int i = 0; - for (String inputName : configuration.getNetworkInputs()) { + for (String inputName : computationGraphConfiguration.getNetworkInputs()) { vertexNamesMap.put(i, inputName); vertexNamesMap2.put(inputName, i); i++; @@ -1248,7 +1252,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { Map> inputEdges = new HashMap<>(); //key: vertex. Values: vertices that the key vertex receives input from Map> outputEdges = new HashMap<>(); //key: vertex. 
Values: vertices that the key vertex outputs to - for (String s : configuration.getNetworkInputs()) { + for (String s : computationGraphConfiguration.getNetworkInputs()) { int idx = vertexNamesMap2.get(s); inputEdges.put(idx, null); } @@ -1256,7 +1260,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { for (Map.Entry entry : nodeMap.entrySet()) { String thisVertexName = entry.getKey(); int idx = vertexNamesMap2.get(thisVertexName); - List inputsToThisVertex = configuration.getVertexInputs().get(thisVertexName); + List inputsToThisVertex = computationGraphConfiguration.getVertexInputs().get(thisVertexName); if (inputsToThisVertex == null || inputsToThisVertex.isEmpty()) { inputEdges.put(idx, null); @@ -1324,8 +1328,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { for( int idx : out){ s.add(vertexNamesMap.get(idx)); } - configuration.setTopologicalOrder(out); - configuration.setTopologicalOrderStr(s); + computationGraphConfiguration.setTopologicalOrder(out); + computationGraphConfiguration.setTopologicalOrderStr(s); graphIndices = GraphIndices.builder() .topologicalSortOrder(out) @@ -1344,7 +1348,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { synchronizeIterEpochCounts(); LayerWorkspaceMgr workspaceMgr; - if(configuration.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ + if(computationGraphConfiguration.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); } else { workspaceMgr = LayerWorkspaceMgr.builder() @@ -1362,7 +1366,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - boolean tbptt = configuration.getBackpropType() == BackpropType.TruncatedBPTT; + boolean tbptt = computationGraphConfiguration.getBackpropType() == BackpropType.TruncatedBPTT; FwdPassType fwdType = (tbptt ? FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE : FwdPassType.STANDARD); synchronizeIterEpochCounts(); @@ -1386,7 +1390,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { score = 0.0; int outNum = 0; - for (String s : configuration.getNetworkOutputs()) { + for (String s : computationGraphConfiguration.getNetworkOutputs()) { GraphVertex gv = verticesMap.get(s); if(gv instanceof LayerVertex) { //At this point: the input to the output layer might not be set on the layer itself - just the vertex @@ -1863,7 +1867,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { int[] layerNums = new int[layers.size()]; for( int i=0; i freeWorkspaceManagers = new ArrayList<>(); //Basically used as a stack Map openActivationsWorkspaces = new IdentityHashMap<>(); - WorkspaceMode wsm = (train ? configuration.getTrainingWorkspaceMode() : configuration.getInferenceWorkspaceMode()); + WorkspaceMode wsm = (train ? computationGraphConfiguration.getTrainingWorkspaceMode() : computationGraphConfiguration.getInferenceWorkspaceMode()); boolean noWS = wsm == WorkspaceMode.NONE; LayerWorkspaceMgr allNone = noWS ? 
LayerWorkspaceMgr.noWorkspaces(helperWorkspaces) : null; List[] closeAtEndIteraton = (List[])new List[topologicalOrder.length]; @@ -2379,8 +2383,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { //Standard feed-forward case if(i > 0 && current.hasLayer() && prev.hasLayer() && - ConvolutionUtils.layerHasConvolutionLayout(prev.getLayer().conf().getLayer()) - && ConvolutionUtils.layerHasConvolutionLayout(current.getLayer().conf().getLayer())) { + ConvolutionUtils.layerHasConvolutionLayout(prev.getLayer().getLayerConfiguration()) + && ConvolutionUtils.layerHasConvolutionLayout(current.getLayer().getLayerConfiguration())) { /** * Not QUITE the proper fix, but getting close. @@ -2388,8 +2392,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * Need to play with output sizes a bit to make sure we put the right parameters in there to get * correct behavior. */ - CNN2DFormat preLayerFormat = ConvolutionUtils.getFormatForLayer(prev.getLayer().conf().getLayer()); - CNN2DFormat currLayerFormat = ConvolutionUtils.getFormatForLayer(current.getLayer().conf().getLayer()); + CNN2DFormat preLayerFormat = ConvolutionUtils.getFormatForLayer(prev.getLayer().getLayerConfiguration()); + CNN2DFormat currLayerFormat = ConvolutionUtils.getFormatForLayer(current.getLayer().getLayerConfiguration()); if(preLayerFormat != currLayerFormat) { int inputIdx = -1; for(int inputVertex = 0; inputVertex < current.getInputVertices().length; inputVertex++) { @@ -2415,10 +2419,10 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { else out = current.doForward(train, workspaceMgr); } else if(i > 0 && current.hasLayer() && prev.hasLayer() && - Convolution1DUtils.hasRnnDataFormat(prev.getLayer().conf().getLayer()) - && Convolution1DUtils.hasRnnDataFormat(current.getLayer().conf().getLayer())) { - RNNFormat preLayerFormat = Convolution1DUtils.getRnnFormatFromLayer(prev.getLayer().conf().getLayer()); - RNNFormat currLayerFormat = Convolution1DUtils.getRnnFormatFromLayer(current.getLayer().conf().getLayer()); + Convolution1DUtils.hasRnnDataFormat(prev.getLayer().getLayerConfiguration()) + && Convolution1DUtils.hasRnnDataFormat(current.getLayer().getLayerConfiguration())) { + RNNFormat preLayerFormat = Convolution1DUtils.getRnnFormatFromLayer(prev.getLayer().getLayerConfiguration()); + RNNFormat currLayerFormat = Convolution1DUtils.getRnnFormatFromLayer(current.getLayer().getLayerConfiguration()); int inputIdx = -1; for(int inputVertex = 0; inputVertex < current.getInputVertices().length; inputVertex++) { if(current.getInputVertices()[inputVertex].getVertexIndex() == prev.getVertexIndex()) { @@ -2438,7 +2442,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } } else if (fwdPassType == FwdPassType.RNN_TIMESTEP) { if (current.hasLayer()) { - //Layer + //ILayer INDArray input = current.getInputs()[0]; Layer l = current.getLayer(); if (l instanceof RecurrentLayer) { @@ -2562,7 +2566,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { try { - calcBackpropGradients(true, configuration.getBackpropType() == BackpropType.TruncatedBPTT, epsilons); + calcBackpropGradients(true, computationGraphConfiguration.getBackpropType() == BackpropType.TruncatedBPTT, epsilons); return gradient; } catch (OutOfMemoryError e){ CrashReportingUtil.writeMemoryCrashDump(this, e); @@ -2595,19 +2599,19 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { consumed by all layers */ - 
if(externalEpsilons == null || externalEpsilons.length == 0 && configuration.getTrainingWorkspaceMode() != WorkspaceMode.NONE){ + if(externalEpsilons == null || externalEpsilons.length == 0 && computationGraphConfiguration.getTrainingWorkspaceMode() != WorkspaceMode.NONE){ WorkspaceUtils.assertOpenAndActive(WS_ALL_LAYERS_ACT, "Expected workspace WS_ALL_LAYERS_ACT to be active and open" + " in calcBackpropGradients when workspace mode is not set to NONE"); } //Validate the network configuration for external errors - no output layers if(externalEpsilons != null && externalEpsilons.length > 0){ - List outputLayers = configuration.getNetworkOutputs(); + List outputLayers = computationGraphConfiguration.getNetworkOutputs(); for(String s : outputLayers ){ GraphVertex gv = getVertex(s); if(gv instanceof LayerVertex && gv.getLayer() instanceof IOutputLayer){ throw new IllegalStateException("Cannot perform backprop with external errors in conjunction with an output layer:" + - " output layers cannot use external errors for backprop. Layer name: " + s); + " output layers cannot use external errors for backprop. ILayer name: " + s); } } @@ -2643,7 +2647,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } - boolean noWS = configuration.getInferenceWorkspaceMode() == WorkspaceMode.NONE; + boolean noWS = computationGraphConfiguration.getInferenceWorkspaceMode() == WorkspaceMode.NONE; LayerWorkspaceMgr allNone = noWS ? LayerWorkspaceMgr.noWorkspaces(helperWorkspaces) : null; List allWorkspaceManagers = new ArrayList<>(); @@ -2722,7 +2726,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { //(a) it's an output layer (i.e., instanceof IOutputLayer), or //(b) it's a normal layer, but it has been marked as an output layer for use in external errors - for reinforcement learning, for example - int thisOutputNumber = configuration.getNetworkOutputs().indexOf(current.getVertexName()); + int thisOutputNumber = computationGraphConfiguration.getNetworkOutputs().indexOf(current.getVertexName()); Layer currentLayer = current.getLayer(); if (currentLayer instanceof FrozenLayerWithBackprop) { currentLayer = ((FrozenLayerWithBackprop) currentLayer).getInsideLayer(); @@ -2735,7 +2739,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } else { if ((externalEpsilons == null || externalEpsilons.length == 0) && labels[thisOutputNumber] != null) { - throw new DL4JException("Layer \"" + current.getVertexName() + "\" of type " + throw new DL4JException("ILayer \"" + current.getVertexName() + "\" of type " + current.getLayer().getClass().getSimpleName() + " is set as network output " + "(but isn't an IOutputLayer). 
Only IOutputLayer layers can be fit via backprop with" @@ -2882,8 +2886,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { @Override public ComputationGraph clone() { - ComputationGraph cg = new ComputationGraph(configuration.clone()); - cg.init(params().dup(), false); + ComputationGraph cg = new ComputationGraph(computationGraphConfiguration.clone()); + cg.init(getModelParams().dup(), false); if (solver != null) { //If solver is null: updater hasn't been initialized -> getUpdater call will force initialization, however ComputationGraphUpdater u = this.getUpdater(); @@ -2916,12 +2920,12 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { /** * Set the trainingListeners for the ComputationGraph (and all layers in the network) */ - public void setListeners(Collection listeners) { + public void addTrainingListeners(Collection listeners) { if (layers == null) init(); for (Layer l : layers) { - l.setListeners(listeners); + l.addTrainingListeners(listeners.toArray(new TrainingListener[]{})); } if (solver != null) { @@ -2934,10 +2938,32 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } } + /** + * The param table + * + * @return + */ + + public Map getParamTable() { + return null; + } + + /** + * Table of parameters by key, for backprop. For many models (dense layers, etc) - all parameters + * are backprop parameters + * + * @param backpropParamsOnly If true, return backprop params only. If false: return all params + * (equivalent to paramsTable()) + */ + + public Map getParamTable(boolean backpropParamsOnly) { + return null; + } + /** * Set the trainingListeners for the ComputationGraph (and all layers in the network) */ - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... listeners) { List list = new ArrayList<>(); //Check: user might have done setListeners(null) thinking this would clear the current listeners. //This results in an TrainingListener[1] with a single null value -> results in a NPE later @@ -2947,7 +2973,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { list.add(i); } } - setListeners(list); + addTrainingListeners(list); } /** @@ -2955,26 +2981,11 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * * @param listeners Listeners to add */ - @Override - public void addListeners(TrainingListener... 
listeners) { - if (this.trainingListeners == null) { - setListeners(listeners); - return; - } else { - List newListeners = new ArrayList<>(this.trainingListeners); //To avoid immutable list issues - Collections.addAll(newListeners, listeners); - setListeners(newListeners); - } - - if (solver != null) { - solver.setListeners(this.trainingListeners); - } - } /** * Get the trainingListeners for the ComputationGraph */ - public Collection getListeners() { + public Collection getTrainingListeners() { return trainingListeners; } @@ -2992,7 +3003,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public ComputationGraphUpdater getUpdater(boolean initializeIfAbsent){ if (solver == null && initializeIfAbsent) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); + solver = new Solver.Builder().configure(getNetConfiguration()).listeners( + getTrainingListeners()).model(this).build(); solver.getOptimizer().setUpdaterComputationGraph(new ComputationGraphUpdater(this)); } if(solver != null) { @@ -3006,7 +3018,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public void setUpdater(ComputationGraphUpdater updater) { if (solver == null) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); + solver = new Solver.Builder().configure(getNetConfiguration()).listeners( + getTrainingListeners()).model(this).build(); } solver.getOptimizer().setUpdaterComputationGraph(updater); } @@ -3019,15 +3032,15 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if (outputLayerIdx >= numOutputArrays) throw new IllegalArgumentException("Invalid index: cannot get output layer " + outputLayerIdx + ", total number of network outputs = " + numOutputArrays); - return getLayer(configuration.getNetworkOutputs().get(outputLayerIdx)); + return getLayer(computationGraphConfiguration.getNetworkOutputs().get(outputLayerIdx)); } /** - * @deprecated To be removed. Use {@link #params()} + * @deprecated To be removed. Use {@link #getModelParams()} */ @Deprecated public INDArray params(boolean backwardOnly) { - return params(); + return getModelParams(); } /** @@ -3086,7 +3099,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { private double scoreHelper(MultiDataSet dataSet, boolean training){ LayerWorkspaceMgr mgr; - WorkspaceMode wsm = (training ? configuration.getTrainingWorkspaceMode() : configuration.getInferenceWorkspaceMode()); + WorkspaceMode wsm = (training ? 
computationGraphConfiguration.getTrainingWorkspaceMode() : computationGraphConfiguration.getInferenceWorkspaceMode()); if(wsm == WorkspaceMode.NONE){ mgr = LayerWorkspaceMgr.noWorkspaces(); } else { @@ -3120,7 +3133,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { double r = calcRegularizationScore(true); int i = 0; - for (String s : configuration.getNetworkOutputs()) { + for (String s : computationGraphConfiguration.getNetworkOutputs()) { GraphVertex gv = verticesMap.get(s); Layer outLayer = gv.getLayer(); if (outLayer == null || !(outLayer instanceof IOutputLayer)) { @@ -3180,7 +3193,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { private INDArray scoreExamplesHelper(MultiDataSet dataSet, boolean addRegularizationTerms){ LayerWorkspaceMgr mgr; - if(configuration.getInferenceWorkspaceMode() == WorkspaceMode.NONE){ + if(computationGraphConfiguration.getInferenceWorkspaceMode() == WorkspaceMode.NONE){ mgr = LayerWorkspaceMgr.noWorkspaces(); } else { mgr = LayerWorkspaceMgr.builder() @@ -3212,7 +3225,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { double r = (addRegularizationTerms ? calcRegularizationScore(true) : 0.0); int i = 0; - for (String s : configuration.getNetworkOutputs()) { + for (String s : computationGraphConfiguration.getNetworkOutputs()) { GraphVertex gv = verticesMap.get(s); Layer outLayer = gv.getLayer(); if (outLayer == null || !(outLayer instanceof IOutputLayer)) { @@ -3289,7 +3302,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } @Override - public double score() { + public double getScore() { return score; } @@ -3298,7 +3311,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } @Override - public INDArray params() { + public INDArray getModelParams() { return flattenedParams; } @@ -3385,7 +3398,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { @Override public Pair gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -3397,14 +3410,10 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } @Override - public NeuralNetConfiguration conf() { + public NeuralNetConfiguration getNetConfiguration() { return defaultConfiguration; } - @Override - public void setConf(NeuralNetConfiguration conf) { - throw new UnsupportedOperationException(); - } @Override public INDArray input() { @@ -3432,16 +3441,11 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } - @Override - public Map paramTable() { - return paramTable(false); - } - public Map paramTable(boolean backpropParamsOnly) { //Get all parameters from all layers/vertices Map allParams = new LinkedHashMap<>(); for(GraphVertex gv : vertices){ - Map paramMap = gv.paramTable(backpropParamsOnly); + Map paramMap = gv.getParamTable(backpropParamsOnly); for (Map.Entry entry : paramMap.entrySet()) { String newKey = gv.getVertexName() + "_" + entry.getKey(); allParams.put(newKey, entry.getValue()); @@ -3450,11 +3454,11 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { return allParams; } - @Override + public void setParamTable(@NonNull Map paramTable) { - Map m = paramTable(); + Map m = getParamTable(); Preconditions.checkArgument(paramTable.keySet().equals(m.keySet()), "Cannot set param table: parameter set keys are not equal"); - Map current = paramTable(); + Map current = 
getParamTable(); //Check shapes before doing partial assigment to avoid leaving net in incorrect state for(String s : current.keySet()){ INDArray arrCurrent = current.get(s); @@ -3578,7 +3582,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * @return Hidden state, or null if layer is not an RNN layer */ public Map rnnGetPreviousState(int layer) { - return rnnGetPreviousState(layers[layer].conf().getLayer().getLayerName()); + return rnnGetPreviousState(layers[layer].getLayerConfiguration().getLayerName()); } /** @@ -3611,7 +3615,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { l = ((org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer)l).getUnderlying(); } if (l instanceof RecurrentLayer) { - states.put(l.conf().getLayer().getLayerName(), ((RecurrentLayer) l).rnnGetPreviousState()); + states.put(l.getLayerConfiguration().getLayerName(), ((RecurrentLayer) l).rnnGetPreviousState()); } } return states; @@ -3624,7 +3628,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * @param state The state to set the specified layer to */ public void rnnSetPreviousState(int layer, Map state) { - rnnSetPreviousState(layers[layer].conf().getLayer().getLayerName(), state); + rnnSetPreviousState(layers[layer].getLayerConfiguration().getLayerName(), state); } /** @@ -3640,7 +3644,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } if (l == null || !(l instanceof RecurrentLayer)) { throw new UnsupportedOperationException( - "Layer \"" + layerName + "\" is not a recurrent layer. Cannot set state"); + "ILayer \"" + layerName + "\" is not a recurrent layer. Cannot set state"); } ((RecurrentLayer) l).rnnSetPreviousState(state); } @@ -3704,7 +3708,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } } - long fwdLen = configuration.getTbpttFwdLength(); + long fwdLen = computationGraphConfiguration.getTbpttFwdLength(); long nSubsets = timeSeriesLength / fwdLen; if (timeSeriesLength % fwdLen != 0) nSubsets++; @@ -3727,7 +3731,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if (solver == null) { try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this) + solver = new Solver.Builder().configure(getNetConfiguration()).listeners( + getTrainingListeners()).model(this) .build(); } } @@ -3882,7 +3887,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { // This output doesn't have a mask, we can skip it. 
continue; } - String outputName = configuration.getNetworkOutputs().get(i); + String outputName = computationGraphConfiguration.getNetworkOutputs().get(i); GraphVertex v = verticesMap.get(outputName); Layer ol = v.getLayer(); ol.setMaskArray(labelMaskArrays[i]); @@ -3972,8 +3977,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { labelsList = iterator.getLabels(); Layer outputLayer = getOutputLayer(0); - if(getConfiguration().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), Evaluation.class); + if(this.getComputationGraphConfiguration().isValidateOutputLayerConfig()){ + OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.getLayerConfiguration(), Evaluation.class); } return (T)doEvaluation(iterator, new org.deeplearning4j.eval.Evaluation(labelsList, topN))[0]; @@ -3990,8 +3995,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public T evaluate(MultiDataSetIterator iterator, List labelsList, int topN) { Layer outputLayer = getOutputLayer(0); - if(getConfiguration().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), Evaluation.class); + if(this.getComputationGraphConfiguration().isValidateOutputLayerConfig()){ + OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.getLayerConfiguration(), Evaluation.class); } return (T)doEvaluation(iterator, new org.deeplearning4j.eval.Evaluation(labelsList, topN))[0]; } @@ -4055,8 +4060,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public T evaluateROC(DataSetIterator iterator, int rocThresholdSteps) { Layer outputLayer = getOutputLayer(0); - if(getConfiguration().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), ROC.class); + if(this.getComputationGraphConfiguration().isValidateOutputLayerConfig()){ + OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.getLayerConfiguration(), ROC.class); } return (T)doEvaluation(iterator, new org.deeplearning4j.eval.ROC(rocThresholdSteps))[0]; } @@ -4078,8 +4083,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public T evaluateROC(MultiDataSetIterator iterator, int rocThresholdSteps) { Layer outputLayer = getOutputLayer(0); - if(getConfiguration().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), ROC.class); + if(this.getComputationGraphConfiguration().isValidateOutputLayerConfig()){ + OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.getLayerConfiguration(), ROC.class); } return (T)doEvaluation(iterator, new org.deeplearning4j.eval.ROC(rocThresholdSteps))[0]; } @@ -4101,8 +4106,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public T evaluateROCMultiClass(DataSetIterator iterator, int rocThresholdSteps) { Layer outputLayer = getOutputLayer(0); - if(getConfiguration().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), ROCMultiClass.class); + if(this.getComputationGraphConfiguration().isValidateOutputLayerConfig()){ + OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.getLayerConfiguration(), ROCMultiClass.class); } return (T)doEvaluation(iterator, new 
org.deeplearning4j.eval.ROCMultiClass(rocThresholdSteps))[0]; } @@ -4116,8 +4121,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public T evaluateROCMultiClass(MultiDataSetIterator iterator, int rocThresholdSteps) { Layer outputLayer = getOutputLayer(0); - if(getConfiguration().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), ROCMultiClass.class); + if(this.getComputationGraphConfiguration().isValidateOutputLayerConfig()){ + OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.getLayerConfiguration(), ROCMultiClass.class); } return (T)doEvaluation(iterator, new org.deeplearning4j.eval.ROCMultiClass(rocThresholdSteps))[0]; } @@ -4202,13 +4207,13 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { MultiDataSetIterator iter = iterator.asyncSupported() ? new AsyncMultiDataSetIterator(iterator, 2, true) : iterator; - WorkspaceMode cMode = configuration.getTrainingWorkspaceMode(); - configuration.setTrainingWorkspaceMode(configuration.getInferenceWorkspaceMode()); + WorkspaceMode cMode = computationGraphConfiguration.getTrainingWorkspaceMode(); + computationGraphConfiguration.setTrainingWorkspaceMode(computationGraphConfiguration.getInferenceWorkspaceMode()); - boolean useRnnSegments = (configuration.getBackpropType() == BackpropType.TruncatedBPTT); + boolean useRnnSegments = (computationGraphConfiguration.getBackpropType() == BackpropType.TruncatedBPTT); MemoryWorkspace outputWs; - if(getConfiguration().getInferenceWorkspaceMode() == WorkspaceMode.ENABLED){ + if(this.getComputationGraphConfiguration().getInferenceWorkspaceMode() == WorkspaceMode.ENABLED){ outputWs = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(WS_ALL_LAYERS_ACT_CONFIG, WS_OUTPUT_MEM); } else { outputWs = new DummyWorkspace(); @@ -4256,7 +4261,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } else { rnnClearPreviousState(); - int fwdLen = configuration.getTbpttFwdLength(); + int fwdLen = computationGraphConfiguration.getTbpttFwdLength(); long tsLength = -1; long nF = next.getFeatures().length; for (int i = 0; i < nF; i++) { @@ -4309,7 +4314,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if (iterator.asyncSupported()) ((AsyncMultiDataSetIterator) iter).shutdown(); - configuration.setTrainingWorkspaceMode(cMode); + computationGraphConfiguration.setTrainingWorkspaceMode(cMode); return evaluations; } @@ -4380,9 +4385,9 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { String out = "-"; String paramShape = "-"; if (currentVertex.isInputVertex()) { - if (inputTypes != null) vertexOutputs.put(currentVertexName, inputTypes[configuration.getNetworkInputs().indexOf(currentVertexName)]); //for input vertices the outputs are just the input types (only layer vertices have preprocessing?) + if (inputTypes != null) vertexOutputs.put(currentVertexName, inputTypes[computationGraphConfiguration.getNetworkInputs().indexOf(currentVertexName)]); //for input vertices the outputs are just the input types (only layer vertices have preprocessing?) 
} else { - connections = configuration.getVertexInputs().get(currentVertexName).toString(); + connections = computationGraphConfiguration.getVertexInputs().get(currentVertexName).toString(); List inputTypeList = new ArrayList<>(); if (currentVertex.hasLayer()) { Layer currentLayer = currentVertex.getLayer(); @@ -4394,19 +4399,19 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { paramShape = ""; if (currentLayer instanceof BidirectionalLayer) { // Bidirectional layer is not an FFL BidirectionalLayer bi = (BidirectionalLayer) currentLayer; - in = String.valueOf(((Bidirectional)bi.conf().getLayer()).getNIn()); - out = String.valueOf(((Bidirectional)bi.conf().getLayer()).getNOut()); + in = String.valueOf(((Bidirectional)bi.getLayerConfiguration()).getNIn()); + out = String.valueOf(((Bidirectional)bi.getLayerConfiguration()).getNOut()); } else { try { - in = String.valueOf(((FeedForwardLayer) currentLayer.conf().getLayer()).getNIn()); - out = String.valueOf(((FeedForwardLayer) currentLayer.conf().getLayer()).getNOut()); + in = String.valueOf(((FeedForwardLayer) currentLayer.getLayerConfiguration()).getNIn()); + out = String.valueOf(((FeedForwardLayer) currentLayer.getLayerConfiguration()).getNOut()); } catch (Exception e) { // Some layers, like PReLU, are just BaseLayers (but have parameters) } } - List paraNames = currentLayer.conf().variables(); + List paraNames = currentLayer.getNetConfiguration().netWideVariables(); for (String aP : paraNames) { - String paramS = ArrayUtils.toString(currentLayer.paramTable().get(aP).shape()); + String paramS = ArrayUtils.toString(currentLayer.getParamTable().get(aP).shape()); paramShape += aP + ":" + paramS + ", "; } paramShape = paramShape.subSequence(0, paramShape.lastIndexOf(",")).toString(); @@ -4425,7 +4430,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { inShape = currentInType.toString(); inputTypeList.add(currentInType); - InputPreProcessor layerVertexPreProcesor = ((org.deeplearning4j.nn.conf.graph.LayerVertex)configuration.getVertices().get(currentVertexName)).getPreProcessor(); + InputPreProcessor layerVertexPreProcesor = ((org.deeplearning4j.nn.conf.graph.LayerVertex) computationGraphConfiguration.getVertices().get(currentVertexName)).getPreProcessor(); if (layerVertexPreProcesor != null) { inShape += "-->" + layerVertexPreProcesor.getOutputType(currentInType); } @@ -4444,7 +4449,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } } if (inputTypes != null) { - InputType currentVertexOutputType = configuration.getVertices().get(currentVertexName).getOutputType(currLayerIdx, inputTypeList.toArray(new InputType[inputTypeList.size()])); + InputType currentVertexOutputType = computationGraphConfiguration.getVertices().get(currentVertexName).getOutputType(currLayerIdx, inputTypeList.toArray(new InputType[inputTypeList.size()])); outShape = currentVertexOutputType.toString(); vertexOutputs.put(currentVertexName, currentVertexOutputType); } @@ -4495,8 +4500,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { } ret.append(StringUtils.repeat("-", totalLength)) - .append(String.format("\n%30s %,d", "Total Parameters: ", params().length())) - .append(String.format("\n%30s %,d", "Trainable Parameters: ", params().length() - frozenParams)) + .append(String.format("\n%30s %,d", "Total Parameters: ", getModelParams().length())) + .append(String.format("\n%30s %,d", "ITrainableLayer Parameters: ", getModelParams().length() - 
frozenParams)) .append(String.format("\n%30s %,d", "Frozen Parameters: ", frozenParams)) .append("\n") .append(StringUtils.repeat("=", totalLength)) @@ -4546,14 +4551,14 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * The current epoch count can be obtained using {@code ComputationGraph.getConfiguration().getEpochCount()} */ public void incrementEpochCount(){ - configuration.setEpochCount(configuration.getEpochCount() + 1); + computationGraphConfiguration.setEpochCount(computationGraphConfiguration.getEpochCount() + 1); synchronizeIterEpochCounts(); } protected void synchronizeIterEpochCounts(){ //TODO: this is necessrry for some schedules - but the redundant values are a little ugly... - int currIter = getConfiguration().getIterationCount(); - int currEpoch = getConfiguration().getEpochCount(); + int currIter = this.getComputationGraphConfiguration().getIterationCount(); + int currEpoch = this.getComputationGraphConfiguration().getEpochCount(); for(Layer l : layers){ l.setIterationCount(currIter); l.setEpochCount(currEpoch); @@ -4565,7 +4570,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * @return Number of iterations */ public int getIterationCount(){ - return configuration.getIterationCount(); + return computationGraphConfiguration.getIterationCount(); } /** @@ -4576,7 +4581,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { * @return Number of epochs */ public int getEpochCount(){ - return configuration.getEpochCount(); + return computationGraphConfiguration.getEpochCount(); } /** @@ -4627,13 +4632,13 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public ComputationGraph convertDataType(@NonNull DataType dataType){ Preconditions.checkState(dataType.isFPType(), "Invalid DataType: %s. Can only convert network to a floating point type", dataType); - if(dataType == params().dataType()){ + if(dataType == getModelParams().dataType()){ return this; } try(MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - INDArray newParams = params().castTo(dataType); - String jsonConfig = getConfiguration().toJson(); + INDArray newParams = getModelParams().castTo(dataType); + String jsonConfig = this.getComputationGraphConfiguration().toJson(); ComputationGraphConfiguration newConf = ComputationGraphConfiguration.fromJson(jsonConfig); newConf.setDataType(dataType); ComputationGraph newNet = new ComputationGraph(newConf); @@ -4714,7 +4719,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { /** * Get the current learning rate, for the specified layer, from the network. * Note: If the layer has no learning rate (no parameters, or an updater without a learning rate) then null is returned - * @param layerName Layer name + * @param layerName ILayer name * @return Learning rate for the specified layer, or null */ public Double getLearningRate(String layerName){ @@ -4724,7 +4729,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { /** * Return the layer size (number of units) for the specified layer. * Note that the meaning of the "layer size" can depend on the type of layer. For example:
- * - DenseLayer, OutputLayer, recurrent layers: number of units (nOut configuration option)
+ * - DenseLayerConfiguration, OutputLayer, recurrent layers: number of units (nOut configuration option)
* - ConvolutionLayer: the channels (number of channels)
* - Subsampling layers, global pooling layers, etc: size of 0 is always returned
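// Illustrative sketch added for clarity (not part of the patch): the renamed accessors keep the
// documented contract above -- layerSize(...) reports nOut for feed-forward style configurations
// and 0 for layers that have no notion of units (subsampling, global pooling, ...). The helper
// below mirrors the logic of layerSize(String) further down in this file; LayerConfiguration and
// FeedForwardLayer are the refactored types assumed from this patch.
private static long unitsOf(LayerConfiguration conf) {
    if (!(conf instanceof FeedForwardLayer)) {
        return 0;                                   // no notion of "units" for this layer type
    }
    return ((FeedForwardLayer) conf).getNOut();     // nOut == layer size for dense/output/recurrent layers
}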
* @@ -4733,16 +4738,16 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public long layerSize(int layer) { if (layer < 0 || layer > layers.length) { - throw new IllegalArgumentException("Invalid layer index: " + layer + ". Layer index must be between 0 and " + throw new IllegalArgumentException("Invalid layer index: " + layer + ". ILayer index must be between 0 and " + (layers.length - 1) + " inclusive"); } - return layerSize(layers[layer].conf().getLayer().getLayerName()); + return layerSize(layers[layer].getLayerConfiguration().getLayerName()); } /** * Return the input size (number of inputs) for the specified layer.
* Note that the meaning of the "input size" can depend on the type of layer. For example:
- * - DenseLayer, OutputLayer, etc: the feature vector size (nIn configuration option)
+ * - DenseLayerConfiguration, OutputLayer, etc: the feature vector size (nIn configuration option)
* - Recurrent layers: the feature vector size per time step (nIn configuration option)
* - ConvolutionLayer: the channels (number of channels)
* - Subsampling layers, global pooling layers, etc: size of 0 is always returned
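// Illustrative usage sketch (not part of the patch): with the renamed accessors, callers can
// still query per-layer sizes by index or by layer name. "cg" is a hypothetical, already
// initialized ComputationGraph; getLayers(), layerSize(String) and layerInputSize(String) are
// the methods shown in this file.
for (Layer l : cg.getLayers()) {
    String name = l.getLayerConfiguration().getLayerName();   // post-refactor accessor
    System.out.println(name + ": nIn=" + cg.layerInputSize(name) + ", nOut=" + cg.layerSize(name));
}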
@@ -4752,16 +4757,16 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { */ public long layerInputSize(int layer) { if (layer < 0 || layer > layers.length) { - throw new IllegalArgumentException("Invalid layer index: " + layer + ". Layer index must be between 0 and " + throw new IllegalArgumentException("Invalid layer index: " + layer + ". ILayer index must be between 0 and " + (layers.length - 1) + " inclusive"); } - return layerInputSize(layers[layer].conf().getLayer().getLayerName()); + return layerInputSize(layers[layer].getLayerConfiguration().getLayerName()); } /** * Return the layer size (number of units) for the specified layer.
* Note that the meaning of the "layer size" can depend on the type of layer. For example:
- * - DenseLayer, OutputLayer, recurrent layers: number of units (nOut configuration option)
+ * - DenseLayerConfiguration, OutputLayer, recurrent layers: number of units (nOut configuration option)
* - ConvolutionLayer: the channels (number of channels)
* - Subsampling layers, global pooling layers, etc: size of 0 is always returned
* @@ -4773,7 +4778,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if(l == null){ throw new IllegalArgumentException("No layer with name \"" + layerName + "\" exists"); } - org.deeplearning4j.nn.conf.layers.Layer conf = l.conf().getLayer(); + LayerConfiguration conf = l.getLayerConfiguration(); if (conf == null || !(conf instanceof FeedForwardLayer)) { return 0; } @@ -4785,7 +4790,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { /** * Return the input size (number of inputs) for the specified layer.
* Note that the meaning of the "input size" can depend on the type of layer. For example:
- * - DenseLayer, OutputLayer, etc: the feature vector size (nIn configuration option)
+ * - DenseLayerConfiguration, OutputLayer, etc: the feature vector size (nIn configuration option)
* - Recurrent layers: the feature vector size per time step (nIn configuration option)
* - ConvolutionLayer: the channels (number of channels)
* - Subsampling layers, global pooling layers, etc: size of 0 is always returned
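// Illustrative sketch (not part of the patch): the graph-level paramTable(boolean) shown earlier
// in this file aggregates each vertex's getParamTable(backpropParamsOnly) and prefixes every key
// with the vertex name, so a single parameter can be looked up as "<vertexName>_<paramKey>".
// "cg" and the vertex name "dense0" are hypothetical.
INDArray denseWeights = cg.paramTable(false).get("dense0_W");   // weight matrix of hypothetical vertex "dense0"
INDArray denseBias    = cg.paramTable(false).get("dense0_b");   // bias of the same vertex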
@@ -4798,7 +4803,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { if(l == null){ throw new IllegalArgumentException("No layer with name \"" + layerName + "\" exists"); } - org.deeplearning4j.nn.conf.layers.Layer conf = l.conf().getLayer(); + LayerConfiguration conf = l.getLayerConfiguration(); if (conf == null || !(conf instanceof FeedForwardLayer)) { return 0; } @@ -4859,8 +4864,8 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { return false; if (obj instanceof ComputationGraph) { ComputationGraph network = (ComputationGraph) obj; - boolean paramsEquals = network.params().equals(params()); - boolean confEquals = getConfiguration().equals(network.getConfiguration()); + boolean paramsEquals = network.getModelParams().equals(getModelParams()); + boolean confEquals = this.getComputationGraphConfiguration().equals(network.getComputationGraphConfiguration()); boolean updaterEquals = getUpdater().equals(network.getUpdater()); return paramsEquals && confEquals && updaterEquals; } @@ -4875,7 +4880,7 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { val cg = ModelSerializer.restoreComputationGraph(ois, true); this.defaultConfiguration = cg.defaultConfiguration.clone(); - this.configuration = cg.configuration.clone(); + this.computationGraphConfiguration = cg.computationGraphConfiguration.clone(); this.init(); this.flattenedParams.assign(cg.flattenedParams); @@ -4906,4 +4911,22 @@ public class ComputationGraph implements Serializable, Model, NeuralNetwork { Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); System.gc(); } + + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + throw new UnsupportedOperationException("Not supported"); + } + + /** + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + throw new RuntimeException("Not supported"); + } + + @Override + public boolean updaterDivideByMinibatch(String paramName) { + return false; + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseGraphVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseGraphVertex.java index afffe99d4..269e67ac0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseGraphVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseGraphVertex.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.graph.vertex; import lombok.Data; import lombok.Getter; import lombok.Setter; -import org.deeplearning4j.nn.api.TrainingConfig; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.vertex.impl.LayerVertex; import org.nd4j.linalg.api.buffer.DataType; @@ -38,6 +38,16 @@ public abstract class BaseGraphVertex implements GraphVertex { protected ComputationGraph graph; + public BaseGraphVertex(){}; + @Override + public Map getParamTable() { + return null; + } + + public void setParamTable(Map params) { + throw new RuntimeException("Not implemented."); + } + protected String vertexName; /** The index of this vertex */ @@ -58,8 +68,8 @@ public abstract class BaseGraphVertex implements GraphVertex { protected INDArray[] inputs; protected INDArray epsilon; - //Set outputVertex to true when Layer is an OutputLayer, OR For use in specialized situations like reinforcement learning - // For RL situations, this Layer insn't an OutputLayer, but 
is the last layer in a graph, that gets its error/epsilon + //Set outputVertex to true when ILayer is an OutputLayer, OR For use in specialized situations like reinforcement learning + // For RL situations, this ILayer insn't an OutputLayer, but is the last layer in a graph, that gets its error/epsilon // passed in externally @Setter @Getter protected boolean outputVertex; @@ -197,22 +207,22 @@ public abstract class BaseGraphVertex implements GraphVertex { } @Override - public Map paramTable(boolean backpropOnly) { + public Map getParamTable(boolean backpropOnly) { return Collections.emptyMap(); } @Override public long numParams(){ - return params() == null ? 0 : params().length(); + return getParams() == null ? 0 : getParams().length(); } @Override - public TrainingConfig getConfig() { + public ITraininableLayerConfiguration getTrainingConfig() { return null; } @Override - public INDArray params() { + public INDArray getParams() { return null; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseWrapperVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseWrapperVertex.java index 949ee0f7e..d73315645 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseWrapperVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/BaseWrapperVertex.java @@ -22,7 +22,7 @@ package org.deeplearning4j.nn.graph.vertex; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.TrainingConfig; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.nd4j.linalg.api.ndarray.INDArray; @@ -179,18 +179,18 @@ public abstract class BaseWrapperVertex implements GraphVertex { } @Override - public Map paramTable(boolean backpropOnly) { - return underlying.paramTable(backpropOnly); + public Map getParamTable(boolean backpropOnly) { + return underlying.getParamTable(backpropOnly); } @Override - public TrainingConfig getConfig() { - return underlying.getConfig(); + public ITraininableLayerConfiguration getTrainingConfig() { + return underlying.getTrainingConfig(); } @Override - public INDArray params() { - return underlying.params(); + public INDArray getParams() { + return underlying.getParams(); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/GraphVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/GraphVertex.java index 73e4b2fc4..51bd7ee62 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/GraphVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/GraphVertex.java @@ -20,9 +20,9 @@ package org.deeplearning4j.nn.graph.vertex; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.Trainable; import org.deeplearning4j.nn.gradient.Gradient; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.common.primitives.Pair; @@ -31,7 +31,7 @@ import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import java.io.Serializable; import java.util.Map; -public interface GraphVertex extends Trainable, Serializable { +public interface GraphVertex extends ITrainableLayer, Serializable { /** Get the name/label of the GraphVertex */ @@ -40,7 +40,7 
@@ public interface GraphVertex extends Trainable, Serializable { /** Get the index of the GraphVertex */ int getVertexIndex(); - /** Get the number of input arrays. For example, a Layer may have only one input array, but in general a GraphVertex + /** Get the number of input arrays. For example, a ILayer may have only one input array, but in general a GraphVertex * may have an arbtrary (>=1) number of input arrays (for example, from multiple other layers) */ int getNumInputArrays(); @@ -85,7 +85,7 @@ public interface GraphVertex extends Trainable, Serializable { /** Set the GraphVertex to be an output vertex */ void setOutputVertex(boolean outputVertex); - /** Get the Layer (if any). Returns null if {@link #hasLayer()} == false */ + /** Get the ILayer (if any). Returns null if {@link #hasLayer()} == false */ Layer getLayer(); /** Set the input activations. @@ -156,5 +156,5 @@ public interface GraphVertex extends Trainable, Serializable { * @param backpropOnly If true: exclude unsupervised training parameters * @return Parameter table */ - Map paramTable(boolean backpropOnly); + Map getParamTable(boolean backpropOnly); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/FrozenVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/FrozenVertex.java index 77107c6ee..a3f45121a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/FrozenVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/FrozenVertex.java @@ -20,16 +20,14 @@ package org.deeplearning4j.nn.graph.vertex.impl; -import lombok.AllArgsConstructor; +import java.util.Map; + import lombok.EqualsAndHashCode; -import org.deeplearning4j.nn.api.TrainingConfig; -import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.conf.misc.DummyConfig; import org.deeplearning4j.nn.graph.vertex.BaseWrapperVertex; import org.deeplearning4j.nn.graph.vertex.GraphVertex; import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.linalg.learning.config.IUpdater; -import org.nd4j.linalg.learning.config.NoOp; @EqualsAndHashCode(callSuper = true, exclude = {"config"}) public class FrozenVertex extends BaseWrapperVertex { @@ -40,10 +38,30 @@ public class FrozenVertex extends BaseWrapperVertex { private transient DummyConfig config; @Override - public TrainingConfig getConfig(){ + public ITraininableLayerConfiguration getTrainingConfig(){ if (config == null) { config = new DummyConfig(getVertexName()); } return config; } + + /** + * The param table + * + * @return + */ + @Override + public Map getParamTable() { + return null; + } + + /** + * Setter for the param table + * + * @param paramTable + */ + @Override + public void setParamTable(Map paramTable) { + + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/LayerVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/LayerVertex.java index fdd05c390..a0df3e1bb 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/LayerVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/LayerVertex.java @@ -24,7 +24,7 @@ import lombok.Data; import lombok.EqualsAndHashCode; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.TrainingConfig; +import 
org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.api.layers.IOutputLayer; import org.deeplearning4j.nn.api.layers.RecurrentLayer; import org.deeplearning4j.nn.conf.InputPreProcessor; @@ -85,12 +85,12 @@ public class LayerVertex extends BaseGraphVertex { return; this.layer = new FrozenLayer(this.layer); - this.layer.conf().getLayer().setLayerName(vertexName); + this.layer.getLayerConfiguration().setLayerName(vertexName); } @Override - public Map paramTable(boolean backpropOnly) { - return layer.paramTable(backpropOnly); + public Map getParamTable(boolean backpropOnly) { + return layer.getParamTable(backpropOnly); } @Override @@ -124,10 +124,10 @@ public class LayerVertex extends BaseGraphVertex { public Pair doBackward(boolean tbptt, LayerWorkspaceMgr workspaceMgr) { if (!canDoBackward()) { if(inputs == null || inputs[0] == null){ - throw new IllegalStateException("Cannot do backward pass: inputs not set. Layer: \"" + vertexName + throw new IllegalStateException("Cannot do backward pass: inputs not set. ILayer: \"" + vertexName + "\" (idx " + vertexIndex + "), numInputs: " + getNumInputArrays()); } else { - throw new IllegalStateException("Cannot do backward pass: all epsilons not set. Layer \"" + vertexName + throw new IllegalStateException("Cannot do backward pass: all epsilons not set. ILayer \"" + vertexName + "\" (idx " + vertexIndex + "), numInputs :" + getNumInputArrays() + "; numOutputs: " + getNumOutputConnections()); } @@ -142,7 +142,7 @@ public class LayerVertex extends BaseGraphVertex { if (tbptt && layer instanceof RecurrentLayer) { //Truncated BPTT for recurrent layers pair = ((RecurrentLayer) layer).tbpttBackpropGradient(epsilon, - graph.getConfiguration().getTbpttBackLength(), workspaceMgr); + graph.getComputationGraphConfiguration().getTbpttBackLength(), workspaceMgr); } else { //Normal backprop pair = layer.backpropGradient(epsilon, workspaceMgr); //epsTotal may be null for OutputLayers @@ -263,13 +263,13 @@ public class LayerVertex extends BaseGraphVertex { } @Override - public TrainingConfig getConfig(){ - return getLayer().getConfig(); + public ITraininableLayerConfiguration getTrainingConfig(){ + return getLayer().getTrainingConfig(); } @Override - public INDArray params(){ - return layer.params(); + public INDArray getParams(){ + return layer.getParams(); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/DuplicateToTimeSeriesVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/DuplicateToTimeSeriesVertex.java index 2bfc6ee97..27eb238d3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/DuplicateToTimeSeriesVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/DuplicateToTimeSeriesVertex.java @@ -48,10 +48,10 @@ public class DuplicateToTimeSeriesVertex extends BaseGraphVertex { VertexIndices[] inputVertices, VertexIndices[] outputVertices, String inputName, DataType dataType) { super(graph, name, vertexIndex, inputVertices, outputVertices, dataType); this.inputName = inputName; - this.inputVertexIndex = graph.getConfiguration().getNetworkInputs().indexOf(inputName); + this.inputVertexIndex = graph.getComputationGraphConfiguration().getNetworkInputs().indexOf(inputName); if (inputVertexIndex == -1) throw new IllegalArgumentException("Invalid input name: \"" + inputName + "\" not found in list " - + "of network inputs (" + 
graph.getConfiguration().getNetworkInputs() + ")"); + + "of network inputs (" + graph.getComputationGraphConfiguration().getNetworkInputs() + ")"); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/LastTimeStepVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/LastTimeStepVertex.java index 0475936d0..4402dc4c5 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/LastTimeStepVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/LastTimeStepVertex.java @@ -54,10 +54,10 @@ public class LastTimeStepVertex extends BaseGraphVertex { VertexIndices[] outputVertices, String inputName, DataType dataType) { super(graph, name, vertexIndex, inputVertices, outputVertices, dataType); this.inputName = inputName; - this.inputIdx = graph.getConfiguration().getNetworkInputs().indexOf(inputName); + this.inputIdx = graph.getComputationGraphConfiguration().getNetworkInputs().indexOf(inputName); if (inputIdx == -1) throw new IllegalArgumentException("Invalid input name: \"" + inputName + "\" not found in list " - + "of network inputs (" + graph.getConfiguration().getNetworkInputs() + ")"); + + "of network inputs (" + graph.getComputationGraphConfiguration().getNetworkInputs() + ")"); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/ReverseTimeSeriesVertex.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/ReverseTimeSeriesVertex.java index 359a576a3..86b5dcab3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/ReverseTimeSeriesVertex.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/graph/vertex/impl/rnn/ReverseTimeSeriesVertex.java @@ -48,10 +48,10 @@ public class ReverseTimeSeriesVertex extends BaseGraphVertex { this.inputIdx = -1; } else { // Find the given input - this.inputIdx = graph.getConfiguration().getNetworkInputs().indexOf(inputName); + this.inputIdx = graph.getComputationGraphConfiguration().getNetworkInputs().indexOf(inputName); if (inputIdx == -1) throw new IllegalArgumentException("Invalid input name: \"" + inputName + "\" not found in list " - + "of network inputs (" + graph.getConfiguration().getNetworkInputs() + ")"); + + "of network inputs (" + graph.getComputationGraphConfiguration().getNetworkInputs() + ")"); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/AbstractLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/AbstractLayer.java index 5c4c8ee16..d14f20d85 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/AbstractLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/AbstractLayer.java @@ -20,421 +20,590 @@ package org.deeplearning4j.nn.layers; -import lombok.AccessLevel; -import lombok.Data; -import lombok.NoArgsConstructor; -import lombok.Setter; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import lombok.*; +import net.brutex.ai.dnn.api.IModel; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.TrainingConfig; import org.deeplearning4j.nn.api.layers.LayerConstraint; import 
org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.workspace.ArrayType; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.deeplearning4j.optimize.api.ConvexOptimizer; import org.deeplearning4j.optimize.api.TrainingListener; +import org.nd4j.common.primitives.Pair; +import org.nd4j.evaluation.IEvaluation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.common.primitives.Pair; +import org.nd4j.linalg.dataset.api.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; +import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; -import java.util.*; +/** A layer with input and output, no parameters or gradients */ +@NoArgsConstructor(force = true) +public abstract class AbstractLayer implements Layer { -/** - * A layer with input and output, no parameters or gradients - */ -@Data -@NoArgsConstructor -public abstract class AbstractLayer implements Layer { + private final @Getter List variables = new ArrayList<>(); - @Setter(AccessLevel.NONE) - protected INDArray input; - protected INDArray preOutput; - protected NeuralNetConfiguration conf; - protected boolean dropoutApplied = false; - protected Collection trainingListeners = new ArrayList<>(); - protected int index = 0; - protected INDArray maskArray; - protected MaskState maskState; - protected CacheMode cacheMode = CacheMode.NONE; - protected boolean inputModificationAllowed = false; - protected DataType dataType; + @Getter + @Setter(AccessLevel.MODULE) + protected INDArray + input; // TODO: this should be private, but too much code is still accessing input directly. - protected int iterationCount; - protected int epochCount; + protected INDArray preOutput; + /** The typed {@link LayerConfiguration}. 
*/ + @Getter @NonNull protected LayerConf_T layerConfiguration; - public AbstractLayer(NeuralNetConfiguration conf, DataType dataType) { - this.conf = conf; - if (conf != null) - cacheMode = conf.getCacheMode(); - this.dataType = dataType; + protected boolean dropoutApplied = false; + + @Getter @Setter @NonNull + protected Collection trainingListeners = new ArrayList<>(); + + protected @Getter @Setter int index = 0; + protected @Getter @Setter INDArray maskArray; + protected @Getter @Setter MaskState maskState; + protected CacheMode cacheMode = CacheMode.NONE; + protected boolean inputModificationAllowed = false; + protected DataType dataType; + protected @Getter @Setter int iterationCount; + protected @Getter @Setter int epochCount; + private @Getter @Setter IModel net; + + @Getter @Setter @NonNull private NeuralNetConfiguration netConfiguration; + + public AbstractLayer(@NonNull LayerConfiguration layerConf, @NonNull DataType dataType) { + //noinspection unchecked + this.layerConfiguration = (LayerConf_T) layerConf; + this.netConfiguration = layerConfiguration.getNetConfiguration(); + + if (layerConfiguration.getNetConfiguration() != null) { + cacheMode = layerConfiguration.getNetConfiguration().getCacheMode(); + } + this.dataType = dataType; + if (layerConfiguration.getNetConfiguration() == null) { + throw new RuntimeException("You cannot create a layer from a layer configuration, that is not part of any neural network configuration."); + } + this.net = layerConfiguration.getNetConfiguration().getNet(); + } + + public void addTrainingListeners(TrainingListener... listeners) { + if(listeners != null) + trainingListeners.addAll(List.of(listeners)); + } + + public void addTrainingListeners(Collection listeners) { + if(listeners != null) + trainingListeners.addAll(listeners); + } + + @Override + public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr workspaceMgr) { + setInput(input, workspaceMgr); + return activate(training, workspaceMgr); + } + + /** + * Creates and returns a copy of this object. + * + * @return a clone of this instance. + * @throws CloneNotSupportedException if the object's class does not support the {@code Cloneable} + * interface. Subclasses that override the {@code clone} method can also throw this exception + * to indicate that an instance cannot be cloned. 
+ * @see Cloneable + */ + @Override + protected Object clone() throws CloneNotSupportedException { + return super.clone(); + } + + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + + /** + * This method returns Optimizer used for training + * + * @return + */ + @Override + public ConvexOptimizer getOptimizer() { + return null; + } + + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(DataSet dataSet) {} + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) {} + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) {} + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) {} + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, T... evaluations) { + return null; + } + + /** Init the model */ + @Override + public void init() {} + + /** + * Update layer weights and biases with gradient change + * + * @param gradient + */ + @Override + public void update(Gradient gradient) {} + + /** + * Perform one update applying the gradient + * + * @param gradient the gradient to apply + * @param paramType + */ + @Override + public void update(INDArray gradient, String paramType) {} + + /** + * Update the score + * + * @param workspaceMgr + */ + @Override + public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) {} + + /** + * the number of parameters for the model + * + * @param backwards + * @return the number of parameters for the model + */ + @Override + public long numParams(boolean backwards) { + return 0; + } + + @Override + public void setParam(String s, INDArray array) {} + + /** + * Get a parameter array for a given parameter type key + * + * @param param the key of the parameter + * @return ndarray of parameters + */ + @Override + public INDArray getParam(String param) { + return null; + } + + /** + * Set the initial parameters array as a view of the full (backprop) network parameters NOTE: this + * is intended to be used internally in MultiLayerNetwork and ComputationGraph, not by users. + * + * @param params a 1 x nParams row vector that is a view of the larger (MLN/CG) parameters array + */ + @Override + public void setParamsViewArray(INDArray params) {} + + /** + * Set the gradients array as a view of the full (backprop) network parameters NOTE: this is + * intended to be used internally in MultiLayerNetwork and ComputationGraph, not by users. 
+ * + * @param gradients a 1 x nParams row vector that is a view of the larger (MLN/CG) gradients array + */ + @Override + public void setBackpropGradientsViewArray(INDArray gradients) {} + + /** + * The current inputs batch size + * + * @return the current inputs batch size + */ + @Override + public int batchSize() { + return 0; + } + + /** + * The input/feature matrix for the model + * + * @return the input/feature matrix for the model + */ + @Override + public INDArray input() { + return this.input; + } + + /** */ + @Override + public void close() {} + + /** + * Calculate the gradient relative to the error in the next layer + * + * @param epsilon w^(L+1)*delta^(L+1). Or, equiv: dC/da, i.e., (dC/dz)*(dz/da) = dC/da, where C is + * cost function a=sigma(z) is activation. + * @param workspaceMgr Workspace manager + * @return Pair where Gradient is gradient for this layer, INDArray is epsilon + * (activation gradient) needed by next layer, but before element-wise multiply by + * sigmaPrime(z). So for standard feed-forward layer, if this layer is L, then + * return.getSecond() == dL/dIn = (w^(L)*(delta^(L))^T)^T. Note that the returned array should + * be placed in the {@link ArrayType#ACTIVATION_GRAD} workspace via the workspace manager + */ + @Override + public Pair backpropGradient( + INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { + return null; + } + + /** + * Returns true if the layer can be trained in an unsupervised/pretrain manner (AE, VAE, etc) + * + * @return true if the layer can be pretrained (using fit(INDArray), false otherwise + */ + @Override + public boolean isPretrainLayer() { + return false; + } + + /** */ + @Override + public void clearNoiseWeightParams() {} + + public List getVariables(boolean copy) { + if (copy) { + return new ArrayList<>(getVariables()); + } + return variables; + } + + public void addVariable(String variable) { + if (!variables.contains(variable)) { + variables.add(variable); + } + } + + public void setLayerConfiguration(LayerConfiguration layerConfiguration) { + //noinspection unchecked + this.layerConfiguration = (LayerConf_T) layerConfiguration; + } + + @Override + public void setCacheMode(CacheMode mode) { + if (mode == null) { + mode = CacheMode.NONE; } - @Override - public void setCacheMode(CacheMode mode) { - if (mode == null) - mode = CacheMode.NONE; + this.cacheMode = mode; + } - this.cacheMode = mode; + public LayerConf_T getTypedLayerConfiguration() { + return this.layerConfiguration; + } + + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + return (ITraininableLayerConfiguration) getTypedLayerConfiguration(); + } + + protected String layerId() { + String name = this.layerConfiguration.getLayerName(); + return "(layer name: " + + (name == null ? 
"\"\"" : name) + + ", layer index: " + + index + + ", layer type: " + + getClass().getSimpleName() + + ")"; + } + + @Override + public void setInput(@NonNull INDArray input, LayerWorkspaceMgr workspaceMgr) { + this.input = workspaceMgr.leverageTo(ArrayType.INPUT, input); + dropoutApplied = false; + } + + /** + * Returns the parameters of the neural network as a flattened row vector + * + * @return the parameters of the neural network + */ + @Override + public INDArray getModelParams() { + return null; + } + + protected void setParams(INDArray params, char order) { + throw new UnsupportedOperationException("Not supported"); + } + + /** + * @return Number of parameters + */ + @Override + public long numParams() { + return 0; + } + + protected void applyMask(INDArray to) { + to.muliColumnVector(maskArray.castTo(to.dataType())); + } + + @Override + public double calcRegularizationScore(boolean backpropParamsOnly) { + return 0.0; + } + + @Deprecated + public void clear() { + input = null; + maskArray = null; + maskState = null; + if (getTypedLayerConfiguration().getIDropout() != null) { + getTypedLayerConfiguration().getIDropout().clear(); } + } - public LayerConfT layerConf() { - return (LayerConfT) this.conf.getLayer(); + protected void applyDropOutIfNecessary(boolean training, LayerWorkspaceMgr workspaceMgr) { + if (training && !dropoutApplied && getTypedLayerConfiguration().getIDropout() != null) { + INDArray result; + if (inputModificationAllowed) { + result = input; + } else { + result = + workspaceMgr.createUninitialized( + ArrayType.INPUT, input.dataType(), input.shape(), input.ordering()); + } + + input = + getTypedLayerConfiguration() + .getIDropout() + .applyDropout(input, result, getIterationCount(), getEpochCount(), workspaceMgr); + dropoutApplied = true; } + } - @Override - public TrainingConfig getConfig(){ - return conf.getLayer(); + protected INDArray backpropDropOutIfPresent(INDArray epsilon) { + if (getTypedLayerConfiguration().getIDropout() != null) { + getTypedLayerConfiguration() + .getIDropout() + .backprop(epsilon, epsilon, getIterationCount(), getEpochCount()); } + return epsilon; + } - protected String layerId() { - String name = this.conf().getLayer().getLayerName(); - return "(layer name: " + (name == null ? 
"\"\"" : name) + ", layer index: " + index + ", layer type: " + - getClass().getSimpleName() + ")"; + @Override + public Type type() { + return Type.FEED_FORWARD; + } + + public void fit(INDArray input, LayerWorkspaceMgr workspaceMgr) { + throw new UnsupportedOperationException("Not supported"); + } + + public Pair gradientAndScore() { + return new Pair<>(gradient(), getScore()); + } + + @Override + public int getInputMiniBatchSize() { + return (int) input.size(0); + } + + @Override + public void setInputMiniBatchSize(int size) {} + + @Override + public Pair feedForwardMaskArray( + INDArray maskArray, MaskState currentMaskState, int minibatchSize) { + // Most layers: CNN, dense, activation, etc - set mask array, mask state and then leave the mask + // unmodified + + this.maskArray = maskArray; + this.maskState = currentMaskState; + + return new Pair<>(maskArray, currentMaskState); + } + + public Gradient gradient() { + throw new UnsupportedOperationException( + "Not supported for this layer, or should be overridden for layers requiring it"); + } + + public void fit() { + throw new UnsupportedOperationException( + "Not supported for this layer, or should be overridden for layers requiring it"); + } + + public double getScore() { + throw new UnsupportedOperationException( + "Not supported for this layer, or should be overridden for layers requiring it"); + } + + public void applyConstraints(int iteration, int epoch) { + if (getTypedLayerConfiguration().getConstraints() != null) { + for (LayerConstraint lc : getTypedLayerConfiguration().getConstraints()) { + lc.applyConstraint(this, iteration, epoch); + } } + } - public INDArray getInput() { - return input; - } - - public int getEpochCount() { - return epochCount; - } - - public void setEpochCount(int epochCount) { - this.epochCount = epochCount; - } - - /** - * Init the model - */ - @Override - public void init() { - - } - - @Override - public void setInput(INDArray input, LayerWorkspaceMgr workspaceMgr) { - this.input = workspaceMgr.leverageTo(ArrayType.INPUT, input); - dropoutApplied = false; - } - - @Override - public int getIndex() { - return index; - } - - @Override - public void setIndex(int index) { - this.index = index; - } - - - @Override - public Collection getListeners() { - return trainingListeners; - } - - @Override - public void setListeners(Collection listeners) { - this.trainingListeners = listeners != null ? listeners : new ArrayList(); - } - - /** - * This method ADDS additional TrainingListener to existing listeners - * - * @param listeners - */ - @Override - public void addListeners(TrainingListener... listeners) { - if (this.trainingListeners == null) { - setListeners(listeners); - return; - } - - Collections.addAll(trainingListeners, listeners); - } - - @Override - public void setListeners(TrainingListener... 
listeners) { - setListeners(Arrays.asList(listeners)); - } - - @Override - public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public void update(Gradient gradient) { - throw new UnsupportedOperationException(); - } - - @Override - public void update(INDArray gradient, String paramType) { - throw new UnsupportedOperationException(); - } - - - @Override - public ConvexOptimizer getOptimizer() { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public void setConf(NeuralNetConfiguration conf) { - this.conf = conf; - } - - /**Returns the parameters of the neural network as a flattened row vector - * @return the parameters of the neural network - */ - @Override - public INDArray params() { - return null; - } - - @Override - public INDArray getParam(String param) { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public void setParam(String key, INDArray val) { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public void setParams(INDArray params) { - if (params != null) { - throw new UnsupportedOperationException("Not supported"); - } - } - - protected void setParams(INDArray params, char order) { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public void setParamsViewArray(INDArray params) { - if (params != null) { - throw new UnsupportedOperationException("Not supported"); - } - } - - @Override - public INDArray getGradientsViewArray() { - return null; - } - - @Override - public void setBackpropGradientsViewArray(INDArray gradients) { - if (gradients != null) { - throw new UnsupportedOperationException("Not supported"); - } - } - - @Override - public void setParamTable(Map paramTable) { - if (paramTable != null && !paramTable.isEmpty()) { - throw new UnsupportedOperationException("Not supported"); - } - } - - @Override - public Map paramTable() { - return paramTable(false); - } - - @Override - public Map paramTable(boolean backpropParamsOnly) { - return Collections.emptyMap(); - } - - protected void applyMask(INDArray to) { - to.muliColumnVector(maskArray.castTo(to.dataType())); - } - - @Override - public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr workspaceMgr) { - setInput(input, workspaceMgr); - return activate(training, workspaceMgr); - } - - @Override - public double calcRegularizationScore(boolean backpropParamsOnly){ - return 0.0; - } - - @Override - public int batchSize() { - return (int) input.size(0); - } - - @Override - public NeuralNetConfiguration conf() { - return conf; - } - - - @Override - public void clear() { - input = null; - maskArray = null; - maskState = null; - if(layerConf().getIDropout() != null){ - layerConf().getIDropout().clear(); - } - } - - protected void applyDropOutIfNecessary(boolean training, LayerWorkspaceMgr workspaceMgr){ - if(training && !dropoutApplied && layerConf().getIDropout() != null ){ - INDArray result; - if(inputModificationAllowed){ - result = input; - } else { - result = workspaceMgr.createUninitialized(ArrayType.INPUT, input.dataType(), input.shape(), input.ordering()); - } - - input = layerConf().getIDropout().applyDropout(input, result, getIterationCount(), getEpochCount(), workspaceMgr); - dropoutApplied = true; - } - } - - protected INDArray backpropDropOutIfPresent(INDArray epsilon){ - if(layerConf().getIDropout() != null ){ - layerConf().getIDropout().backprop(epsilon, epsilon, 
getIterationCount(), getEpochCount()); - } - return epsilon; - } - - - @Override - public Type type() { - return Type.FEED_FORWARD; - } - - /** - * The number of parameters for the model - * - * @return the number of parameters for the model - */ - @Override - public long numParams() { - return 0; - } - - @Override - public long numParams(boolean backwards) { - return numParams(); - } - - @Override - public void fit(INDArray input, LayerWorkspaceMgr workspaceMgr) { - throw new UnsupportedOperationException("Not supported"); - } - - - @Override - public Pair gradientAndScore() { - return new Pair<>(gradient(), score()); - } - - @Override - public INDArray input() { - return input; - } - - @Override - public void setInputMiniBatchSize(int size) {} - - @Override - public int getInputMiniBatchSize() { - return (int) input.size(0); - } - - @Override - public void setMaskArray(INDArray maskArray) { - this.maskArray = maskArray; - } - - @Override - public INDArray getMaskArray() { - return maskArray; - } - - - @Override - public Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) { - //Most layers: CNN, dense, activation, etc - set mask array, mask state and then leave the mask unmodified - - this.maskArray = maskArray; - this.maskState = currentMaskState; - - return new Pair<>(maskArray, currentMaskState); - } - - - @Override - public Gradient gradient() { - throw new UnsupportedOperationException( - "Not supported for this layer, or should be overridden for layers requiring it"); - } - - @Override - public void fit() { - throw new UnsupportedOperationException( - "Not supported for this layer, or should be overridden for layers requiring it"); - } - - @Override - public double score() { - throw new UnsupportedOperationException( - "Not supported for this layer, or should be overridden for layers requiring it"); - } - - - @Override - public void applyConstraints(int iteration, int epoch){ - if(layerConf().getConstraints() != null){ - for(LayerConstraint lc : layerConf().getConstraints()){ - lc.applyConstraint(this, iteration, epoch); - } - } - } - - public void assertInputSet(boolean backprop){ - if(input == null){ - if(backprop){ - throw new IllegalStateException("Cannot perform backprop in layer " + getClass().getSimpleName() - + ": layer input field is not set"); - } else { - throw new IllegalStateException("Cannot perform forward pass in layer " + getClass().getSimpleName() - + ": layer input field is not set"); - } - } - } - - @Override - public void allowInputModification(boolean allow){ - inputModificationAllowed = allow; - } - - @Override - public LayerHelper getHelper() { - //Layers with helpers should override this method! - return null; - } - - @Override - public boolean updaterDivideByMinibatch(String paramName) { - //Majority of params's gradients should be... 
Exception: batch norm mean/variance estimate - return true; - } - - @Override - public void close(){ - //No-op for individual layers + public void assertInputSet(boolean backprop) { + if (input == null) { + if (backprop) { + throw new IllegalStateException( + "Cannot perform backprop in layer " + + getClass().getSimpleName() + + ": layer input field is not set"); + } else { + throw new IllegalStateException( + "Cannot perform forward pass in layer " + + getClass().getSimpleName() + + ": layer input field is not set"); + } } + } + + @Override + public void allowInputModification(boolean allow) { + inputModificationAllowed = allow; + } + + @Override + public LayerHelper getHelper() { + // Layers with helpers should override this method! + return null; + } + + @Override + public boolean updaterDivideByMinibatch(String paramName) { + // The majority of parameters' gradients should be... Exception: batch norm mean/variance estimate + return true; + } + + /** + * The AbstractLayer does not implement Params, ParamTable and GradientView. A RuntimeException + * will be triggered when calling this. + * + * @return + */ + @Override + public Map getParamTable() { + throw new RuntimeException("Not implemented"); + } + + /** + * The AbstractLayer does not implement Params, ParamTable and GradientView. A RuntimeException + * will be triggered when calling this. + * + * @param paramTable + */ + @Override + public void setParamTable(Map paramTable) { + throw new RuntimeException("Not implemented"); + } + + /** + * The AbstractLayer does not implement Params, ParamTable and GradientView. A RuntimeException + * will be triggered when calling this. + * + * @param isBackprop + * @return + */ + @Override + public Map getParamTable(boolean isBackprop) { + throw new RuntimeException("Not implemented"); + } + + /** + * The AbstractLayer does not implement Params, ParamTable and GradientView; this implementation + * returns {@code null}. + * + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + //throw new RuntimeException("Not implemented"); + return null; + } + + /** + * Set the parameters for this model. This expects a linear ndarray which will then be unpacked + * internally relative to the expected ordering of the model + * + * @param params the parameters for the model + */ + @Override + public void setParams(INDArray params) {} + + /** + * The AbstractLayer does not implement Params, ParamTable and GradientView. A RuntimeException + * will be triggered when calling this.
+ * + * @return 1D gradients view array + */ + @Override + public INDArray getGradientsViewArray() { + throw new RuntimeException("Not implemented"); + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/ActivationLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/ActivationLayer.java index f83b1cf31..48df25694 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/ActivationLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/ActivationLayer.java @@ -21,7 +21,8 @@ package org.deeplearning4j.nn.layers; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import java.util.Map; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.nd4j.linalg.api.buffer.DataType; @@ -33,10 +34,11 @@ import org.deeplearning4j.nn.workspace.ArrayType; public class ActivationLayer extends AbstractLayer { - public ActivationLayer(NeuralNetConfiguration conf, DataType dataType) { + public ActivationLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } + @Override public double calcRegularizationScore(boolean backpropParamsOnly){ return 0; @@ -51,7 +53,7 @@ public class ActivationLayer extends AbstractLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); INDArray temp = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, input, input.ordering()); - INDArray delta = layerConf().getActivationFn().backprop(temp, epsilon).getFirst(); //TODO handle activation function params + INDArray delta = getTypedLayerConfiguration().getActivationFn().backprop(temp, epsilon).getFirst(); //TODO handle activation function params if(delta == epsilon ){ //Edge case: identity activation + external errors -> no-op delta = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, delta); @@ -74,7 +76,7 @@ public class ActivationLayer extends AbstractLayer paramTable; + /** + * @param backpropOnly If true: return only parameters that are not exclusively used for layerwise + * pretraining + * @return Parameter table + */ + @Override + public Map getParamTable(boolean backpropOnly) { + return this.paramTable; + } + + /** + * @param map + */ + @Override + public void setParamTable(Map map) { + this.paramTable = map; + } @Override - public INDArray params() { + public INDArray getModelParams() { return null; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseLayer.java index ed1176133..1a055c528 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseLayer.java @@ -20,10 +20,18 @@ package org.deeplearning4j.nn.layers; +import java.lang.reflect.Constructor; +import java.util.*; +import lombok.Getter; +import lombok.NonNull; +import lombok.Setter; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.exception.DL4JInvalidInputException; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import 
org.deeplearning4j.nn.params.DefaultParamInitializer; @@ -31,421 +39,640 @@ import org.deeplearning4j.nn.workspace.ArrayType; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.deeplearning4j.optimize.Solver; import org.deeplearning4j.optimize.api.ConvexOptimizer; +import org.deeplearning4j.optimize.api.TrainingListener; +import org.nd4j.common.primitives.Pair; +import org.nd4j.evaluation.IEvaluation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.memory.MemoryWorkspace; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.api.ops.impl.transforms.custom.LayerNorm; import org.nd4j.linalg.api.ops.impl.transforms.custom.LayerNormBp; +import org.nd4j.linalg.dataset.api.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; +import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.NDArrayIndex; import org.nd4j.linalg.learning.regularization.Regularization; -import org.nd4j.common.primitives.Pair; -import java.lang.reflect.Constructor; -import java.util.*; - -/** - * A layer with parameters - * @author Adam Gibson - */ +/** A layer with parameters */ @Slf4j -public abstract class BaseLayer - extends AbstractLayer { +public abstract class BaseLayer + extends AbstractLayer implements ITrainableLayer { - protected INDArray paramsFlattened; - protected INDArray gradientsFlattened; - protected Map params; - protected transient Map gradientViews; - protected double score = 0.0; - protected ConvexOptimizer optimizer; - protected Gradient gradient; - protected Solver solver; + protected double score = 0.0; + protected ConvexOptimizer optimizer; + protected Gradient gradient; + protected Solver solver; + protected Map weightNoiseParams = new HashMap<>(); + protected INDArray paramsFlattened; + protected INDArray gradientsFlattened; - protected Map weightNoiseParams = new HashMap<>(); + @Getter @Setter protected Map paramTable; - public BaseLayer(NeuralNetConfiguration conf, DataType dataType) { - super(conf, dataType); + @Getter protected transient Map gradientViews; + + /** + * we put this as a virtual function to access the models paramTable. @Getter @Setter private + * INDArray params; + */ + public BaseLayer(LayerConfiguration conf, DataType dataType) { + super(conf, dataType); + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, T... 
evaluations) { + return null; + } + + /** Init the model */ + @Override + public void init() {} + + /** + * Update layer weights and biases with gradient change + * + * @param gradient + */ + @Override + public void update(Gradient gradient) {} + + /** + * Perform one update applying the gradient + * + * @param gradient the gradient to apply + * @param paramType + */ + @Override + public void update(INDArray gradient, String paramType) {} + + /** + * the number of parameters for the model + * + * @return the number of parameters for the model + */ + @Override + public long numParams() { + int ret = 0; + for (INDArray val : paramTable.values()) ret += val.length(); + return ret; + } + + /** + * The current inputs batch size + * + * @return the current inputs batch size + */ + @Override + public int batchSize() { + return 0; + } + + /** + * Add the given {@link TrainingListener}s to this model, in addition to any listeners that have + * previously been set + * + * @param listeners + */ + @Override + public void addTrainingListeners(TrainingListener... listeners) { + addTrainingListeners(List.of(listeners)); + } + + /** + * Set the parameters for a given parameter type. + * + * @param key the param type key to set + * @param val the new parameters ndarray + */ + @Override + public void setParam(String key, INDArray val) { + if (paramTable.containsKey(key)) { + paramTable.get(key).assign(val); + } else { + paramTable.put(key, val); + } + } + + @Override + public INDArray getParam(String param) { + return paramTable.get(param); + } + + @Override + public void setParams(INDArray params) { + if (params == paramsFlattened) return; // no op + setParams(params, 'f'); + } + + /** + * Returns the flattened (1d) view of this layer's parameters. + * + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + return paramsFlattened; + } + + /** */ + @Override + public void close() {} + + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(DataSet dataSet) {} + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) {} + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) {} + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) {} + + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + + /** + * and others even use \epsilon (epsilon) + * http://web.cs.swarthmore.edu/~meeden/cs81/s10/BackPropDeriv.pdf + * + * @param epsilon w^(L+1)*delta^(L+1). Or, equiv: dC/da, i.e., (dC/dz)*(dz/da) = dC/da, where C is + * cost function a=sigma(z) is activation.
+ * @param workspaceMgr Workspace manager + * @return + */ + @Override + public Pair backpropGradient( + INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { + assertInputSet(true); + // If this layer is layer L, then epsilon is (w^(L+1)*(d^(L+1))^T) (or equivalent) + Pair zAndPreNorm = preOutputWithPreNorm(true, true, workspaceMgr); + INDArray z = + zAndPreNorm.getFirst(); // Note: using preOutput(INDArray) can't be used as this does a + // setInput(input) and resets the 'appliedDropout' flag + INDArray preNorm = zAndPreNorm.getSecond(); + INDArray delta = + getTypedLayerConfiguration() + .getActivationFn() + .backprop(z, epsilon) + .getFirst(); // TODO handle activation function params + + if (maskArray != null) { + applyMask(delta); } - public LayerConfT layerConf() { - return (LayerConfT) this.conf.getLayer(); + Gradient ret = new DefaultGradient(); + + if (hasBias()) { + INDArray biasGrad = gradientViews.get(DefaultParamInitializer.BIAS_KEY); + delta.sum(biasGrad, 0); // biasGrad is initialized/zeroed first + ret.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGrad); } - @Override - public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { - assertInputSet(true); - //If this layer is layer L, then epsilon is (w^(L+1)*(d^(L+1))^T) (or equivalent) - Pair zAndPreNorm = preOutputWithPreNorm(true, true, workspaceMgr); - INDArray z = zAndPreNorm.getFirst(); //Note: using preOutput(INDArray) can't be used as this does a setInput(input) and resets the 'appliedDropout' flag - INDArray preNorm = zAndPreNorm.getSecond(); - INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params + INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, true, workspaceMgr); - if (maskArray != null) { - applyMask(delta); + INDArray epsilonNext = + workspaceMgr.createUninitialized( + ArrayType.ACTIVATION_GRAD, + delta.dataType(), + new long[] {W.size(0), delta.size(0)}, + 'f'); + if (hasLayerNorm()) { + INDArray g = getParam(DefaultParamInitializer.GAIN_KEY); + + INDArray dldg = gradientViews.get(DefaultParamInitializer.GAIN_KEY); + Nd4j.getExecutioner().exec(new LayerNormBp(preNorm, g, delta, delta, dldg, true, 1)); + ret.gradientForVariable().put(DefaultParamInitializer.GAIN_KEY, dldg); + } + + epsilonNext = + W.mmuli(delta.transpose(), epsilonNext) + .transpose(); // W.mmul(delta.transpose()).transpose(); + + INDArray weightGrad = gradientViews.get(DefaultParamInitializer.WEIGHT_KEY); // f order + Nd4j.gemm( + getInput().castTo(weightGrad.dataType()), + delta, + weightGrad, + true, + false, + 1.0, + 0.0); // TODO avoid castTo? 
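For orientation, a minimal, self-contained sketch of the shape bookkeeping behind the weight gradient and epsilon computed in backpropGradient above. This is an editorial illustration, not part of the patch: the class name and the dimensions (miniBatch, nIn, nOut) are arbitrary, and only standard ND4J calls (Nd4j.rand, mmul, transpose) are used.

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class DenseBackpropShapeSketch {
  public static void main(String[] args) {
    int miniBatch = 2, nIn = 4, nOut = 3;
    INDArray W = Nd4j.rand(nIn, nOut);            // weights, shape [nIn, nOut]
    INDArray in = Nd4j.rand(miniBatch, nIn);      // layer input, shape [miniBatch, nIn]
    INDArray delta = Nd4j.rand(miniBatch, nOut);  // dC/dz for this layer, shape [miniBatch, nOut]

    // Weight gradient: in^T * delta -> [nIn, nOut]. The patch computes the same product with
    // Nd4j.gemm(in, delta, weightGrad, true, false, 1.0, 0.0), writing directly into the
    // pre-allocated gradient view instead of allocating a new array.
    INDArray weightGrad = in.transpose().mmul(delta);

    // Epsilon for the layer below: (W * delta^T)^T -> [miniBatch, nIn], matching
    // W.mmuli(delta.transpose(), epsilonNext).transpose() in the patch.
    INDArray epsilonNext = W.mmul(delta.transpose()).transpose();

    System.out.println(java.util.Arrays.toString(weightGrad.shape()));   // [4, 3]
    System.out.println(java.util.Arrays.toString(epsilonNext.shape()));  // [2, 4]
  }
}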
+ ret.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGrad); + + weightNoiseParams.clear(); + + epsilonNext = backpropDropOutIfPresent(epsilonNext); + return new Pair<>(ret, epsilonNext); + } + + public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { + if (getInput() == null) { + log.warn("There is no input for this layer '{}'", layerConfiguration); + return; + } + INDArray output = activate(true, workspaceMgr); + setScoreWithZ(output); + } + + protected void setScoreWithZ(INDArray z) {} + + /** + * Objective function: the specified objective + * + * @return the score for the objective + */ + @Override + public double getScore() { + return score; + } + + @Override + public Gradient gradient() { + return gradient; + } + + @Override + public ConvexOptimizer getOptimizer() { + if (optimizer == null) { + Solver solver = new Solver.Builder().model(this).configure(getNetConfiguration()).build(); + this.optimizer = solver.getOptimizer(); + } + return optimizer; + } + + /** + * Returns the parameters of the neural network as a flattened row vector + * + * @return the parameters of the neural network + */ + @Override + public INDArray getModelParams() { + return paramsFlattened; + } + + public void setParamsTable(INDArray paramsTable) { + if (paramsTable == paramsFlattened) { + return; // no op + } + setParams(paramsTable, 'f'); + } + + protected void setParams(INDArray params, char order) { + if (params == null) { + log.trace( + "setParams(INDArray params, char order): params is null. Skipping setParams in Layer {}[{}] at index {}", + getLayerConfiguration().getLayerName(), + getClass().getSimpleName(), + getIndex()); + return; + } + List parameterList = layerConfiguration.getVariables(); // netWideVariables(); + int length = 0; + for (String s : parameterList) { + length += getParam(s).length(); + } + if (params.length() != length) { + throw new IllegalArgumentException( + "Unable to set parameters: must be of length " + + length + + ", got params of length " + + params.length() + + " - " + + layerId()); + } + int idx = 0; + Set paramKeySet = this.getParamTable().keySet(); + for (String s : paramKeySet) { + INDArray param = getParam(s); + INDArray get = + params.get(NDArrayIndex.point(0), NDArrayIndex.interval(idx, idx + param.length())); + if (param.length() != get.length()) { + throw new IllegalStateException( + "Parameter " + + s + + " should have been of length " + + param.length() + + " but was " + + get.length() + + " - " + + layerId()); + } + param.assign( + get.reshape( + order, + param.shape())); // Use assign due to backprop params being a view of a larger array + idx += param.length(); + } + } + + @Override + public void setParamsViewArray(INDArray params) { + if (this.getParamTable() != null && params.length() != numParams()) { + throw new IllegalArgumentException( + "Invalid input: expect params of length " + + numParams() + + ", got params of length " + + params.length() + + " - " + + layerId()); + } + this.paramsFlattened = params; + } + + @Override + public Map getParamTable(boolean isBackprop) { + return paramTable; + } + + @Override + public INDArray getGradientsViewArray() { + return gradientsFlattened; + } + + @Override + public void setBackpropGradientsViewArray(INDArray gradients) { + if (this.getParamTable() != null && gradients.length() != numParams()) { + throw new IllegalArgumentException( + "Invalid input: expect gradients array of length " + + numParams(true) + + ", got array of length " + + gradients.length() + + " - " + + 
layerId()); + } + + this.gradientsFlattened = gradients; + this.gradientViews = + layerConfiguration.initializer().getGradientsFromFlattened(layerConfiguration, gradients); + } + + /** + * Get the parameter, after applying any weight noise (such as DropConnect) if necessary. Note + * that during training, this will store the post-noise parameters, as these should be used for + * both forward pass and backprop, for a single iteration. Consequently, the parameters (post + * noise) should be cleared after each training iteration + * + * @param param Parameter key + * @param training If true: during training + * @return The parameter, after applying any noise + */ + protected INDArray getParamWithNoise( + @NonNull String param, boolean training, @NonNull LayerWorkspaceMgr workspaceMgr) { + INDArray p; + LayerConfiguration lconf = getLayerConfiguration(); + if (lconf.getWeightNoise() != null) { + if (training && weightNoiseParams.size() > 0 && weightNoiseParams.containsKey(param)) { + // Re-use these weights for both forward pass and backprop - don't want to use 2 different + // params here + // These should be cleared during backprop + return weightNoiseParams.get(param); + } else { + try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + p = + lconf + .getWeightNoise() + .getParameter( + this, param, getIterationCount(), getEpochCount(), training, workspaceMgr); } + } - Gradient ret = new DefaultGradient(); - - if(hasBias()){ - INDArray biasGrad = gradientViews.get(DefaultParamInitializer.BIAS_KEY); - delta.sum(biasGrad, 0); //biasGrad is initialized/zeroed first - ret.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGrad); - } - - INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, true, workspaceMgr); - - INDArray epsilonNext = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, delta.dataType(), new long[]{W.size(0), delta.size(0)}, 'f'); - if(hasLayerNorm()) { - INDArray g = getParam(DefaultParamInitializer.GAIN_KEY); - - INDArray dldg = gradientViews.get(DefaultParamInitializer.GAIN_KEY); - Nd4j.getExecutioner().exec(new LayerNormBp(preNorm, g, delta, delta, dldg, true, 1)); - ret.gradientForVariable().put(DefaultParamInitializer.GAIN_KEY, dldg); - - } - - epsilonNext = W.mmuli(delta.transpose(),epsilonNext).transpose(); //W.mmul(delta.transpose()).transpose(); - - INDArray weightGrad = gradientViews.get(DefaultParamInitializer.WEIGHT_KEY); //f order - Nd4j.gemm(input.castTo(weightGrad.dataType()), delta, weightGrad, true, false, 1.0, 0.0); //TODO avoid castTo? 
- ret.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGrad); - - weightNoiseParams.clear(); - - epsilonNext = backpropDropOutIfPresent(epsilonNext); - return new Pair<>(ret, epsilonNext); + if (training) { + // Store for re-use in backprop + weightNoiseParams.put(param, p); + } + } else { + return getParam(param); } - public void fit() { - throw new UnsupportedOperationException("Not supported"); + return p; + } + + protected INDArray preOutput(boolean training, LayerWorkspaceMgr workspaceMgr) { + return preOutputWithPreNorm(training, false, workspaceMgr).getFirst(); + } + + protected Pair preOutputWithPreNorm( + boolean training, boolean forBackprop, @NonNull LayerWorkspaceMgr workspaceMgr) { + assertInputSet(forBackprop); + applyDropOutIfNecessary(training, workspaceMgr); + INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, training, workspaceMgr); + INDArray b = getParamWithNoise(DefaultParamInitializer.BIAS_KEY, training, workspaceMgr); + INDArray g = (hasLayerNorm() ? getParam(DefaultParamInitializer.GAIN_KEY) : null); + + INDArray input = getInput().castTo(dataType); + + // Input validation: + if (input.rank() != 2 || input.columns() != W.rows()) { + if (input.rank() != 2) { + throw new DL4JInvalidInputException( + "Input that is not a matrix; expected matrix (rank 2), got rank " + + input.rank() + + " array with shape " + + Arrays.toString(input.shape()) + + ". Missing preprocessor or wrong input type? " + + layerId()); + } + throw new DL4JInvalidInputException( + "Input size (" + + input.columns() + + " columns; shape = " + + Arrays.toString(input.shape()) + + ") is invalid: does not match layer input size (layer # inputs = " + + W.size(0) + + ") " + + layerId()); } - @Override - public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { - if (this.input == null) - return; + INDArray ret = + workspaceMgr.createUninitialized( + ArrayType.ACTIVATIONS, W.dataType(), input.size(0), W.size(1)); + input + .castTo(ret.dataType()) + .mmuli( + W, ret); // TODO Can we avoid this cast? (It should be a no op if not required, however) - INDArray output = activate(true, workspaceMgr); - setScoreWithZ(output); + INDArray preNorm = ret; + if (hasLayerNorm()) { + preNorm = (forBackprop ?
ret.dup(ret.ordering()) : ret); + Nd4j.getExecutioner().exec(new LayerNorm(preNorm, g, ret, true, 1)); } - - protected void setScoreWithZ(INDArray z) {} - - /** - * Objective function: the specified objective - * @return the score for the objective - */ - - @Override - public double score() { - return score; + if (hasBias()) { + ret.addiRowVector(b); } - @Override - public Gradient gradient() { - return gradient; + if (maskArray != null) { + applyMask(ret); } - @Override - public void update(Gradient gradient) { - for (String paramType : gradient.gradientForVariable().keySet()) { - update(gradient.getGradientFor(paramType), paramType); - } + return new Pair<>(ret, preNorm); + } + + @Override + public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) { + INDArray z = preOutput(training, workspaceMgr); // (Input*Weights) + bias + INDArray ret = getTypedLayerConfiguration().getActivationFn().getActivation(z, training); + + if (maskArray != null) { + applyMask(ret); } - @Override - public void update(INDArray gradient, String paramType) { - setParam(paramType, getParam(paramType).addi(gradient)); + return ret; + } + + @Override + public INDArray activate( + @NonNull INDArray input, boolean training, @NonNull LayerWorkspaceMgr workspaceMgr) { + setInput(input, workspaceMgr); + return activate(training, workspaceMgr); + } + + @Override + public double calcRegularizationScore(boolean backpropParamsOnly) { + double scoreSum = 0.0; + for (Map.Entry e : getParamTable().entrySet()) { + List l = getTypedLayerConfiguration().getRegularizationByParam(e.getKey()); + if (l == null || l.isEmpty()) { + continue; + } + for (Regularization r : l) { + scoreSum += r.score(e.getValue(), getIterationCount(), getEpochCount()); + } + } + return scoreSum; + } + + @Override + public Layer clone() { + Layer layer = null; + try { + Constructor c = getClass().getConstructor(NeuralNetConfiguration.class); + layer = (Layer) c.newInstance(layerConfiguration); + Map linkedTable = new LinkedHashMap<>(); + for (Map.Entry entry : getParamTable().entrySet()) { + linkedTable.put(entry.getKey(), entry.getValue().dup()); + } + layer.setParamTable(linkedTable); + } catch (Exception e) { + log.error("", e); } + return layer; + } - @Override - public ConvexOptimizer getOptimizer() { - if (optimizer == null) { - Solver solver = new Solver.Builder().model(this).configure(conf()).build(); - this.optimizer = solver.getOptimizer(); - } - return optimizer; + @Override + public void fit(INDArray input, LayerWorkspaceMgr workspaceMgr) { + if (input != null) { + setInput(input, workspaceMgr); + applyDropOutIfNecessary(true, workspaceMgr); } - - /**Returns the parameters of the neural network as a flattened row vector - * @return the parameters of the neural network - */ - @Override - public INDArray params() { - return paramsFlattened; + if (solver == null) { + solver = + new Solver.Builder() + .model(this) + .configure(getNetConfiguration()) + .listeners(getTrainingListeners()) + .build(); } + this.optimizer = solver.getOptimizer(); + solver.optimize(workspaceMgr); + } - @Override - public INDArray getParam(String param) { - return params.get(param); - } + @Override + public String toString() { + return getClass().getName() + + "{" + + "conf=" + + layerConfiguration + + ", score=" + + score + + ", optimizer=" + + optimizer + + ", listeners=" + + trainingListeners + + '}'; + } - @Override - public void setParam(String key, INDArray val) { - if (params.containsKey(key)) - params.get(key).assign(val); - else - params.put(key, 
val); - } + @Override + public void clear() { + super.clear(); + weightNoiseParams.clear(); + } - @Override - public void setParams(INDArray params) { - if (params == paramsFlattened) - return; //no op - setParams(params, 'f'); - } + @Override + public void clearNoiseWeightParams() { + weightNoiseParams.clear(); + } - protected void setParams(INDArray params, char order) { - List parameterList = conf.variables(); - int length = 0; - for (String s : parameterList) - length += getParam(s).length(); - if (params.length() != length) - throw new IllegalArgumentException("Unable to set parameters: must be of length " + length - + ", got params of length " + params.length() + " - " + layerId()); - int idx = 0; - Set paramKeySet = this.params.keySet(); - for (String s : paramKeySet) { - INDArray param = getParam(s); - INDArray get = params.get(NDArrayIndex.point(0), NDArrayIndex.interval(idx, idx + param.length())); - if (param.length() != get.length()) - throw new IllegalStateException("Parameter " + s + " should have been of length " + param.length() - + " but was " + get.length() + " - " + layerId()); - param.assign(get.reshape(order, param.shape())); //Use assign due to backprop params being a view of a larger array - idx += param.length(); - } - } + /** + * Does this layer have no bias term? Many layers (dense, convolutional, output, embedding) have + * biases by default, but no-bias versions are possible via configuration + * + * @return True if a bias term is present, false otherwise + */ + public boolean hasBias() { + // Overridden by layers supporting no bias mode: dense, output, convolutional, embedding + return true; + } - @Override - public void setParamsViewArray(INDArray params) { - if (this.params != null && params.length() != numParams()) - throw new IllegalArgumentException("Invalid input: expect params of length " + numParams() - + ", got params of length " + params.length() + " - " + layerId()); - - this.paramsFlattened = params; - } - - @Override - public INDArray getGradientsViewArray() { - return gradientsFlattened; - } - - @Override - public void setBackpropGradientsViewArray(INDArray gradients) { - if (this.params != null && gradients.length() != numParams()) - throw new IllegalArgumentException("Invalid input: expect gradients array of length " + numParams(true) - + ", got array of length " + gradients.length() + " - " + layerId()); - - this.gradientsFlattened = gradients; - this.gradientViews = conf.getLayer().initializer().getGradientsFromFlattened(conf, gradients); - } - - @Override - public void setParamTable(Map paramTable) { - this.params = paramTable; - } - - @Override - public Map paramTable() { - return paramTable(false); - } - - @Override - public Map paramTable(boolean backpropParamsOnly) { - return params; - } - - /** - * Get the parameter, after applying any weight noise (such as DropConnect) if necessary. - * Note that during training, this will store the post-noise parameters, as these should be used - * for both forward pass and backprop, for a single iteration. 
- * Consequently, the parameters (post noise) should be cleared after each training iteration - * - * @param param Parameter key - * @param training If true: during training - * @return The parameter, after applying any noise - */ - protected INDArray getParamWithNoise(String param, boolean training, LayerWorkspaceMgr workspaceMgr){ - INDArray p; - if(layerConf().getWeightNoise() != null){ - if(training && weightNoiseParams.size() > 0 && weightNoiseParams.containsKey(param) ){ - //Re-use these weights for both forward pass and backprop - don't want to use 2 different params here - //These should be cleared during backprop - return weightNoiseParams.get(param); - } else { - try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - p = layerConf().getWeightNoise().getParameter(this, param, getIterationCount(), getEpochCount(), training, workspaceMgr); - } - } - - if(training){ - //Store for re-use in backprop - weightNoiseParams.put(param, p); - } - } else { - return getParam(param); - } - - return p; - } - - protected INDArray preOutput(boolean training, LayerWorkspaceMgr workspaceMgr) { - return preOutputWithPreNorm(training, false, workspaceMgr).getFirst(); - } - - protected Pair preOutputWithPreNorm(boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) { - assertInputSet(forBackprop); - applyDropOutIfNecessary(training, workspaceMgr); - INDArray W = getParamWithNoise(DefaultParamInitializer.WEIGHT_KEY, training, workspaceMgr); - INDArray b = getParamWithNoise(DefaultParamInitializer.BIAS_KEY, training, workspaceMgr); - INDArray g = (hasLayerNorm() ? getParam(DefaultParamInitializer.GAIN_KEY) : null); - - INDArray input = this.input.castTo(dataType); - - //Input validation: - if (input.rank() != 2 || input.columns() != W.rows()) { - if (input.rank() != 2) { - throw new DL4JInvalidInputException("Input that is not a matrix; expected matrix (rank 2), got rank " - + input.rank() + " array with shape " + Arrays.toString(input.shape()) - + ". Missing preprocessor or wrong input type? " + layerId()); - } - throw new DL4JInvalidInputException( - "Input size (" + input.columns() + " columns; shape = " + Arrays.toString(input.shape()) - + ") is invalid: does not match layer input size (layer # inputs = " - + W.size(0) + ") " + layerId()); - } - - - INDArray ret = workspaceMgr.createUninitialized(ArrayType.ACTIVATIONS, W.dataType(), input.size(0), W.size(1)); - input.castTo(ret.dataType()).mmuli(W, ret); //TODO Can we avoid this cast? (It sohuld be a no op if not required, however) - - INDArray preNorm = ret; - if(hasLayerNorm()){ - preNorm = (forBackprop ? 
ret.dup(ret.ordering()) : ret); - Nd4j.getExecutioner().exec(new LayerNorm(preNorm, g, ret, true, 1)); - } - - if(hasBias()){ - ret.addiRowVector(b); - } - - if (maskArray != null) { - applyMask(ret); - } - - return new Pair<>(ret, preNorm); - } - - @Override - public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) { - INDArray z = preOutput(training, workspaceMgr); - INDArray ret = layerConf().getActivationFn().getActivation(z, training); - - if (maskArray != null) { - applyMask(ret); - } - - return ret; - } - - @Override - public double calcRegularizationScore(boolean backpropParamsOnly){ - double scoreSum = 0.0; - for (Map.Entry e : paramTable().entrySet()) { - List l = layerConf().getRegularizationByParam(e.getKey()); - if(l == null || l.isEmpty()){ - continue; - } - for(Regularization r : l){ - scoreSum += r.score(e.getValue(), getIterationCount(), getEpochCount()); - } - } - return scoreSum; - } - - @Override - public Layer clone() { - Layer layer = null; - try { - Constructor c = getClass().getConstructor(NeuralNetConfiguration.class); - layer = (Layer) c.newInstance(conf); - Map linkedTable = new LinkedHashMap<>(); - for (Map.Entry entry : params.entrySet()) { - linkedTable.put(entry.getKey(), entry.getValue().dup()); - } - layer.setParamTable(linkedTable); - } catch (Exception e) { - log.error("",e); - } - - return layer; - - } - - /** - * The number of parameters for the model - * - * @return the number of parameters for the model - */ - @Override - public long numParams() { - int ret = 0; - for (INDArray val : params.values()) - ret += val.length(); - return ret; - } - - @Override - public void fit(INDArray input, LayerWorkspaceMgr workspaceMgr) { - if (input != null) { - setInput(input, workspaceMgr); - applyDropOutIfNecessary(true, workspaceMgr); - } - if (solver == null) { - solver = new Solver.Builder().model(this).configure(conf()).listeners(getListeners()).build(); - } - this.optimizer = solver.getOptimizer(); - solver.optimize(workspaceMgr); - } - - @Override - public String toString() { - return getClass().getName() + "{" + "conf=" + conf + ", score=" + score - + ", optimizer=" + optimizer + ", listeners=" + trainingListeners + '}'; - } - - @Override - public void clear(){ - super.clear(); - weightNoiseParams.clear(); - } - - @Override - public void clearNoiseWeightParams(){ - weightNoiseParams.clear(); - } - - /** - * Does this layer have no bias term? Many layers (dense, convolutional, output, embedding) have biases by - * default, but no-bias versions are possible via configuration - * - * @return True if a bias term is present, false otherwise - */ - public boolean hasBias(){ - //Overridden by layers supporting no bias mode: dense, output, convolutional, embedding - return true; - } - - /** - * Does this layer support and is it enabled layer normalization? Only Dense and SimpleRNN Layers support - * layer normalization. - * - * @return True if layer normalization is enabled on this layer, false otherwise - */ - public boolean hasLayerNorm(){ - // Overridden by layers supporting layer normalization. - return false; - } + /** + * Does this layer support and is it enabled layer normalization? Only Dense and SimpleRNN Layers + * support layer normalization. + * + * @return True if layer normalization is enabled on this layer, false otherwise + */ + public boolean hasLayerNorm() { + // Overridden by layers supporting layer normalization. 
+ return false; + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseOutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseOutputLayer.java index 1f317eee6..1450507f7 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseOutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/BaseOutputLayer.java @@ -22,7 +22,7 @@ package org.deeplearning4j.nn.layers; import org.deeplearning4j.nn.api.MaskState; import org.deeplearning4j.nn.api.layers.IOutputLayer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.params.DefaultParamInitializer; @@ -58,7 +58,7 @@ public abstract class BaseOutputLayer gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -166,19 +166,19 @@ public abstract class BaseOutputLayer getGradientsAndDelta(INDArray preOut, LayerWorkspaceMgr workspaceMgr) { - ILossFunction lossFunction = layerConf().getLossFn(); + ILossFunction lossFunction = getTypedLayerConfiguration().getLossFn(); INDArray labels2d = getLabels2d(workspaceMgr, ArrayType.BP_WORKING_MEM); //INDArray delta = lossFunction.computeGradient(labels2d, preOut, layerConf().getActivationFunction(), maskArray); - INDArray delta = lossFunction.computeGradient(labels2d, preOut, layerConf().getActivationFn(), maskArray); + INDArray delta = lossFunction.computeGradient(labels2d, preOut, getTypedLayerConfiguration().getActivationFn(), maskArray); Gradient gradient = new DefaultGradient(); - INDArray weightGradView = gradientViews.get(DefaultParamInitializer.WEIGHT_KEY); + INDArray weightGradView = getGradientViews().get(DefaultParamInitializer.WEIGHT_KEY); Nd4j.gemm(input.castTo(weightGradView.dataType()), delta, weightGradView, true, false, 1.0, 0.0); //Equivalent to: weightGradView.assign(input.transpose().mmul(delta)); //TODO can we avoid cast? 
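As a companion to the refactored getGradientsAndDelta(...) above, a minimal sketch of how an ILossFunction produces the delta (dC/dz) that feeds the gemm call. This is an editorial illustration, not part of the patch: the class name is hypothetical, the softmax activation and multi-class cross-entropy loss are example choices, and the dimensions are arbitrary.

import org.nd4j.linalg.activations.impl.ActivationSoftmax;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.lossfunctions.ILossFunction;
import org.nd4j.linalg.lossfunctions.impl.LossMCXENT;

public class OutputLayerDeltaSketch {
  public static void main(String[] args) {
    INDArray labels = Nd4j.create(new double[][]{{1, 0, 0}, {0, 1, 0}});  // one-hot labels, shape [2, 3]
    INDArray preOut = Nd4j.rand(2, 3);                                    // pre-activations z = input*W + b

    ILossFunction loss = new LossMCXENT();
    // delta = dC/dz, computed by the loss function together with the activation (softmax here),
    // analogous to lossFunction.computeGradient(labels2d, preOut, activationFn, maskArray) above.
    INDArray delta = loss.computeGradient(labels, preOut, new ActivationSoftmax(), null);

    // The weight gradient then follows the same pattern as in the patch:
    // weightGradView <- input^T * delta, via Nd4j.gemm(input, delta, weightGradView, true, false, 1.0, 0.0).
    System.out.println(java.util.Arrays.toString(delta.shape()));         // [2, 3]
  }
}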
gradient.gradientForVariable().put(DefaultParamInitializer.WEIGHT_KEY, weightGradView); if(hasBias()){ - INDArray biasGradView = gradientViews.get(DefaultParamInitializer.BIAS_KEY); + INDArray biasGradView = getGradientViews().get(DefaultParamInitializer.BIAS_KEY); delta.sum(biasGradView, 0); //biasGradView is initialized/zeroed first in sum op gradient.gradientForVariable().put(DefaultParamInitializer.BIAS_KEY, biasGradView); } @@ -349,6 +349,6 @@ public abstract class BaseOutputLayer { - public BasePretrainNetwork(NeuralNetConfiguration conf, DataType dataType) { + public BasePretrainNetwork(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -108,10 +108,10 @@ public abstract class BasePretrainNetwork paramTable(boolean backpropParamsOnly) { + public Map getParamTable(boolean backpropParamsOnly) { if (!backpropParamsOnly) - return params; + return getParamTable(); Map map = new LinkedHashMap<>(); - map.put(PretrainParamInitializer.WEIGHT_KEY, params.get(PretrainParamInitializer.WEIGHT_KEY)); - map.put(PretrainParamInitializer.BIAS_KEY, params.get(PretrainParamInitializer.BIAS_KEY)); + map.put(PretrainParamInitializer.WEIGHT_KEY, super.getParamTable().get(PretrainParamInitializer.WEIGHT_KEY)); + map.put(PretrainParamInitializer.BIAS_KEY, super.getParamTable().get(PretrainParamInitializer.BIAS_KEY)); return map; } - - public INDArray params() { - return paramsFlattened; - } - /**The number of parameters for the model, for backprop (i.e., excluding visible bias) * @return the number of parameters for the model (ex. visible bias) */ public long numParams() { int ret = 0; - for (Map.Entry entry : params.entrySet()) { + for (Map.Entry entry : getParamTable().entrySet()) { ret += entry.getValue().length(); } return ret; } @Override - public void setParams(INDArray params) { - if (params == paramsFlattened) + public void setParamsTable(INDArray paramsTable) { + if (paramsTable == paramsFlattened) return; //No op //SetParams has two different uses: during pretrain vs. backprop. //pretrain = 3 sets of params (inc. 
visible bias); backprop = 2 - List parameterList = conf.variables(); + List parameterList = layerConfiguration.getVariables(); long paramLength = 0; for (String s : parameterList) { val len = getParam(s).length(); paramLength += len; } - if (params.length() != paramLength) { + if (paramsTable.length() != paramLength) { throw new IllegalArgumentException("Unable to set parameters: must be of length " + paramLength - + ", got params of length " + params.length() + " " + layerId()); + + ", got params of length " + paramsTable.length() + " " + layerId()); } // Set for backprop and only W & hb - paramsFlattened.assign(params); + paramsFlattened.assign(paramsTable); } @@ -190,8 +185,8 @@ public abstract class BasePretrainNetwork { - public DropoutLayer(NeuralNetConfiguration conf, DataType dataType) { + public DropoutLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -71,7 +71,7 @@ public class DropoutLayer extends BaseLayer(zeroGradient, underlying.score()); + return new Pair<>(zeroGradient, underlying.getScore()); } @Override @@ -199,9 +199,9 @@ public class FrozenLayer extends BaseWrapperLayer { } @Override - public TrainingConfig getConfig(){ + public ITraininableLayerConfiguration getTrainingConfig(){ if (config == null) { - config = new DummyConfig(getUnderlying().getConfig().getLayerName()); + config = new DummyConfig(getUnderlying().getTrainingConfig().getLayerName()); } return config; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackprop.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackprop.java index 918a21a4a..9cf762798 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackprop.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/FrozenLayerWithBackprop.java @@ -42,11 +42,11 @@ public class FrozenLayerWithBackprop extends BaseWrapperLayer { public FrozenLayerWithBackprop(final Layer insideLayer) { super(insideLayer); - this.zeroGradient = new DefaultGradient(insideLayer.params()); + this.zeroGradient = new DefaultGradient(insideLayer.getParams()); } protected String layerId() { - String name = underlying.conf().getLayer().getLayerName(); + String name = underlying.getLayerConfiguration().getLayerName(); return "(layer name: " + (name == null ? 
"\"\"" : name) + ", layer index: " + underlying.getIndex() + ")"; } @@ -58,7 +58,7 @@ public class FrozenLayerWithBackprop extends BaseWrapperLayer { @Override public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { INDArray backpropEpsilon = underlying.backpropGradient(epsilon, workspaceMgr).getSecond(); - //backprop might have already changed the gradient view (like BaseLayer and BaseOutputLayer do) + //backprop might have already changed the gradient view (like BaseLayerConfiguration and BaseOutputLayer do) //so we want to put it back to zeroes INDArray gradientView = underlying.getGradientsViewArray(); if(gradientView != null){ @@ -72,12 +72,6 @@ public class FrozenLayerWithBackprop extends BaseWrapperLayer { return underlying.activate(false, workspaceMgr); } - @Override - public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr workspaceMgr) { - logTestMode(training); - return underlying.activate(input, false, workspaceMgr); - } - @Override public void fit() { if (!logFit) { @@ -112,7 +106,7 @@ public class FrozenLayerWithBackprop extends BaseWrapperLayer { "Gradients for the frozen layer are not set and will therefore will not be updated.Warning will be issued only once per instance"); logGradient = true; } - underlying.score(); + underlying.getScore(); //no op } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/LossLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/LossLayer.java index e53fc6619..43bbc69d8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/LossLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/LossLayer.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.layers; import org.deeplearning4j.eval.Evaluation; import org.deeplearning4j.nn.api.layers.IOutputLayer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.optimize.Solver; @@ -32,7 +32,6 @@ import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; -import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.lossfunctions.ILossFunction; import org.nd4j.common.primitives.Pair; import org.nd4j.linalg.util.FeatureUtil; @@ -54,7 +53,7 @@ public class LossLayer extends BaseLayer gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -134,8 +133,8 @@ public class LossLayer extends BaseLayer getGradientsAndDelta(INDArray preOut, LayerWorkspaceMgr workspaceMgr) { // delta calculation - ILossFunction lossFunction = layerConf().getLossFn(); - INDArray delta = lossFunction.computeGradient(getLabels2d(), preOut, layerConf().getActivationFn(), maskArray); + ILossFunction lossFunction = getTypedLayerConfiguration().getLossFn(); + INDArray delta = lossFunction.computeGradient(getLabels2d(), preOut, getTypedLayerConfiguration().getActivationFn(), maskArray); // grab the empty gradient Gradient gradient = new DefaultGradient(); @@ -171,7 +170,7 @@ public class LossLayer extends BaseLayer { - public OutputLayer(NeuralNetConfiguration conf, DataType dataType) { + public OutputLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } diff --git 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/RepeatVector.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/RepeatVector.java index 84dd1fd1f..166cbd896 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/RepeatVector.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/RepeatVector.java @@ -23,8 +23,8 @@ package org.deeplearning4j.nn.layers; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CacheMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.workspace.ArrayType; @@ -38,7 +38,7 @@ import java.util.Arrays; public class RepeatVector extends AbstractLayer { - public RepeatVector(NeuralNetConfiguration conf, DataType dataType) { + public RepeatVector(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -63,7 +63,7 @@ public class RepeatVector extends AbstractLayer fwd = preOutput(false,true,workspaceMgr); - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); INDArray delta = afn.backprop(fwd.getFirst(), epsilon).getFirst(); //TODO handle activation function params - org.deeplearning4j.nn.conf.layers.Convolution1DLayer c = layerConf(); + org.deeplearning4j.nn.conf.layers.Convolution1DLayer c = getTypedLayerConfiguration(); Conv1DConfig conf = Conv1DConfig.builder() .k(c.getKernelSize()[0]) .s(c.getStride()[0]) @@ -89,11 +85,11 @@ public class Convolution1DLayer extends ConvolutionLayer { getRnnDataFormat()); INDArray epsOut = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, input.dataType(), input.shape()); INDArray input = this.input.castTo(dataType); - if(layerConf().getRnnDataFormat() == RNNFormat.NWC) { + if(getTypedLayerConfiguration().getRnnDataFormat() == RNNFormat.NWC) { input = input.permute(0,2,1); //NHWC to NCHW } - if(layerConf().hasBias()) { + if(getTypedLayerConfiguration().hasBias()) { INDArray b = getParam(ConvolutionParamInitializer.BIAS_KEY); b = b.reshape(b.length()); inputArrs = new INDArray[]{input, w, b, delta}; @@ -109,7 +105,7 @@ public class Convolution1DLayer extends ConvolutionLayer { Nd4j.exec(op); Gradient retGradient = new DefaultGradient(); - if(layerConf().hasBias()) { + if(getTypedLayerConfiguration().hasBias()) { retGradient.setGradientFor(ConvolutionParamInitializer.BIAS_KEY, gradientViews.get(ConvolutionParamInitializer.BIAS_KEY)); } retGradient.setGradientFor(ConvolutionParamInitializer.WEIGHT_KEY, gradientViews.get(ConvolutionParamInitializer.WEIGHT_KEY), 'c'); @@ -133,11 +129,11 @@ public class Convolution1DLayer extends ConvolutionLayer { assertInputSet(false); INDArray input = this.input.castTo(dataType); - if(layerConf().getRnnDataFormat() == RNNFormat.NWC) { + if(getTypedLayerConfiguration().getRnnDataFormat() == RNNFormat.NWC) { input = input.permute(0,2,1); //NHWC to NCHW } - org.deeplearning4j.nn.conf.layers.Convolution1DLayer c = layerConf(); + org.deeplearning4j.nn.conf.layers.Convolution1DLayer c = getTypedLayerConfiguration(); Conv1DConfig conf = Conv1DConfig.builder() .k(c.getKernelSize()[0]) .s(c.getStride()[0]) @@ -154,7 +150,7 @@ public class Convolution1DLayer extends ConvolutionLayer { INDArray[] inputs; - if(layerConf().hasBias()) { + 
if(getTypedLayerConfiguration().hasBias()) { INDArray b = getParam(ConvolutionParamInitializer.BIAS_KEY); b = b.reshape(b.length()); inputs = new INDArray[]{input, w, b}; @@ -196,18 +192,18 @@ public class Convolution1DLayer extends ConvolutionLayer { @Override public Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) { - INDArray reduced = ConvolutionUtils.cnn1dMaskReduction(maskArray, layerConf().getKernelSize()[0], - layerConf().getStride()[0], layerConf().getPadding()[0], layerConf().getDilation()[0], - layerConf().getConvolutionMode()); + INDArray reduced = ConvolutionUtils.cnn1dMaskReduction(maskArray, getTypedLayerConfiguration().getKernelSize()[0], + getTypedLayerConfiguration().getStride()[0], getTypedLayerConfiguration().getPadding()[0], getTypedLayerConfiguration().getDilation()[0], + getTypedLayerConfiguration().getConvolutionMode()); return new Pair<>(reduced, currentMaskState); } @Override - public org.deeplearning4j.nn.conf.layers.Convolution1DLayer layerConf() { - return (org.deeplearning4j.nn.conf.layers.Convolution1DLayer) conf().getLayer(); + public org.deeplearning4j.nn.conf.layers.Convolution1DLayer getTypedLayerConfiguration() { + return (org.deeplearning4j.nn.conf.layers.Convolution1DLayer)layerConfiguration; } private RNNFormat getRnnDataFormat(){ - return layerConf().getRnnDataFormat(); + return getTypedLayerConfiguration().getRnnDataFormat(); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Convolution3DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Convolution3DLayer.java index 9c0dd079d..184c46723 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Convolution3DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Convolution3DLayer.java @@ -22,8 +22,8 @@ package org.deeplearning4j.nn.layers.convolution; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.Convolution3D; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.params.Convolution3DParamInitializer; @@ -42,7 +42,7 @@ import java.util.Arrays; public class Convolution3DLayer extends ConvolutionLayer { - public Convolution3DLayer(NeuralNetConfiguration conf, DataType dataType) { + public Convolution3DLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -66,7 +66,7 @@ public class Convolution3DLayer extends ConvolutionLayer { INDArray input = this.input.castTo(dataType); INDArray weights = getParamWithNoise(Convolution3DParamInitializer.WEIGHT_KEY, true, workspaceMgr); - Convolution3D layerConfig = (Convolution3D) layerConf(); + Convolution3D layerConfig = (Convolution3D) getTypedLayerConfiguration(); boolean isNCDHW = layerConfig.getDataFormat() == Convolution3D.DataFormat.NCDHW; @@ -75,7 +75,7 @@ public class Convolution3DLayer extends ConvolutionLayer { int inH = (int) (isNCDHW ? input.size(3) : input.size(2)); int inW = (int) (isNCDHW ? 
input.size(4) : input.size(3)); - int outEpsChannels = (int) layerConf().getNIn(); + int outEpsChannels = (int) getTypedLayerConfiguration().getNIn(); int[] dilation = layerConfig.getDilation(); int[] kernel = layerConfig.getKernelSize(); @@ -164,7 +164,7 @@ public class Convolution3DLayer extends ConvolutionLayer { protected Pair preOutput(boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) { - Convolution3D layerConfig = (Convolution3D) layerConf(); + Convolution3D layerConfig = (Convolution3D) getTypedLayerConfiguration(); ConvolutionMode mode = layerConfig.getConvolutionMode(); boolean isNCDHW = layerConfig.getDataFormat() == Convolution3D.DataFormat.NCDHW; @@ -173,7 +173,7 @@ public class Convolution3DLayer extends ConvolutionLayer { INDArray weights = getParamWithNoise(Convolution3DParamInitializer.WEIGHT_KEY, training, workspaceMgr); if (input.rank() != 5) { - String layerName = conf.getLayer().getLayerName(); + String layerName = layerConfiguration.getLayerName(); if (layerName == null) layerName = "(not named)"; throw new DL4JInvalidInputException("Got rank " + input.rank() @@ -193,11 +193,11 @@ public class Convolution3DLayer extends ConvolutionLayer { int inH = (int) (isNCDHW ? input.size(3) : input.size(2)); int inW = (int) (isNCDHW ? input.size(4) : input.size(3)); - int outWeightChannels = (int)layerConf().getNOut(); - int inWeightChannels = (int)layerConf().getNIn(); + int outWeightChannels = (int) getTypedLayerConfiguration().getNOut(); + int inWeightChannels = (int) getTypedLayerConfiguration().getNIn(); if (inputChannels != inWeightChannels) { - String layerName = conf.getLayer().getLayerName(); + String layerName = layerConfiguration.getLayerName(); if (layerName == null) layerName = "(not named)"; long dataInCh = isNCDHW ? 
input.size(1) : input.size(4); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayer.java index 022ed7c2b..c1a94338a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/ConvolutionLayer.java @@ -28,7 +28,7 @@ import org.deeplearning4j.nn.api.MaskState; import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.BaseLayer; @@ -36,7 +36,6 @@ import org.deeplearning4j.nn.layers.LayerHelper; import org.deeplearning4j.nn.layers.mkldnn.MKLDNNConvHelper; import org.deeplearning4j.nn.params.ConvolutionParamInitializer; import org.deeplearning4j.util.ConvolutionUtils; -import org.nd4j.common.base.Preconditions; import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.memory.MemoryWorkspace; @@ -49,7 +48,6 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.common.primitives.Pair; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.deeplearning4j.nn.workspace.ArrayType; -import org.nd4j.common.util.OneTimeLogger; import java.util.Arrays; @@ -64,10 +62,10 @@ public class ConvolutionLayer extends BaseLayer p = preOutput4d(true, true, workspaceMgr); INDArray z = p.getFirst(); - CNN2DFormat f = layerConf().getCnn2dDataFormat(); + CNN2DFormat f = getTypedLayerConfiguration().getCnn2dDataFormat(); if(f != CNN2DFormat.NCHW){ z = z.permute(0,3,1,2); //NHWC to NCHW } delta = afn.backprop(z, epsilon).getFirst(); //TODO handle activation function params - if (helper != null && (helperCountFail == 0 || !layerConf().isCudnnAllowFallback())) { + if (helper != null && (helperCountFail == 0 || !getTypedLayerConfiguration().isCudnnAllowFallback())) { INDArray helperDelta = delta; - if(layerConf().getCnn2dDataFormat() == CNN2DFormat.NHWC) + if(getTypedLayerConfiguration().getCnn2dDataFormat() == CNN2DFormat.NHWC) helperDelta = delta.permute(0,2,3,1); //NCHW to NHWC if(!hasBias() && !(helper instanceof MKLDNNConvHelper)){ //MKL-DNN supports no bias, CuDNN doesn't if(dummyBiasGrad == null){ try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - dummyBiasGrad = Nd4j.create(1, layerConf().getNOut()); + dummyBiasGrad = Nd4j.create(1, getTypedLayerConfiguration().getNOut()); } } biasGradView = dummyBiasGrad; @@ -178,8 +176,8 @@ public class ConvolutionLayer extends BaseLayer(preOutput, null); } @@ -414,7 +412,7 @@ public class ConvolutionLayer extends BaseLayer addiRowVector - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ z.addiRowVector(bias); } @@ -500,7 +498,7 @@ public class ConvolutionLayer extends BaseLayer(maskArray, currentMaskState); } - INDArray outMask = ConvolutionUtils.cnn2dMaskReduction(maskArray, layerConf().getKernelSize(), layerConf().getStride(), - layerConf().getPadding(), layerConf().getDilation(), layerConf().getConvolutionMode()); + INDArray outMask = ConvolutionUtils.cnn2dMaskReduction(maskArray, 
getTypedLayerConfiguration().getKernelSize(), getTypedLayerConfiguration().getStride(), + getTypedLayerConfiguration().getPadding(), getTypedLayerConfiguration().getDilation(), getTypedLayerConfiguration().getConvolutionMode()); return new Pair<>(outMask, currentMaskState); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping1DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping1DLayer.java index ba7bd5328..94f752a6e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping1DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping1DLayer.java @@ -20,13 +20,17 @@ package org.deeplearning4j.nn.layers.convolution; +import java.util.Map; import lombok.val; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; + import org.deeplearning4j.nn.conf.layers.convolutional.Cropping1D; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; +import org.deeplearning4j.nn.layers.BaseLayer; import org.deeplearning4j.nn.workspace.ArrayType; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.nd4j.linalg.api.buffer.DataType; @@ -41,9 +45,9 @@ public class Cropping1DLayer extends AbstractLayer { private final int[] cropping; //[padTop, padBottom] - public Cropping1DLayer(NeuralNetConfiguration conf, DataType dataType) { + public Cropping1DLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); - this.cropping = ((org.deeplearning4j.nn.conf.layers.convolutional.Cropping1D) conf.getLayer()).getCropping(); + this.cropping = layerConfiguration.getCropping(); } @Override @@ -79,7 +83,8 @@ public class Cropping1DLayer extends AbstractLayer { @Override public Layer clone() { - return new Cropping2DLayer(conf.clone(), dataType); + + return new Cropping2DLayer(layerConfiguration.clone(), dataType); } @Override @@ -96,4 +101,5 @@ public class Cropping1DLayer extends AbstractLayer { } } } + } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping2DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping2DLayer.java index 3d6beac05..83f17f216 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping2DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Cropping2DLayer.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.layers.convolution; import lombok.val; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; @@ -40,11 +40,12 @@ public class Cropping2DLayer extends AbstractLayer p = preOutput4d(true, true, workspaceMgr); delta = afn.backprop(p.getFirst(), epsilon).getFirst(); @@ -118,7 +118,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { INDArray[] opInputs; INDArray[] opOutputs; - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ INDArray bias = 
getParamWithNoise(DeconvolutionParamInitializer.BIAS_KEY, true, workspaceMgr); opInputs = new INDArray[]{input, weights, bias, delta}; opOutputs = new INDArray[]{outEps, weightGradViewOp, biasGradView}; @@ -136,7 +136,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { Gradient retGradient = new DefaultGradient(); - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ retGradient.setGradientFor(DeconvolutionParamInitializer.BIAS_KEY, biasGradView); } retGradient.setGradientFor(DeconvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c'); @@ -153,7 +153,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { //Input validation: expect rank 4 matrix if (input.rank() != 4) { - String layerName = conf.getLayer().getLayerName(); + String layerName = layerConfiguration.getLayerName(); if (layerName == null) layerName = "(not named)"; throw new DL4JInvalidInputException("Got rank " + input.rank() @@ -166,7 +166,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { + " " + layerId()); } - CNN2DFormat format = layerConf().getCnn2dDataFormat(); + CNN2DFormat format = getTypedLayerConfiguration().getCnn2dDataFormat(); boolean nchw = format == CNN2DFormat.NCHW; int cDim = nchw ? 1 : 3; int hDim = nchw ? 2 : 1; @@ -176,7 +176,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { long outDepth = weights.size(1); if (input.size(cDim) != inDepth ) { - String layerName = conf.getLayer().getLayerName(); + String layerName = layerConfiguration.getLayerName(); if (layerName == null) layerName = "(not named)"; @@ -198,9 +198,9 @@ public class Deconvolution2DLayer extends ConvolutionLayer { int kH = (int) weights.size(2); int kW = (int) weights.size(3); - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); int[] pad; int[] outSize; @@ -209,7 +209,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {(int) input.size(hDim), (int) input.size(wDim)}, kernel, strides, dilation ); } else { - pad = layerConf().getPadding(); + pad = getTypedLayerConfiguration().getPadding(); outSize = ConvolutionUtils.getDeconvolutionOutputSize(input, kernel, strides, pad, convolutionMode, dilation, format); //Also performs validation } @@ -234,7 +234,7 @@ public class Deconvolution2DLayer extends ConvolutionLayer { weights = weights.permute(2, 3, 1, 0); INDArray[] opInputs; - if (layerConf().hasBias()) { + if (getTypedLayerConfiguration().hasBias()) { opInputs = new INDArray[]{input, weights, bias}; } else { opInputs = new INDArray[]{input, weights}; @@ -261,10 +261,10 @@ public class Deconvolution2DLayer extends ConvolutionLayer { INDArray z = preOutput(training, false, workspaceMgr).getFirst(); - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); if (helper != null && Shape.strideDescendingCAscendingF(z)) { - INDArray ret = helper.activate(z, layerConf().getActivationFn(), training); + INDArray ret = helper.activate(z, getTypedLayerConfiguration().getActivationFn(), training); if (ret != null) { return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Deconvolution3DLayer.java 
b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Deconvolution3DLayer.java index 0913c7f7c..a14414d03 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Deconvolution3DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/Deconvolution3DLayer.java @@ -24,9 +24,9 @@ import lombok.val; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.Convolution3D; import org.deeplearning4j.nn.conf.layers.Deconvolution3D; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.BaseLayer; @@ -47,7 +47,7 @@ import java.util.Arrays; public class Deconvolution3DLayer extends BaseLayer { - public Deconvolution3DLayer(NeuralNetConfiguration conf, DataType dataType) { + public Deconvolution3DLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -63,20 +63,20 @@ public class Deconvolution3DLayer extends BaseLayer { INDArray weights = getParamWithNoise(DeconvolutionParamInitializer.WEIGHT_KEY, true, workspaceMgr); - Convolution3D.DataFormat df = layerConf().getDataFormat(); - ConvolutionMode cm = layerConf().getConvolutionMode(); + Convolution3D.DataFormat df = getTypedLayerConfiguration().getDataFormat(); + ConvolutionMode cm = getTypedLayerConfiguration().getConvolutionMode(); - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); - int[] pad = layerConf().getPadding(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); + int[] pad = getTypedLayerConfiguration().getPadding(); INDArray biasGradView = gradientViews.get(DeconvolutionParamInitializer.BIAS_KEY); INDArray weightGradView = gradientViews.get(DeconvolutionParamInitializer.WEIGHT_KEY); INDArray outEps = workspaceMgr.create(ArrayType.ACTIVATION_GRAD, weights.dataType(), input.shape(), 'c'); - Integer sameMode = (layerConf().getConvolutionMode() == ConvolutionMode.Same) ? 1 : 0; + Integer sameMode = (getTypedLayerConfiguration().getConvolutionMode() == ConvolutionMode.Same) ? 
1 : 0; int[] args = new int[] { kernel[0], kernel[1], kernel[2], strides[0], strides[1], strides[2], @@ -85,13 +85,13 @@ public class Deconvolution3DLayer extends BaseLayer { }; INDArray delta; - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); INDArray preOutput = preOutput(true, workspaceMgr); delta = afn.backprop(preOutput, epsilon).getFirst(); INDArray[] opInputs; INDArray[] opOutputs; - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ INDArray bias = getParamWithNoise(DeconvolutionParamInitializer.BIAS_KEY, true, workspaceMgr); opInputs = new INDArray[]{input, weights, bias, delta}; opOutputs = new INDArray[]{outEps, weightGradView, biasGradView}; @@ -109,7 +109,7 @@ public class Deconvolution3DLayer extends BaseLayer { Gradient retGradient = new DefaultGradient(); - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ retGradient.setGradientFor(DeconvolutionParamInitializer.BIAS_KEY, biasGradView); } retGradient.setGradientFor(DeconvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c'); @@ -131,34 +131,34 @@ public class Deconvolution3DLayer extends BaseLayer { " [minibatchSize, inputHeight, inputWidth, inputDepth, channels]. " + layerId()); } - Convolution3D.DataFormat df = layerConf().getDataFormat(); - boolean ncdhw = layerConf().getDataFormat() == Convolution3D.DataFormat.NCDHW; + Convolution3D.DataFormat df = getTypedLayerConfiguration().getDataFormat(); + boolean ncdhw = getTypedLayerConfiguration().getDataFormat() == Convolution3D.DataFormat.NCDHW; int chDim = ncdhw ? 1 : 4; - if (input.size(chDim) != layerConf().getNIn() ) { - String layerName = conf.getLayer().getLayerName(); + if (input.size(chDim) != getTypedLayerConfiguration().getNIn() ) { + String layerName = getLayerConfiguration().getLayerName(); if (layerName == null) layerName = "(not named)"; throw new DL4JInvalidInputException("Cannot do forward pass in Deconvolution3D layer (layer name = " + layerName + ", layer index = " + index + "): input array channels does not match CNN layer configuration" + " (data input channels = " + input.size(chDim) + ", " + (ncdhw ? "[minibatch,channels,height,width,depth]=" : "[minibatch,height,width,depth,channels]=") - + Arrays.toString(input.shape()) + "; expected" + " input channels = " + layerConf().getNIn() + ") " + + Arrays.toString(input.shape()) + "; expected" + " input channels = " + getTypedLayerConfiguration().getNIn() + ") " + layerId()); } - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); int[] pad; - ConvolutionMode cm = layerConf().getConvolutionMode(); + ConvolutionMode cm = getTypedLayerConfiguration().getConvolutionMode(); long[] outSize; int[] inSize = df == Convolution3D.DataFormat.NCDHW ? 
new int[]{(int)input.size(2), (int)input.size(3), (int)input.size(4)} : new int[]{(int)input.size(1), (int)input.size(2), (int)input.size(3)}; if (cm == ConvolutionMode.Same) { - outSize = ConvolutionUtils.getDeconvolution3DOutputSize(input, kernel, strides, null, dilation, cm, layerConf().getDataFormat()); //Also performs validation + outSize = ConvolutionUtils.getDeconvolution3DOutputSize(input, kernel, strides, null, dilation, cm, getTypedLayerConfiguration().getDataFormat()); //Also performs validation pad = ConvolutionUtils.getSameModeTopLeftPadding(ArrayUtil.toInts(outSize), inSize, kernel, strides, dilation ); } else { - pad = layerConf().getPadding(); - outSize = ConvolutionUtils.getDeconvolution3DOutputSize(input, kernel, strides, pad, dilation, cm, layerConf().getDataFormat()); //Also performs validation + pad = getTypedLayerConfiguration().getPadding(); + outSize = ConvolutionUtils.getDeconvolution3DOutputSize(input, kernel, strides, pad, dilation, cm, getTypedLayerConfiguration().getDataFormat()); //Also performs validation } long outH = outSize[0]; @@ -167,7 +167,7 @@ public class Deconvolution3DLayer extends BaseLayer { val miniBatch = input.size(0); - long[] outShape = df == Convolution3D.DataFormat.NCDHW ? new long[]{miniBatch, layerConf().getNOut(), outH, outW, outD} : new long[]{miniBatch, outH, outW, outD, layerConf().getNOut()}; + long[] outShape = df == Convolution3D.DataFormat.NCDHW ? new long[]{miniBatch, getTypedLayerConfiguration().getNOut(), outH, outW, outD} : new long[]{miniBatch, outH, outW, outD, getTypedLayerConfiguration().getNOut()}; INDArray output = workspaceMgr.create(ArrayType.ACTIVATIONS, input.dataType(), outShape, 'c'); int sameMode = (cm == ConvolutionMode.Same) ? 1 : 0; @@ -179,7 +179,7 @@ public class Deconvolution3DLayer extends BaseLayer { }; INDArray[] opInputs; - if (layerConf().hasBias()) { + if (getTypedLayerConfiguration().hasBias()) { opInputs = new INDArray[]{input, weights, bias}; } else { opInputs = new INDArray[]{input, weights}; @@ -206,7 +206,7 @@ public class Deconvolution3DLayer extends BaseLayer { INDArray z = preOutput(training, workspaceMgr); - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); INDArray activation = afn.getActivation(z, training); return activation; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/DepthwiseConvolution2DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/DepthwiseConvolution2DLayer.java index c63aeb3f9..2b39f70d2 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/DepthwiseConvolution2DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/DepthwiseConvolution2DLayer.java @@ -25,7 +25,7 @@ import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.params.DepthwiseConvolutionParamInitializer; @@ -45,7 +45,7 @@ import java.util.Arrays; public class DepthwiseConvolution2DLayer extends ConvolutionLayer { - public DepthwiseConvolution2DLayer(NeuralNetConfiguration conf, DataType 
dataType) { + public DepthwiseConvolution2DLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -59,12 +59,12 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { @Override public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - CNN2DFormat format = layerConf().getCnn2dDataFormat(); + CNN2DFormat format = getTypedLayerConfiguration().getCnn2dDataFormat(); boolean nchw = format == CNN2DFormat.NCHW; if (input.rank() != 4) { throw new DL4JInvalidInputException("Got rank " + input.rank() + " array as input to Convolution layer with shape " + Arrays.toString(input.shape()) - + ". Expected rank 4 array with shape " + layerConf().getCnn2dDataFormat().dimensionNames() + ". " + + ". Expected rank 4 array with shape " + getTypedLayerConfiguration().getCnn2dDataFormat().dimensionNames() + ". " + layerId()); } INDArray bias; @@ -81,16 +81,16 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { int kH = (int) depthWiseWeights.size(0); int kW = (int) depthWiseWeights.size(1); - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); int[] pad; if (convolutionMode == ConvolutionMode.Same) { int[] outSize = ConvolutionUtils.getOutputSize( input, kernel, strides, null, convolutionMode, dilation, format); pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[]{inH, inW}, kernel, strides, dilation); } else { - pad = layerConf().getPadding(); + pad = getTypedLayerConfiguration().getPadding(); ConvolutionUtils.getOutputSize(input, kernel, strides, pad, convolutionMode, dilation, format); } @@ -109,13 +109,13 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { }; INDArray delta; - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); Pair p = preOutput4d(true, true, workspaceMgr); delta = afn.backprop(p.getFirst(), epsilon).getFirst(); INDArray[] inputs; INDArray[] outputs; - if (layerConf().hasBias()) { + if (getTypedLayerConfiguration().hasBias()) { bias = getParamWithNoise(DepthwiseConvolutionParamInitializer.BIAS_KEY, true, workspaceMgr); inputs = new INDArray[]{input, depthWiseWeights, bias, delta}; outputs = new INDArray[]{outEpsilon, weightGradView, biasGradView}; @@ -133,7 +133,7 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { Nd4j.getExecutioner().exec(op); Gradient retGradient = new DefaultGradient(); - if (layerConf().hasBias()) { + if (getTypedLayerConfiguration().hasBias()) { retGradient.setGradientFor(DepthwiseConvolutionParamInitializer.BIAS_KEY, biasGradView); } retGradient.setGradientFor(DepthwiseConvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c'); @@ -152,13 +152,13 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { getParamWithNoise(DepthwiseConvolutionParamInitializer.WEIGHT_KEY, training, workspaceMgr); if (input.rank() != 4) { - String layerName = conf.getLayer().getLayerName(); + String layerName = layerConfiguration.getLayerName(); if (layerName == null) layerName = "(not named)"; throw new DL4JInvalidInputException("Got rank " + input.rank() + " array as input to DepthwiseConvolution2D (layer name = " + layerName + ", layer index = " + index + ") with shape " + 
Arrays.toString(input.shape()) + ". " - + "Expected rank 4 array with shape " + layerConf().getCnn2dDataFormat().dimensionNames() + "." + + "Expected rank 4 array with shape " + getTypedLayerConfiguration().getCnn2dDataFormat().dimensionNames() + "." + (input.rank() == 2 ? " (Wrong input type (see InputType.convolutionalFlat()) or wrong data type?)" : "") + " " + layerId()); @@ -166,7 +166,7 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { INDArray input = this.input.castTo(dataType); //no-op if correct dtype - CNN2DFormat format = layerConf().getCnn2dDataFormat(); + CNN2DFormat format = getTypedLayerConfiguration().getCnn2dDataFormat(); boolean nchw = format == CNN2DFormat.NCHW; long inDepth = depthWiseWeights.size(2); @@ -174,7 +174,7 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { long outDepth = depthMultiplier * inDepth; if (input.size(nchw ? 1 : 3) != inDepth) { - String layerName = conf.getLayer().getLayerName(); + String layerName = layerConfiguration.getLayerName(); if (layerName == null) layerName = "(not named)"; @@ -196,9 +196,9 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { int kH = (int) depthWiseWeights.size(0); int kW = (int) depthWiseWeights.size(1); - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); int[] pad; int[] outSize; @@ -211,7 +211,7 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { pad = ConvolutionUtils.getSameModeTopLeftPadding( outSize, new int[]{(int) input.size(nchw ? 2 : 1), (int) input.size(nchw ? 
3 : 2)}, kernel, strides, dilation); } else { - pad = layerConf().getPadding(); + pad = getTypedLayerConfiguration().getPadding(); outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, pad, convolutionMode, dilation, format); } @@ -230,7 +230,7 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { }; INDArray[] inputs; - if (layerConf().hasBias()) { + if (getTypedLayerConfiguration().hasBias()) { inputs = new INDArray[]{input, depthWiseWeights, bias}; } else { inputs = new INDArray[]{input, depthWiseWeights}; @@ -259,7 +259,7 @@ public class DepthwiseConvolution2DLayer extends ConvolutionLayer { INDArray z = preOutput(training, false, workspaceMgr).getFirst(); //String afn = conf.getLayer().getActivationFunction(); - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); INDArray activation = afn.getActivation(z, training); return activation; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SeparableConvolution2DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SeparableConvolution2DLayer.java index d5d0ebf0f..dc660bfc8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SeparableConvolution2DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SeparableConvolution2DLayer.java @@ -25,7 +25,7 @@ import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.params.ConvolutionParamInitializer; @@ -46,7 +46,7 @@ import java.util.Arrays; public class SeparableConvolution2DLayer extends ConvolutionLayer { - public SeparableConvolution2DLayer(NeuralNetConfiguration conf, DataType dataType) { + public SeparableConvolution2DLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -63,7 +63,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { if (input.rank() != 4) { throw new DL4JInvalidInputException("Got rank " + input.rank() + " array as input to SubsamplingLayer with shape " + Arrays.toString(input.shape()) - + ". Expected rank 4 array with shape " + layerConf().getCnn2dDataFormat().dimensionNames() + ". " + + ". Expected rank 4 array with shape " + getTypedLayerConfiguration().getCnn2dDataFormat().dimensionNames() + ". 
" + layerId()); } INDArray bias; @@ -74,7 +74,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { INDArray input = this.input.castTo(dataType); - CNN2DFormat format = layerConf().getCnn2dDataFormat(); + CNN2DFormat format = getTypedLayerConfiguration().getCnn2dDataFormat(); boolean nchw = format == CNN2DFormat.NCHW; long miniBatch = input.size(0); @@ -85,15 +85,15 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { int kH = (int) depthWiseWeights.size(2); int kW = (int) depthWiseWeights.size(3); - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); int[] pad; if (convolutionMode == ConvolutionMode.Same) { int[] outSize = ConvolutionUtils.getOutputSize(input, kernel, strides, null, convolutionMode, dilation, format); //Also performs validation pad = ConvolutionUtils.getSameModeTopLeftPadding(outSize, new int[] {inH, inW}, kernel, strides, dilation); } else { - pad = layerConf().getPadding(); + pad = getTypedLayerConfiguration().getPadding(); ConvolutionUtils.getOutputSize(input, kernel, strides, pad, convolutionMode, dilation, format); //Also performs validation } @@ -113,7 +113,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { }; INDArray delta; - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); Pair p = preOutput4d(true, true, workspaceMgr); delta = afn.backprop(p.getFirst(), epsilon).getFirst(); @@ -125,7 +125,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { INDArray opPointWiseWeightGradView = pointWiseWeightGradView.permute(2, 3, 1, 0); CustomOp op; - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ bias = getParamWithNoise(SeparableConvolutionParamInitializer.BIAS_KEY, true, workspaceMgr); op = DynamicCustomOp.builder("sconv2d_bp") @@ -145,7 +145,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { Nd4j.getExecutioner().exec(op); Gradient retGradient = new DefaultGradient(); - if(layerConf().hasBias()){ + if(getTypedLayerConfiguration().hasBias()){ retGradient.setGradientFor(ConvolutionParamInitializer.BIAS_KEY, biasGradView); } retGradient.setGradientFor(SeparableConvolutionParamInitializer.DEPTH_WISE_WEIGHT_KEY, depthWiseWeightGradView, 'c'); @@ -167,7 +167,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { getParamWithNoise(SeparableConvolutionParamInitializer.POINT_WISE_WEIGHT_KEY, training, workspaceMgr); INDArray input = this.input.castTo(dataType); - if(layerConf().getCnn2dDataFormat() == CNN2DFormat.NHWC) { + if(getTypedLayerConfiguration().getCnn2dDataFormat() == CNN2DFormat.NHWC) { input = input.permute(0,3,1,2).dup(); } @@ -176,13 +176,13 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { int wIdx = 3; if (input.rank() != 4) { - String layerName = conf.getLayer().getLayerName(); + String layerName = getLayerConfiguration().getLayerName(); if (layerName == null) layerName = "(not named)"; throw new DL4JInvalidInputException("Got rank " + input.rank() + " array as input to SeparableConvolution2D (layer name = " + layerName + ", layer index = " + index + ") with shape " + Arrays.toString(input.shape()) + ". 
" - + "Expected rank 4 array with shape " + layerConf().getCnn2dDataFormat().dimensionNames() + "." + + "Expected rank 4 array with shape " + getTypedLayerConfiguration().getCnn2dDataFormat().dimensionNames() + "." + (input.rank() == 2 ? " (Wrong input type (see InputType.convolutionalFlat()) or wrong data type?)" : "") @@ -193,13 +193,13 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { long outDepth = pointWiseWeights.size(0); if (input.size(chIdx) != inDepth) { - String layerName = conf.getLayer().getLayerName(); + String layerName = getLayerConfiguration().getLayerName(); if (layerName == null) layerName = "(not named)"; String s = "Cannot do forward pass in SeparableConvolution2D layer (layer name = " + layerName + ", layer index = " + index + "): input array channels does not match CNN layer configuration" - + " (data format = " + layerConf().getCnn2dDataFormat() + ", data input channels = " + input.size(1) + ", [minibatch,inputDepth,height,width]=" + + " (data format = " + getTypedLayerConfiguration().getCnn2dDataFormat() + ", data input channels = " + input.size(1) + ", [minibatch,inputDepth,height,width]=" + Arrays.toString(input.shape()) + "; expected" + " input channels = " + inDepth + ") " + layerId(); @@ -214,9 +214,9 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { int kH = (int) depthWiseWeights.size(2); int kW = (int) depthWiseWeights.size(3); - int[] dilation = layerConf().getDilation(); - int[] kernel = layerConf().getKernelSize(); - int[] strides = layerConf().getStride(); + int[] dilation = getTypedLayerConfiguration().getDilation(); + int[] kernel = getTypedLayerConfiguration().getKernelSize(); + int[] strides = getTypedLayerConfiguration().getStride(); int[] pad; int[] outSize; @@ -240,7 +240,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { strides, dilation); } else { - pad = layerConf().getPadding(); + pad = getTypedLayerConfiguration().getPadding(); outSize = ConvolutionUtils.getOutputSize( input, kernel, @@ -272,7 +272,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { pointWiseWeights = pointWiseWeights.permute(2, 3, 1, 0); INDArray[] opInputs; - if (layerConf().hasBias()) { + if (getTypedLayerConfiguration().hasBias()) { opInputs = new INDArray[]{input, depthWiseWeights, pointWiseWeights, bias}; } else { opInputs = new INDArray[]{input, depthWiseWeights, pointWiseWeights}; @@ -287,7 +287,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { .build(); Nd4j.getExecutioner().exec(op); - if(layerConf().getCnn2dDataFormat() == CNN2DFormat.NHWC) { + if(getTypedLayerConfiguration().getCnn2dDataFormat() == CNN2DFormat.NHWC) { output = output.permute(0,2,3,1); //NCHW to NHWC } @@ -306,7 +306,7 @@ public class SeparableConvolution2DLayer extends ConvolutionLayer { INDArray z = preOutput(training, false, workspaceMgr).getFirst(); //String afn = conf.getLayer().getActivationFunction(); - IActivation afn = layerConf().getActivationFn(); + IActivation afn = getTypedLayerConfiguration().getActivationFn(); INDArray activation = afn.getActivation(z, training); return activation; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SpaceToBatch.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SpaceToBatch.java index 6abd39baa..1e5c7b270 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SpaceToBatch.java +++ 
b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/SpaceToBatch.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.layers.convolution; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; @@ -42,25 +42,25 @@ import java.util.Arrays; @Slf4j public class SpaceToBatch extends AbstractLayer { - public SpaceToBatch(NeuralNetConfiguration conf, DataType dataType) { + public SpaceToBatch(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } private int[] getBlocks() { - return layerConf().getBlocks(); + return getTypedLayerConfiguration().getBlocks(); } private int[][] getPadding() { - return layerConf().getPadding(); + return getTypedLayerConfiguration().getPadding(); } private INDArray getBlocksArray() { - int[] intBlocks = layerConf().getBlocks(); + int[] intBlocks = getTypedLayerConfiguration().getBlocks(); return Nd4j.createFromArray(intBlocks); } private INDArray getPaddingArray() { - int[][] intPad = layerConf().getPadding(); + int[][] intPad = getTypedLayerConfiguration().getPadding(); return Nd4j.createFromArray(intPad); } @@ -77,7 +77,7 @@ public class SpaceToBatch extends AbstractLayer { - public SpaceToDepth(NeuralNetConfiguration conf, DataType dataType) { + public SpaceToDepth(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } private int getBlockSize() { - return layerConf().getBlockSize(); + return getTypedLayerConfiguration().getBlockSize(); } @Override @@ -63,7 +63,7 @@ public class SpaceToDepth extends AbstractLayer { - public ZeroPaddingLayer(NeuralNetConfiguration conf, DataType dataType) { + public ZeroPaddingLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -61,13 +61,13 @@ public class ZeroPaddingLayer extends AbstractLayer feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) { - INDArray reduced = ConvolutionUtils.cnn1dMaskReduction(maskArray, layerConf().getKernelSize()[0], - layerConf().getStride()[0], layerConf().getPadding()[0], layerConf().getDilation()[0], - layerConf().getConvolutionMode()); + INDArray reduced = ConvolutionUtils.cnn1dMaskReduction(maskArray, getTypedLayerConfiguration().getKernelSize()[0], + getTypedLayerConfiguration().getStride()[0], getTypedLayerConfiguration().getPadding()[0], getTypedLayerConfiguration().getDilation()[0], + getTypedLayerConfiguration().getConvolutionMode()); return new Pair<>(reduced, currentMaskState); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/subsampling/Subsampling3DLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/subsampling/Subsampling3DLayer.java index 0bec80b82..168d59357 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/subsampling/Subsampling3DLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/subsampling/Subsampling3DLayer.java @@ -23,8 +23,8 @@ package org.deeplearning4j.nn.layers.convolution.subsampling; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.ConvolutionMode; 
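// Note on the repeated migration in these hunks: layer constructors now take a LayerConfiguration
// instead of a NeuralNetConfiguration, the old layerConf() / conf().getLayer() accessors become
// getTypedLayerConfiguration() / getLayerConfiguration(), and score() becomes getScore().
// A minimal sketch of a call site after this change, using only method names that appear in this
// diff; the MyConvLayer class itself is hypothetical and not part of the patch:
//
//   public class MyConvLayer extends ConvolutionLayer {
//       public MyConvLayer(LayerConfiguration conf, DataType dataType) {
//           super(conf, dataType);                                  // was: NeuralNetConfiguration conf
//       }
//       int[] kernel() {
//           return getTypedLayerConfiguration().getKernelSize();    // was: layerConf().getKernelSize()
//       }
//       String name() {
//           return getLayerConfiguration().getLayerName();          // was: conf().getLayer().getLayerName()
//       }
//   }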
-import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.Convolution3D; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.PoolingType; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; @@ -47,10 +47,10 @@ public class Subsampling3DLayer extends AbstractLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - boolean isNCDHW = layerConf().getDataFormat() == Convolution3D.DataFormat.NCDHW; + boolean isNCDHW = getTypedLayerConfiguration().getDataFormat() == Convolution3D.DataFormat.NCDHW; long miniBatch = input.size(0); long inChannels = isNCDHW ? input.size(1) : input.size(4); @@ -76,9 +76,9 @@ public class Subsampling3DLayer extends AbstractLayer ret = null; try{ ret = helper.backpropGradient(input, epsilon, kernel, strides, pad, - layerConf().getPoolingType(), convolutionMode, dilation, dataFormat, workspaceMgr); + getTypedLayerConfiguration().getPoolingType(), convolutionMode, dilation, dataFormat, workspaceMgr); } catch (ND4JOpProfilerException e){ throw e; //NaN panic etc for debugging } catch (Exception e){ @@ -136,7 +136,7 @@ public class SubsamplingLayer extends AbstractLayer(maskArray, currentMaskState); } - INDArray outMask = ConvolutionUtils.cnn2dMaskReduction(maskArray, layerConf().getKernelSize(), layerConf().getStride(), - layerConf().getPadding(), layerConf().getDilation(), layerConf().getConvolutionMode()); + INDArray outMask = ConvolutionUtils.cnn2dMaskReduction(maskArray, getTypedLayerConfiguration().getKernelSize(), getTypedLayerConfiguration().getStride(), + getTypedLayerConfiguration().getPadding(), getTypedLayerConfiguration().getDilation(), getTypedLayerConfiguration().getConvolutionMode()); return super.feedForwardMaskArray(outMask, currentMaskState, minibatchSize); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling1D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling1D.java index bcff8be84..ae5417fc8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling1D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling1D.java @@ -23,8 +23,8 @@ package org.deeplearning4j.nn.layers.convolution.upsampling; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.BaseUpsamplingLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.workspace.ArrayType; @@ -43,7 +43,7 @@ import java.util.Arrays; public class Upsampling1D extends Upsampling2D { - public Upsampling1D(NeuralNetConfiguration conf, DataType dataType) { + public Upsampling1D(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -56,7 +56,7 @@ public class Upsampling1D extends Upsampling2D { public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - int[] size = ((BaseUpsamplingLayer) layerConf()).getSize(); + int[] size = ((BaseUpsamplingLayer) getTypedLayerConfiguration()).getSize(); epsilon = 
epsilon.reshape(epsilon.size(0), epsilon.size(1), epsilon.size(2), 1); // we replicate the error term times "size" so that backprop works properly on it epsilon = epsilon.repeat(3, size[0]); @@ -94,7 +94,7 @@ public class Upsampling1D extends Upsampling2D { @Override protected int[] getSize(){ - return ((org.deeplearning4j.nn.conf.layers.Upsampling1D)conf.getLayer()).getSize(); + return getLayerConfiguration().getSize(); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling2D.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling2D.java index ff1aebb20..cf9da710e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling2D.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/convolution/upsampling/Upsampling2D.java @@ -24,7 +24,7 @@ import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.exception.DL4JInvalidInputException; import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.CacheMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; @@ -45,7 +45,7 @@ import java.util.Arrays; public class Upsampling2D extends AbstractLayer { - public Upsampling2D(NeuralNetConfiguration conf, DataType dataType) { + public Upsampling2D(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -86,12 +86,12 @@ public class Upsampling2D extends AbstractLayer { - public Upsampling3D(NeuralNetConfiguration conf, DataType dataType) { + public Upsampling3D(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -63,7 +63,7 @@ public class Upsampling3D extends AbstractLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - boolean ncdhw = layerConf().getDataFormat() == org.deeplearning4j.nn.conf.layers.Convolution3D.DataFormat.NCDHW; + boolean ncdhw = getTypedLayerConfiguration().getDataFormat() == org.deeplearning4j.nn.conf.layers.Convolution3D.DataFormat.NCDHW; // Assumes NCDHW order long miniBatch = input.size(0); long inChannels, inD, inH, inW; @@ -109,7 +109,7 @@ public class Upsampling3D extends AbstractLayer { - long[] axes = layerConf().getSharedAxes(); + long[] axes = getTypedLayerConfiguration().getSharedAxes(); - public PReLU(NeuralNetConfiguration conf, DataType dataType) { + public PReLU(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/feedforward/autoencoder/AutoEncoder.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/feedforward/autoencoder/AutoEncoder.java index fd6b97aa5..5a65889f8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/feedforward/autoencoder/AutoEncoder.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/feedforward/autoencoder/AutoEncoder.java @@ -20,7 +20,7 @@ package org.deeplearning4j.nn.layers.feedforward.autoencoder; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.layers.BasePretrainNetwork; import org.deeplearning4j.nn.params.PretrainParamInitializer; import 
org.nd4j.linalg.api.buffer.DataType; @@ -31,7 +31,7 @@ import org.deeplearning4j.nn.workspace.ArrayType; public class AutoEncoder extends BasePretrainNetwork { - public AutoEncoder(NeuralNetConfiguration conf, DataType dataType) { + public AutoEncoder(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -54,7 +54,7 @@ public class AutoEncoder extends BasePretrainNetwork 0 ? getCorruptedInput(input, corruptionLevel) : input; setInput(corruptedX, workspaceMgr); @@ -97,8 +97,8 @@ public class AutoEncoder extends BasePretrainNetwork { - public DenseLayer(NeuralNetConfiguration conf, DataType dataType) { + public DenseLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -47,11 +47,15 @@ public class DenseLayer extends BaseLayer { - public ElementWiseMultiplicationLayer(NeuralNetConfiguration conf, DataType dataType){ + public ElementWiseMultiplicationLayer(LayerConfiguration conf, DataType dataType){ super(conf, dataType); } @@ -46,7 +46,7 @@ public class ElementWiseMultiplicationLayer extends BaseLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { //If this layer is layer L, then epsilon for this layer is ((w^(L+1)*(delta^(L+1))^T))^T (or equivalent) INDArray z = preOutput(true, workspaceMgr); //Note: using preOutput(INDArray) can't be used as this does a setInput(input) and resets the 'appliedDropout' flag - INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params + INDArray delta = getTypedLayerConfiguration().getActivationFn().backprop(z, epsilon).getFirst(); //TODO handle activation function params if (maskArray != null) { applyMask(delta); @@ -68,7 +68,7 @@ public class ElementWiseMultiplicationLayer extends BaseLayer { private static final int[] DIM_1 = new int[]{1}; - public EmbeddingLayer(NeuralNetConfiguration conf, DataType dataType) { + public EmbeddingLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -49,7 +49,7 @@ public class EmbeddingLayer extends BaseLayer Integer.MAX_VALUE) throw new ND4JArraySizeException(); @@ -125,7 +125,7 @@ public class EmbeddingLayer extends BaseLayer { private static final int[] WEIGHT_DIM = new int[]{1}; - public EmbeddingSequenceLayer(NeuralNetConfiguration conf, DataType dataType) { + public EmbeddingSequenceLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -55,9 +55,9 @@ public class EmbeddingSequenceLayer extends BaseLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); INDArray z = preOutput(true, workspaceMgr); - INDArray delta = layerConf().getActivationFn().backprop(z, epsilon).getFirst(); //Shape: [mb, vector, seqLength] + INDArray delta = getTypedLayerConfiguration().getActivationFn().backprop(z, epsilon).getFirst(); //Shape: [mb, vector, seqLength] - boolean ncw = layerConf().getOutputFormat() == RNNFormat.NCW; + boolean ncw = getTypedLayerConfiguration().getOutputFormat() == RNNFormat.NCW; if (maskArray != null) { if(ncw){ @@ -67,9 +67,9 @@ public class EmbeddingSequenceLayer extends BaseLayer [minibatch, nOut, seqLen] i.e., NWC -> NCW } return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, ret); @@ -175,7 +175,7 @@ public class EmbeddingSequenceLayer extends BaseLayer getListeners() { - return listeners; - } @Override - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... 
listeners) { this.listeners = new ArrayList<>(Arrays.asList(listeners)); } @@ -622,7 +619,7 @@ public class BatchNormalization extends BaseLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - double k = layerConf().getK(); - double n = layerConf().getN(); - double alpha = layerConf().getAlpha(); - double beta = layerConf().getBeta(); + double k = getTypedLayerConfiguration().getK(); + double n = getTypedLayerConfiguration().getN(); + double alpha = getTypedLayerConfiguration().getAlpha(); + double beta = getTypedLayerConfiguration().getBeta(); int halfN = (int) n / 2; - if (helper != null && (helperCountFail == 0 || !layerConf().isCudnnAllowFallback())){ + if (helper != null && (helperCountFail == 0 || !getTypedLayerConfiguration().isCudnnAllowFallback())){ Pair ret = null; try { ret = helper.backpropGradient(input, epsilon, k, n, alpha, beta, workspaceMgr); @@ -118,7 +119,7 @@ public class LocalResponseNormalization //This is a memory exception - don't fallback to built-in implementation throw t; } - if(layerConf().isCudnnAllowFallback()){ + if(getTypedLayerConfiguration().isCudnnAllowFallback()){ helperCountFail++; log.warn("CuDNN LocalResponseNormalization backprop execution failed - falling back on built-in implementation",t); } else { @@ -130,7 +131,7 @@ public class LocalResponseNormalization } } - boolean nchw = layerConf().getDataFormat() == CNN2DFormat.NCHW; + boolean nchw = getTypedLayerConfiguration().getDataFormat() == CNN2DFormat.NCHW; int chDim = nchw ? 1 : 3; int hDim = nchw ? 2 : 1; int wDim = nchw ? 3 : 2; @@ -183,13 +184,13 @@ public class LocalResponseNormalization private Triple activateHelper(boolean training, LayerWorkspaceMgr workspaceMgr, boolean forBackprop){ assertInputSet(false); - double k = layerConf().getK(); - double n = layerConf().getN(); - double alpha = layerConf().getAlpha(); - double beta = layerConf().getBeta(); + double k = getTypedLayerConfiguration().getK(); + double n = getTypedLayerConfiguration().getN(); + double alpha = getTypedLayerConfiguration().getAlpha(); + double beta = getTypedLayerConfiguration().getBeta(); int halfN = (int) n / 2; - if (helper != null && (helperCountFail == 0 || !layerConf().isCudnnAllowFallback())){ + if (helper != null && (helperCountFail == 0 || !getTypedLayerConfiguration().isCudnnAllowFallback())){ INDArray activations = null; try { activations = helper.activate(input, training, k, n, alpha, beta, workspaceMgr); @@ -201,7 +202,7 @@ public class LocalResponseNormalization throw t; } - if(layerConf().isCudnnAllowFallback()){ + if(getTypedLayerConfiguration().isCudnnAllowFallback()){ helperCountFail++; log.warn("CuDNN LocalResponseNormalization backprop execution failed - falling back on built-in implementation",t); } else { @@ -213,7 +214,7 @@ public class LocalResponseNormalization } } - boolean nchw = layerConf().getDataFormat() == CNN2DFormat.NCHW; + boolean nchw = getTypedLayerConfiguration().getDataFormat() == CNN2DFormat.NCHW; int chDim = nchw ? 
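// Minimal sketch of how the LocalResponseNormalization layer above now reads its
// hyperparameters and CuDNN-fallback flag from the typed configuration. The config class
// name is the stock org.deeplearning4j.nn.conf.layers.LocalResponseNormalization and is
// assumed unchanged by this patch; "LrnHyperParams" is a hypothetical holder class.
import org.deeplearning4j.nn.conf.layers.LocalResponseNormalization;

final class LrnHyperParams {
    final double k, n, alpha, beta;
    final boolean cudnnAllowFallback;

    LrnHyperParams(LocalResponseNormalization typedConf) {
        this.k = typedConf.getK();
        this.n = typedConf.getN();
        this.alpha = typedConf.getAlpha();
        this.beta = typedConf.getBeta();
        this.cudnnAllowFallback = typedConf.isCudnnAllowFallback();
    }
}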
1 : 3; val channel = input.size(chDim); @@ -285,13 +286,13 @@ public class LocalResponseNormalization } @Override - public INDArray params() { + public INDArray getModelParams() { return null; } @Override public INDArray getParam(String param) { - return params(); + return getModelParams(); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/objdetect/Yolo2OutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/objdetect/Yolo2OutputLayer.java index 69016dca4..49a61f496 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/objdetect/Yolo2OutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/objdetect/Yolo2OutputLayer.java @@ -24,7 +24,7 @@ import lombok.*; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.layers.IOutputLayer; import org.deeplearning4j.nn.conf.CNN2DFormat; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; @@ -68,7 +68,7 @@ public class Yolo2OutputLayer extends AbstractLayer Predicted WH in grid units (0 to 13 usually) INDArray predictedWHPreExp = input5.get(all(), all(), interval(2,4), all(), all()); INDArray predictedWH = Transforms.exp(predictedWHPreExp, true); - Broadcast.mul(predictedWH, layerConf().getBoundingBoxes().castTo(predictedWH.dataType()), predictedWH, 1, 2); //Box priors: [b, 2]; predictedWH: [mb, b, 2, h, w] + Broadcast.mul(predictedWH, getTypedLayerConfiguration().getBoundingBoxes().castTo(predictedWH.dataType()), predictedWH, 1, 2); //Box priors: [b, 2]; predictedWH: [mb, b, 2, h, w] //Apply sqrt to W/H in preparation for loss function INDArray predictedWHSqrt = Transforms.sqrt(predictedWH, true); @@ -235,11 +235,11 @@ public class Yolo2OutputLayer extends AbstractLayer gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), this.getScore()); } @Override @@ -616,7 +616,7 @@ public class Yolo2OutputLayer extends AbstractLayer getPredictedObjects(INDArray networkOutput, double threshold){ - return YoloUtils.getPredictedObjects(layerConf().getBoundingBoxes(), networkOutput, threshold, 0.0); + return YoloUtils.getPredictedObjects(getTypedLayerConfiguration().getBoundingBoxes(), networkOutput, threshold, 0.0); } /** @@ -650,7 +650,7 @@ public class Yolo2OutputLayer extends AbstractLayer pair = getGradientsAndDelta(preOutput2d(true, workspaceMgr), workspaceMgr); //Returns Gradient and delta^(this), not Gradient and epsilon^(this-1) //150 - long inputShape = (( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) this.getConf().getLayer()).getNIn(); + long inputShape = (( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) getLayerConfiguration()).getNIn(); INDArray delta = pair.getSecond(); //4 x 150 INDArray epsilonNext = workspaceMgr.createUninitialized(ArrayType.ACTIVATION_GRAD, input.dataType(), new long[]{inputShape, delta.length()}, 'f'); @@ -122,10 +122,10 @@ public class OCNNOutputLayer extends BaseOutputLayer getGradientsAndDelta(INDArray preOut, LayerWorkspaceMgr workspaceMgr) { - ILossFunction lossFunction = layerConf().getLossFn(); + ILossFunction lossFunction = getTypedLayerConfiguration().getLossFn(); INDArray labels2d = getLabels2d(workspaceMgr, ArrayType.BP_WORKING_MEM); - INDArray delta = lossFunction.computeGradient(labels2d, preOut, 
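// Minimal sketch of the Model accessor renames applied in these hunks: score() becomes
// getScore() and params() becomes getModelParams(). "ScoreAccessSketch" and "report" are
// hypothetical; the two accessors are the ones introduced above.
import org.deeplearning4j.nn.api.Layer;
import org.nd4j.linalg.api.ndarray.INDArray;

final class ScoreAccessSketch {
    private ScoreAccessSketch() {}

    static String report(Layer layer) {
        double score = layer.getScore();          // formerly layer.score()
        INDArray params = layer.getModelParams(); // formerly layer.params()
        long nParams = params == null ? 0 : params.length();
        return "score=" + score + ", nParams=" + nParams;
    }
}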
layerConf().getActivationFn(), maskArray); - org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer conf = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) conf().getLayer(); + INDArray delta = lossFunction.computeGradient(labels2d, preOut, getTypedLayerConfiguration().getActivationFn(), maskArray); + org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer conf = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) getLayerConfiguration(); if(conf.getLastEpochSinceRUpdated() == 0 && epochCount == 0) { @@ -164,20 +164,20 @@ public class OCNNOutputLayer extends BaseOutputLayer sigmoid derivative - INDArray firstVertDerivV = layerConf().getActivationFn() + INDArray firstVertDerivV = getTypedLayerConfiguration().getActivationFn() .backprop(xTimesV.dup(),Nd4j.ones(input.dataType(), xTimesV.shape())) .getFirst().muliRowVector(getParam(W_KEY).neg()); firstVertDerivV = firstVertDerivV.muliColumnVector(delta) - .reshape('f',input.size(0),1,layerConf().getHiddenSize()); + .reshape('f',input.size(0),1, getTypedLayerConfiguration().getHiddenSize()); INDArray secondTermDerivV = input.reshape('f', input.size(0),getParam(V_KEY).size(0),1); @@ -250,7 +250,7 @@ public class OCNNOutputLayer extends BaseOutputLayer paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return PARAM_KEYS; } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return WEIGHT_KEYS; } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return WEIGHT_KEYS.contains(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return false; } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) conf; Map params = Collections.synchronizedMap(new LinkedHashMap()); val nIn = ocnnOutputLayer.getNIn(); int hiddenLayer = ocnnOutputLayer.getHiddenSize(); @@ -133,8 +126,8 @@ public class OCNNParamInitializer extends DefaultParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) conf.getLayer(); + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) conf; Map params = Collections.synchronizedMap(new LinkedHashMap()); val nIn = ocnnOutputLayer.getNIn(); val hiddenLayer = ocnnOutputLayer.getHiddenSize(); @@ -155,12 +148,12 @@ public class OCNNParamInitializer extends DefaultParamInitializer { } - protected INDArray createWeightMatrix(NeuralNetConfiguration configuration, + protected INDArray createWeightMatrix(LayerConfiguration configuration, INDArray weightParamView, boolean initializeParameters) { - org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = 
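// Minimal sketch of the ParamInitializer API after this change: init(...) and
// getGradientsFromFlattened(...) take the LayerConfiguration directly instead of a
// NeuralNetConfiguration wrapping it. "InitializerUsageSketch" is hypothetical; both maps
// are views into the flat arrays, keyed by the same parameter names.
import java.util.Map;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.nd4j.linalg.api.ndarray.INDArray;

final class InitializerUsageSketch {
    final Map<String, INDArray> paramTable;
    final Map<String, INDArray> gradientViews;

    InitializerUsageSketch(LayerConfiguration lconf, INDArray paramsView, INDArray gradientView) {
        this.paramTable = lconf.initializer().init(lconf, paramsView, true);
        this.gradientViews = lconf.initializer().getGradientsFromFlattened(lconf, gradientView);
    }
}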
( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) configuration.getLayer(); - IWeightInit weightInit = ocnnOutputLayer.getWeightInitFn(); + org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer ocnnOutputLayer = ( org.deeplearning4j.nn.conf.ocnn.OCNNOutputLayer) configuration; + IWeightInit weightInit = ocnnOutputLayer.getWeightInit(); if (initializeParameters) { INDArray ret = weightInit.init(weightParamView.size(0), //Fan in weightParamView.size(1), //Fan out diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingLayer.java index e8b8ae9a3..5eee50a92 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/pooling/GlobalPoolingLayer.java @@ -20,11 +20,12 @@ package org.deeplearning4j.nn.layers.pooling; +import java.util.Map; import lombok.val; import org.apache.commons.lang3.ArrayUtils; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.PoolingType; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; @@ -54,11 +55,11 @@ public class GlobalPoolingLayer extends AbstractLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - if (!layerConf().isCollapseDimensions() && epsilon.rank() != 2) { + if (!getTypedLayerConfiguration().isCollapseDimensions() && epsilon.rank() != 2) { val origShape = epsilon.shape(); //Don't collapse dims case: error should be [minibatch, vectorSize, 1] or [minibatch, channels, 1, 1] //Reshape it to 2d, to get rid of the 1s @@ -291,7 +292,7 @@ public class GlobalPoolingLayer extends AbstractLayer paramTable) { + throw new RuntimeException("Not implemented."); + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/BaseRecurrentLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/BaseRecurrentLayer.java index 6b02b6c6e..0ce976283 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/BaseRecurrentLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/BaseRecurrentLayer.java @@ -21,11 +21,9 @@ package org.deeplearning4j.nn.layers.recurrent; import org.deeplearning4j.nn.api.layers.RecurrentLayer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; -import org.deeplearning4j.nn.conf.inputs.InputType; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.layers.BaseLayer; -import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; @@ -50,7 +48,7 @@ public abstract class BaseRecurrentLayer gradientViews; @@ -65,14 +86,25 @@ public class BidirectionalLayer implements RecurrentLayer { private INDArray outFwd; private INDArray outBwd; - public BidirectionalLayer(@NonNull NeuralNetConfiguration conf, @NonNull Layer fwd, @NonNull Layer bwd, @NonNull INDArray paramsView) { - this.conf = conf; + public BidirectionalLayer(@NonNull LayerConfiguration conf, @NonNull Layer fwd, @NonNull Layer bwd, @NonNull INDArray 
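// Minimal sketch of the new ownership direction between the two configuration types, as in
// the BidirectionalLayer constructor above: the layer is handed its LayerConfiguration and,
// where the enclosing network configuration is still needed, reaches it through
// getNetConfiguration(). "ConfigPairSketch" is hypothetical.
import lombok.NonNull;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;

final class ConfigPairSketch {
    private final LayerConfiguration layerConfiguration;
    private final NeuralNetConfiguration netConfiguration;

    ConfigPairSketch(@NonNull LayerConfiguration conf) {
        this.layerConfiguration = conf;
        this.netConfiguration = conf.getNetConfiguration(); // net config reached through the layer config
    }

    NeuralNetConfiguration getNetConfiguration() {
        return netConfiguration;
    }
}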
paramsView) { + this.layerConfiguration = conf; + this.conf = conf.getNetConfiguration(); this.fwd = fwd; this.bwd = bwd; - this.layerConf = (Bidirectional) conf.getLayer(); + this.layerConf = (Bidirectional) layerConfiguration; this.paramsView = paramsView; } + /** + * Return the configuration of this layer + * + * @return the configuration + */ + @Override + public LayerConfiguration getLayerConfiguration() { + return layerConf; + } + private RNNFormat getRNNDataFormat(){ return layerConf.getRNNDataFormat(); } @@ -87,6 +119,12 @@ public class BidirectionalLayer implements RecurrentLayer { "no previous state is supported"); } + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + return (ITraininableLayerConfiguration) layerConfiguration; + } + + @Override public void rnnSetPreviousState(Map stateMap) { throw new UnsupportedOperationException("Not supported: cannot RnnTimeStep bidirectional layers therefore " + @@ -234,20 +272,14 @@ public class BidirectionalLayer implements RecurrentLayer { } @Override - public Collection getListeners() { - return fwd.getListeners(); + public Collection getTrainingListeners() { + return fwd.getTrainingListeners(); } @Override - public void setListeners(TrainingListener... listeners) { - fwd.setListeners(listeners); - bwd.setListeners(listeners); - } - - @Override - public void addListeners(TrainingListener... listener) { - fwd.addListeners(listener); - bwd.addListeners(listener); + public void addTrainingListeners(TrainingListener... listeners) { + fwd.addTrainingListeners(listeners); + bwd.addTrainingListeners(listeners); } @Override @@ -266,8 +298,8 @@ public class BidirectionalLayer implements RecurrentLayer { } @Override - public double score() { - return fwd.score() + bwd.score(); + public double getScore() { + return fwd.getScore() + bwd.getScore(); } @Override @@ -277,14 +309,10 @@ public class BidirectionalLayer implements RecurrentLayer { } @Override - public INDArray params() { + public INDArray getModelParams() { return paramsView; } - @Override - public TrainingConfig getConfig() { - return conf.getLayer(); - } @Override public long numParams() { @@ -349,13 +377,23 @@ public class BidirectionalLayer implements RecurrentLayer { } @Override - public NeuralNetConfiguration conf() { + public NeuralNetConfiguration getNetConfiguration() { return conf; } + /** + * @param netConfiguration + */ @Override - public void setConf(NeuralNetConfiguration conf) { - this.conf = conf; + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { + + } + + + public void setLayerConfiguration(LayerConfiguration layerConfiguration) { + this.layerConfiguration = layerConfiguration; + this.layerConf = (Bidirectional) layerConfiguration; + this.conf = layerConfiguration.getNetConfiguration(); } @Override @@ -363,11 +401,86 @@ public class BidirectionalLayer implements RecurrentLayer { return input; } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + @Override public ConvexOptimizer getOptimizer() { return null; } + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(DataSet dataSet) { + + } + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { + + } + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public 
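// Minimal sketch of the listener API after the rename: setListeners/addListeners collapse
// into addTrainingListeners, and wrapper-style layers forward the call to the layers they
// wrap, exactly as BidirectionalLayer does above. "ListenerForwardSketch" is hypothetical.
import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.optimize.api.TrainingListener;

final class ListenerForwardSketch {
    private ListenerForwardSketch() {}

    static void forwardListeners(Layer fwd, Layer bwd, TrainingListener... listeners) {
        fwd.addTrainingListeners(listeners);
        bwd.addTrainingListeners(listeners);
    }
}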
void fit(DataSetIterator iterator) { + + } + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { + + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, + T... evaluations) { + return null; + } + @Override public INDArray getParam(String param) { String sub = param.substring(1); @@ -379,17 +492,17 @@ public class BidirectionalLayer implements RecurrentLayer { } @Override - public Map paramTable() { - return paramTable(false); + public Map getParamTable() { + return getParamTable(false); } @Override - public Map paramTable(boolean backpropParamsOnly) { + public Map getParamTable(boolean backpropParamsOnly) { Map m = new LinkedHashMap<>(); - for(Map.Entry e : fwd.paramTable(backpropParamsOnly).entrySet()){ + for(Map.Entry e : fwd.getParamTable(backpropParamsOnly).entrySet()){ m.put(BidirectionalParamInitializer.FORWARD_PREFIX + e.getKey(), e.getValue()); } - for(Map.Entry e : bwd.paramTable(backpropParamsOnly).entrySet()){ + for(Map.Entry e : bwd.getParamTable(backpropParamsOnly).entrySet()){ m.put(BidirectionalParamInitializer.BACKWARD_PREFIX + e.getKey(), e.getValue()); } return m; @@ -442,10 +555,9 @@ public class BidirectionalLayer implements RecurrentLayer { //No op } - @Override - public void setListeners(Collection listeners) { - fwd.setListeners(listeners); - bwd.setListeners(listeners); + public void addTrainingListeners(Collection listeners) { + fwd.addTrainingListeners(listeners.toArray(new TrainingListener[]{})); + bwd.addTrainingListeners(listeners.toArray(new TrainingListener[]{})); } @Override @@ -603,4 +715,11 @@ public class BidirectionalLayer implements RecurrentLayer { public void close(){ //No-op for individual layers } + /** + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + throw new RuntimeException("Not implemented."); + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTM.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTM.java index 99a2081dc..595dd0e2c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTM.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesBidirectionalLSTM.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.layers.recurrent; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.api.MaskState; import org.deeplearning4j.nn.conf.CacheMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.params.GravesBidirectionalLSTMParamInitializer; @@ -41,7 +41,7 @@ public class GravesBidirectionalLSTM protected FwdPassReturn cachedPassForward; protected FwdPassReturn cachedPassBackward; - public GravesBidirectionalLSTM(NeuralNetConfiguration conf, 
DataType dataType) { + public GravesBidirectionalLSTM(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -74,30 +74,30 @@ public class GravesBidirectionalLSTM final FwdPassReturn fwdPass = activateHelperDirectional(true, null, null, true, true, workspaceMgr); fwdPass.fwdPassOutput = permuteIfNWC(fwdPass.fwdPassOutput); final Pair forwardsGradient = LSTMHelpers.backpropGradientHelper(this, - this.conf, - this.layerConf().getGateActivationFn(), permuteIfNWC(this.input), + this.layerConfiguration.getNetConfiguration(), + this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_FORWARDS), getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS), permuteIfNWC(epsilon), truncatedBPTT, tbpttBackwardLength, fwdPass, true, GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS, GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_FORWARDS, GravesBidirectionalLSTMParamInitializer.BIAS_KEY_FORWARDS, gradientViews, maskArray, true, - null, workspaceMgr, layerConf().isHelperAllowFallback()); + null, workspaceMgr, getTypedLayerConfiguration().isHelperAllowFallback()); final FwdPassReturn backPass = activateHelperDirectional(true, null, null, true, false, workspaceMgr); final Pair backwardsGradient = LSTMHelpers.backpropGradientHelper(this, - this.conf, - this.layerConf().getGateActivationFn(), permuteIfNWC(this.input), + this.layerConfiguration.getNetConfiguration(), + this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_BACKWARDS), getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_BACKWARDS), permuteIfNWC(epsilon), truncatedBPTT, tbpttBackwardLength, backPass, false, GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_BACKWARDS, GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_BACKWARDS, GravesBidirectionalLSTMParamInitializer.BIAS_KEY_BACKWARDS, gradientViews, maskArray, true, - null, workspaceMgr, layerConf().isHelperAllowFallback()); + null, workspaceMgr, getTypedLayerConfiguration().isHelperAllowFallback()); forwardsGradient.setSecond(permuteIfNWC(forwardsGradient.getSecond())); backwardsGradient.setSecond(permuteIfNWC(backwardsGradient.getSecond())); @@ -117,7 +117,7 @@ public class GravesBidirectionalLSTM final Gradient correctOrderedGradient = new DefaultGradient(); - for (final String key : params.keySet()) { + for (final String key : getParamTable().keySet()) { correctOrderedGradient.setGradientFor(key, combinedGradient.getGradientFor(key)); } @@ -155,22 +155,22 @@ public class GravesBidirectionalLSTM cachedPassForward = null; } else { - forwardsEval = LSTMHelpers.activateHelper(this, this.conf, this.layerConf().getGateActivationFn(), + forwardsEval = LSTMHelpers.activateHelper(this, this.layerConfiguration.getNetConfiguration(), this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_FORWARDS), getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS), getParam(GravesBidirectionalLSTMParamInitializer.BIAS_KEY_FORWARDS), training, null, null, forBackprop || (cacheMode != CacheMode.NONE && training), true, GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_FORWARDS, maskArray, true, null, - forBackprop ? 
cacheMode : CacheMode.NONE, workspaceMgr, layerConf().isHelperAllowFallback()); + forBackprop ? cacheMode : CacheMode.NONE, workspaceMgr, getTypedLayerConfiguration().isHelperAllowFallback()); - backwardsEval = LSTMHelpers.activateHelper(this, this.conf, this.layerConf().getGateActivationFn(), + backwardsEval = LSTMHelpers.activateHelper(this, this.layerConfiguration.getNetConfiguration(), this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), getParam(GravesBidirectionalLSTMParamInitializer.RECURRENT_WEIGHT_KEY_BACKWARDS), getParam(GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_BACKWARDS), getParam(GravesBidirectionalLSTMParamInitializer.BIAS_KEY_BACKWARDS), training, null, null, forBackprop || (cacheMode != CacheMode.NONE && training), false, GravesBidirectionalLSTMParamInitializer.INPUT_WEIGHT_KEY_BACKWARDS, maskArray, true, null, - forBackprop ? cacheMode : CacheMode.NONE, workspaceMgr, layerConf().isHelperAllowFallback()); + forBackprop ? cacheMode : CacheMode.NONE, workspaceMgr, getTypedLayerConfiguration().isHelperAllowFallback()); forwardsEval.fwdPassOutput = permuteIfNWC(forwardsEval.fwdPassOutput); backwardsEval.fwdPassOutput = permuteIfNWC(backwardsEval.fwdPassOutput); @@ -215,10 +215,10 @@ public class GravesBidirectionalLSTM biasKey = GravesBidirectionalLSTMParamInitializer.BIAS_KEY_BACKWARDS; } - FwdPassReturn ret = LSTMHelpers.activateHelper(this, this.conf, this.layerConf().getGateActivationFn(), permuteIfNWC(this.input), + FwdPassReturn ret = LSTMHelpers.activateHelper(this, this.layerConfiguration.getNetConfiguration(), this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), getParam(recurrentKey), getParam(inputKey), getParam(biasKey), training, prevOutputActivations, prevMemCellState, forBackprop, forwards, inputKey, maskArray, true, - null, forBackprop ? cacheMode : CacheMode.NONE, workspaceMgr, layerConf().isHelperAllowFallback()); + null, forBackprop ? 
cacheMode : CacheMode.NONE, workspaceMgr, getTypedLayerConfiguration().isHelperAllowFallback()); ret.fwdPassOutput = permuteIfNWC(ret.fwdPassOutput); return ret; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java index 1e37cfe32..6626e927e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/GravesLSTM.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.layers.recurrent; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.api.MaskState; import org.deeplearning4j.nn.conf.CacheMode; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.params.GravesLSTMParamInitializer; import org.nd4j.common.base.Preconditions; @@ -40,7 +40,7 @@ public class GravesLSTM extends BaseRecurrentLayer p = LSTMHelpers.backpropGradientHelper(this, - this.conf, this.layerConf().getGateActivationFn(), permuteIfNWC(this.input), + this.layerConfiguration.getNetConfiguration(), this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), recurrentWeights, inputWeights, permuteIfNWC(epsilon), truncatedBPTT, tbpttBackwardLength, fwdPass, true, GravesLSTMParamInitializer.INPUT_WEIGHT_KEY, GravesLSTMParamInitializer.RECURRENT_WEIGHT_KEY, GravesLSTMParamInitializer.BIAS_KEY, gradientViews, maskArray, true, null, - workspaceMgr, layerConf().isHelperAllowFallback()); + workspaceMgr, getTypedLayerConfiguration().isHelperAllowFallback()); weightNoiseParams.clear(); p.setSecond(permuteIfNWC(backpropDropOutIfPresent(p.getSecond()))); @@ -128,11 +128,11 @@ public class GravesLSTM extends BaseRecurrentLayer { @@ -45,7 +44,7 @@ public class LSTM extends BaseRecurrentLayer p = LSTMHelpers.backpropGradientHelper(this, - this.conf, this.layerConf().getGateActivationFn(), permuteIfNWC(this.input), + getNetConfiguration(), this.getTypedLayerConfiguration().getGateActivationFn(), permuteIfNWC(this.input), recurrentWeights, inputWeights, permuteIfNWC(epsilon), truncatedBPTT, tbpttBackwardLength, fwdPass, true, LSTMParamInitializer.INPUT_WEIGHT_KEY, LSTMParamInitializer.RECURRENT_WEIGHT_KEY, LSTMParamInitializer.BIAS_KEY, gradientViews, null, false, helper, workspaceMgr, - layerConf().isHelperAllowFallback()); + getTypedLayerConfiguration().isHelperAllowFallback()); weightNoiseParams.clear(); p.setSecond(permuteIfNWC(backpropDropOutIfPresent(p.getSecond()))); @@ -140,7 +139,7 @@ public class LSTM extends BaseRecurrentLayer= endIdx; iTimeIndex--) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LastTimeStepLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LastTimeStepLayer.java index 4656ce9d1..da5f0b782 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LastTimeStepLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/LastTimeStepLayer.java @@ -57,7 +57,7 @@ public class LastTimeStepLayer extends BaseWrapperLayer { public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { long[] newEpsShape = origOutputShape; - boolean nwc = TimeSeriesUtils.getFormatFromRnnLayer(underlying.conf().getLayer()) == RNNFormat.NWC; + 
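// Minimal sketch of how the LSTM layers above now obtain their gate activation function and
// helper-fallback flag, i.e. through the typed layer configuration instead of layerConf().
// It assumes the stock org.deeplearning4j.nn.conf.layers.LSTM configuration class keeps
// these getters; "LstmConfigSketch" is hypothetical.
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.nd4j.linalg.activations.IActivation;

final class LstmConfigSketch {
    private LstmConfigSketch() {}

    static IActivation gateActivation(LSTM typedConf) {
        return typedConf.getGateActivationFn();
    }

    static boolean allowHelperFallback(LSTM typedConf) {
        return typedConf.isHelperAllowFallback();
    }
}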
boolean nwc = TimeSeriesUtils.getFormatFromRnnLayer(underlying.getLayerConfiguration()) == RNNFormat.NWC; INDArray newEps = Nd4j.create(epsilon.dataType(), newEpsShape, 'f'); if(lastTimeStepIdxs == null){ //no mask case @@ -119,7 +119,7 @@ public class LastTimeStepLayer extends BaseWrapperLayer { "rank " + in.rank() + " with shape " + Arrays.toString(in.shape())); } origOutputShape = in.shape(); - boolean nwc = TimeSeriesUtils.getFormatFromRnnLayer(underlying.conf().getLayer()) == RNNFormat.NWC; + boolean nwc = TimeSeriesUtils.getFormatFromRnnLayer(underlying.getLayerConfiguration()) == RNNFormat.NWC; // underlying instanceof BaseRecurrentLayer && ((BaseRecurrentLayer)underlying).getDataFormat() == RNNFormat.NWC)|| // underlying instanceof MaskZeroLayer && ((MaskZeroLayer)underlying).getUnderlying() instanceof BaseRecurrentLayer && // ((BaseRecurrentLayer)((MaskZeroLayer)underlying).getUnderlying()).getDataFormat() == RNNFormat.NWC; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/RnnLossLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/RnnLossLayer.java index fb2117b9b..e734212e8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/RnnLossLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/recurrent/RnnLossLayer.java @@ -25,8 +25,8 @@ import lombok.Setter; import org.deeplearning4j.eval.Evaluation; import org.deeplearning4j.nn.api.MaskState; import org.deeplearning4j.nn.api.layers.IOutputLayer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.RNNFormat; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.BaseLayer; @@ -47,7 +47,7 @@ import java.util.List; public class RnnLossLayer extends BaseLayer implements IOutputLayer { @Setter @Getter protected INDArray labels; - public RnnLossLayer(NeuralNetConfiguration conf, DataType dataType) { + public RnnLossLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -63,7 +63,7 @@ public class RnnLossLayer extends BaseLayer { - public RnnOutputLayer(NeuralNetConfiguration conf, DataType dataType) { + public RnnOutputLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } @@ -53,7 +53,7 @@ public class RnnOutputLayer extends BaseOutputLayer paramTable(boolean backpropOnly) { + public Map getParamTable(boolean backpropOnly) { return paramTable; } @Override - public TrainingConfig getConfig() { + public ITraininableLayerConfiguration getTrainingConfig() { return config; } @Override - public INDArray params() { + public INDArray getParams() { return params; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffLayer.java index 021c1a5aa..7ee7cd33f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffLayer.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.layers.samediff; import lombok.val; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; 
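// Minimal sketch of resolving the RNN data format through the wrapped layer's
// LayerConfiguration rather than via underlying.conf().getLayer(), mirroring the
// LastTimeStepLayer change above. The TimeSeriesUtils import path is assumed from stock
// DL4J; "RnnFormatSketch" and "isNWC" are hypothetical.
import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.nn.conf.RNNFormat;
import org.deeplearning4j.util.TimeSeriesUtils;

final class RnnFormatSketch {
    private RnnFormatSketch() {}

    static boolean isNWC(Layer underlying) {
        return TimeSeriesUtils.getFormatFromRnnLayer(underlying.getLayerConfiguration()) == RNNFormat.NWC;
    }
}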
import org.deeplearning4j.nn.conf.layers.samediff.AbstractSameDiffLayer; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; @@ -64,7 +64,7 @@ public class SameDiffLayer extends AbstractLayer { protected Map gradTable; - public SameDiffLayer(NeuralNetConfiguration conf, DataType dataType){ + public SameDiffLayer(LayerConfiguration conf, DataType dataType){ super(conf, dataType); } @@ -95,7 +95,7 @@ public class SameDiffLayer extends AbstractLayer { } } - org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf(); + org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) getTypedLayerConfiguration(); bl.validateInput(input); Map phMap = new HashMap<>(); @@ -103,7 +103,7 @@ public class SameDiffLayer extends AbstractLayer { if(maskArray != null){ phMap.put(MASK_KEY, maskArray); } else { - phMap.put(MASK_KEY, layerConf().onesMaskForInput(input)); + phMap.put(MASK_KEY, getTypedLayerConfiguration().onesMaskForInput(input)); } //Configure memory management for SameDiff instance - use DL4J workspaces @@ -175,7 +175,7 @@ public class SameDiffLayer extends AbstractLayer { sessionMap.get(Thread.currentThread().getId()).setMmgr(mmgr); - org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf(); + org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) getTypedLayerConfiguration(); bl.validateInput(input); Map phMap = new HashMap<>(); @@ -184,7 +184,7 @@ public class SameDiffLayer extends AbstractLayer { if(maskArray != null){ phMap.put(MASK_KEY, maskArray); } else { - phMap.put(MASK_KEY, layerConf().onesMaskForInput(input)); + phMap.put(MASK_KEY, getTypedLayerConfiguration().onesMaskForInput(input)); } List requiredGrads = new ArrayList<>(paramTable.size() + 1); @@ -214,7 +214,7 @@ public class SameDiffLayer extends AbstractLayer { * @return the parameters of the neural network */ @Override - public INDArray params() { + public INDArray getModelParams() { return params; } @@ -271,7 +271,7 @@ public class SameDiffLayer extends AbstractLayer { @Override public void setBackpropGradientsViewArray(INDArray gradients) { this.gradients = gradients; - this.gradTable = layerConf().initializer().getGradientsFromFlattened(conf(), gradients); + this.gradTable = getTypedLayerConfiguration().initializer().getGradientsFromFlattened(this.getLayerConfiguration(), gradients); } @Override @@ -286,27 +286,27 @@ public class SameDiffLayer extends AbstractLayer { } @Override - public Map paramTable() { - return paramTable(false); + public Map getParamTable() { + return getParamTable(false); } @Override - public Map paramTable(boolean backpropParamsOnly) { + public Map getParamTable(boolean backpropParamsOnly) { return paramTable; } protected void doInit(){ try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { - org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf(); + org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) getTypedLayerConfiguration(); sameDiff = SameDiff.create(); //Use SingleThreadArrayHolder so we can use views (also don't nede multithreading here, DL4J is not thread safe) sameDiff.setArrayHolders(new 
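// Minimal sketch of the mask-placeholder pattern used by the SameDiff layers above: when no
// mask array is set, an all-ones mask produced by the layer configuration is fed instead.
// "SameDiffMaskSketch" is hypothetical; inputKey/maskKey stand in for the layer's INPUT_KEY
// and MASK_KEY constants.
import java.util.HashMap;
import java.util.Map;
import org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer;
import org.nd4j.linalg.api.ndarray.INDArray;

final class SameDiffMaskSketch {
    private SameDiffMaskSketch() {}

    static Map<String, INDArray> placeholders(SameDiffLayer typedConf, INDArray input, INDArray maskArray,
                                              String inputKey, String maskKey) {
        Map<String, INDArray> phMap = new HashMap<>();
        phMap.put(inputKey, input);
        phMap.put(maskKey, maskArray != null ? maskArray : typedConf.onesMaskForInput(input));
        return phMap;
    }
}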
SingleThreadArrayHolder(), new SingleThreadArrayHolder(), false); - Map p = paramTable(); + Map p = getParamTable(); long[] inputShape = input.shape().clone(); inputShape[0] = -1; SDVariable inputVar = sameDiff.placeHolder(INPUT_KEY, dataType, inputShape); - Map paramShapes = layerConf().getLayerParams().getParamShapes(); + Map paramShapes = getTypedLayerConfiguration().getLayerParams().getParamShapes(); Map params = new LinkedHashMap<>(); for (String s : paramShapes.keySet()) { val ps = paramShapes.get(s); @@ -335,7 +335,7 @@ public class SameDiffLayer extends AbstractLayer { @Override public Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) { - org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf(); + org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) getTypedLayerConfiguration(); this.maskArray = maskArray; this.maskState = currentMaskState; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffOutputLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffOutputLayer.java index 67cf9e648..60d4d4c7d 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffOutputLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/samediff/SameDiffOutputLayer.java @@ -25,7 +25,7 @@ import lombok.Setter; import lombok.val; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.layers.IOutputLayer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.layers.AbstractLayer; @@ -67,7 +67,7 @@ public class SameDiffOutputLayer extends AbstractLayer gradTable; - public SameDiffOutputLayer(NeuralNetConfiguration conf, DataType dataType){ + public SameDiffOutputLayer(LayerConfiguration conf, DataType dataType){ super(conf, dataType); } @@ -96,7 +96,7 @@ public class SameDiffOutputLayer extends AbstractLayer phMap = new HashMap<>(); phMap.put(INPUT_KEY, input); - if(!activations && layerConf().labelsRequired() && labels != null) { + if(!activations && getTypedLayerConfiguration().labelsRequired() && labels != null) { phMap.put(LABELS_KEY, labels); } - String s = activations ? layerConf().activationsVertexName() : outputVar.name(); + String s = activations ? getTypedLayerConfiguration().activationsVertexName() : outputVar.name(); INDArray out = sameDiff.outputSingle(phMap, s); @@ -152,7 +152,7 @@ public class SameDiffOutputLayer extends AbstractLayer backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { assertInputSet(true); - Preconditions.checkState(!layerConf().labelsRequired() || labels != null, "Cannot execute backprop: Labels are not set. " + + Preconditions.checkState(!getTypedLayerConfiguration().labelsRequired() || labels != null, "Cannot execute backprop: Labels are not set. 
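// Minimal sketch of the parameter-table rename: paramTable() becomes getParamTable(), and
// the boolean overload keeps its meaning (backprop parameters only, excluding
// pretrain-only parameters). "ParamTableSketch" and "countBackpropParams" are hypothetical.
import java.util.Map;
import org.deeplearning4j.nn.api.Layer;
import org.nd4j.linalg.api.ndarray.INDArray;

final class ParamTableSketch {
    private ParamTableSketch() {}

    static long countBackpropParams(Layer layer) {
        long count = 0;
        Map<String, INDArray> backpropOnly = layer.getParamTable(true); // formerly paramTable(true)
        for (INDArray arr : backpropOnly.values()) {
            count += arr.length();
        }
        return count;
    }
}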
" + "If labels are not required for this SameDiff output layer, override SameDiffOutputLayer.labelsRequired()" + " to return false instead"); Gradient g = new DefaultGradient(); @@ -227,7 +227,7 @@ public class SameDiffOutputLayer extends AbstractLayer paramTable() { - return paramTable(false); + public Map getParamTable() { + return getParamTable(false); } @Override - public Map paramTable(boolean backpropParamsOnly) { + public Map getParamTable(boolean backpropParamsOnly) { return paramTable; } protected void doInit(){ try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { - org.deeplearning4j.nn.conf.layers.samediff.SameDiffOutputLayer bl = layerConf(); + org.deeplearning4j.nn.conf.layers.samediff.SameDiffOutputLayer bl = getTypedLayerConfiguration(); sameDiff = SameDiff.create(); //Use SingleThreadArrayHolder so we can use views (also don't nede multithreading here, DL4J is not thread safe) sameDiff.setArrayHolders(new SingleThreadArrayHolder(), new SingleThreadArrayHolder(), false); - Map p = paramTable(); + Map p = getParamTable(); long[] inputShape = input.shape().clone(); inputShape[0] = -1; SDVariable inputVar = sameDiff.placeHolder(INPUT_KEY, dataType, inputShape); SDVariable labelVar = null; - if(layerConf().labelsRequired()){ + if(getTypedLayerConfiguration().labelsRequired()){ long[] labelShape = labels == null ? new long[]{-1, -1} : labels.shape().clone(); labelShape[0] = -1; labelVar = sameDiff.placeHolder(LABELS_KEY, dataType, labelShape); } - Map paramShapes = layerConf().getLayerParams().getParamShapes(); + Map paramShapes = getTypedLayerConfiguration().getLayerParams().getParamShapes(); Map params = new LinkedHashMap<>(); for (String s : paramShapes.keySet()) { val ps = paramShapes.get(s); @@ -340,7 +340,7 @@ public class SameDiffOutputLayer extends AbstractLayer gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -145,7 +145,7 @@ public class CenterLossOutputLayer extends BaseOutputLayer getGradientsAndDelta(INDArray preOut, LayerWorkspaceMgr workspaceMgr) { - ILossFunction lossFunction = layerConf().getLossFn(); + ILossFunction lossFunction = getTypedLayerConfiguration().getLossFn(); INDArray labels2d = getLabels2d(workspaceMgr, ArrayType.BP_WORKING_MEM); if (labels2d.size(1) != preOut.size(1)) { throw new DL4JInvalidInputException( @@ -181,7 +181,7 @@ public class CenterLossOutputLayer extends BaseOutputLayer { private final Gradient emptyGradient = new DefaultGradient(); - public MaskLayer(NeuralNetConfiguration conf, DataType dataType) { + public MaskLayer(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/variational/VariationalAutoencoder.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/variational/VariationalAutoencoder.java index 75df1dfad..bf21a6dc8 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/variational/VariationalAutoencoder.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/variational/VariationalAutoencoder.java @@ -20,13 +20,20 @@ package org.deeplearning4j.nn.layers.variational; +import static org.deeplearning4j.nn.params.VariationalAutoencoderParamInitializer.BIAS_KEY_SUFFIX; +import static org.deeplearning4j.nn.params.VariationalAutoencoderParamInitializer.WEIGHT_KEY_SUFFIX; + +import java.util.*; import lombok.*; +import net.brutex.ai.dnn.api.IModel; +import 
org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.TrainingConfig; import org.deeplearning4j.nn.api.layers.LayerConstraint; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.variational.CompositeReconstructionDistribution; import org.deeplearning4j.nn.conf.layers.variational.LossFunctionWrapper; import org.deeplearning4j.nn.conf.layers.variational.ReconstructionDistribution; @@ -39,41 +46,37 @@ import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.deeplearning4j.optimize.Solver; import org.deeplearning4j.optimize.api.ConvexOptimizer; import org.deeplearning4j.optimize.api.TrainingListener; +import org.nd4j.common.primitives.Pair; +import org.nd4j.evaluation.IEvaluation; import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.activations.impl.ActivationIdentity; import org.nd4j.linalg.api.blas.Level1; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.memory.MemoryWorkspace; import org.nd4j.linalg.api.ndarray.INDArray; +import org.nd4j.linalg.dataset.api.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; +import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; import org.nd4j.linalg.exception.ND4JArraySizeException; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.regularization.Regularization; import org.nd4j.linalg.lossfunctions.ILossFunction; import org.nd4j.linalg.ops.transforms.Transforms; -import org.nd4j.common.primitives.Pair; - -import java.util.*; - -import static org.deeplearning4j.nn.params.VariationalAutoencoderParamInitializer.BIAS_KEY_SUFFIX; -import static org.deeplearning4j.nn.params.VariationalAutoencoderParamInitializer.WEIGHT_KEY_SUFFIX; public class VariationalAutoencoder implements Layer { - protected INDArray input; protected INDArray paramsFlattened; protected INDArray gradientsFlattened; protected Map params; @Getter protected transient Map gradientViews; - protected NeuralNetConfiguration conf; protected double score = 0.0; protected ConvexOptimizer optimizer; protected Gradient gradient; - protected Collection trainingListeners = new ArrayList<>(); protected int index = 0; protected INDArray maskArray; protected Solver solver; - protected int[] encoderLayerSizes; protected int[] decoderLayerSizes; protected ReconstructionDistribution reconstructionDistribution; @@ -81,37 +84,88 @@ public class VariationalAutoencoder implements Layer { protected int numSamples; protected CacheMode cacheMode = CacheMode.NONE; protected DataType dataType; - protected boolean zeroedPretrainParamGradients = false; - protected Map weightNoiseParams = new HashMap<>(); - @Getter @Setter protected int iterationCount; @Getter @Setter protected int epochCount; + @Getter @Setter @NonNull + private LayerConfiguration layerConfiguration; + private @Getter @Setter Collection trainingListeners = new HashSet<>(); - public VariationalAutoencoder(NeuralNetConfiguration conf, DataType dataType) { - this.conf = conf; + public VariationalAutoencoder(@NonNull LayerConfiguration layerConfiguration, DataType dataType) { + this.layerConfiguration = layerConfiguration; this.dataType = dataType; this.encoderLayerSizes = 
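// Minimal sketch of the constructor pattern used by VariationalAutoencoder above: the layer
// stores its LayerConfiguration and casts it to the concrete configuration class to read
// hyperparameters, instead of going through conf.getLayer(). "VaeConfigReader" is a
// hypothetical holder; the getters are the ones called in the hunk.
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder;

final class VaeConfigReader {
    final int[] encoderLayerSizes;
    final int[] decoderLayerSizes;
    final int numSamples;

    VaeConfigReader(LayerConfiguration layerConfiguration) {
        VariationalAutoencoder vaeConf = (VariationalAutoencoder) layerConfiguration;
        this.encoderLayerSizes = vaeConf.getEncoderLayerSizes();
        this.decoderLayerSizes = vaeConf.getDecoderLayerSizes();
        this.numSamples = vaeConf.getNumSamples();
    }
}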
- ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) conf.getLayer()) + ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) layerConfiguration) .getEncoderLayerSizes(); this.decoderLayerSizes = - ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) conf.getLayer()) + ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) layerConfiguration) .getDecoderLayerSizes(); this.reconstructionDistribution = - ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) conf.getLayer()) + ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) layerConfiguration) .getOutputDistribution(); - this.pzxActivationFn = ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) conf.getLayer()) + this.pzxActivationFn = ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) layerConfiguration) .getPzxActivationFn(); - this.numSamples = ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) conf.getLayer()) + this.numSamples = ((org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) layerConfiguration) .getNumSamples(); } + /** + * Replace the TrainingListeners for this model + * + * @param listeners new listeners + */ + @Override + public void addTrainingListeners(TrainingListener... listeners) { + if(listeners != null) + trainingListeners.addAll(List.of(listeners)); + } + +/** +* + * @param listeners + */ + @Override + public void addTrainingListeners(Collection listeners) { + if(listeners != null) + trainingListeners.addAll(listeners); + } + + /** + * Get a reference to the network this layer is part of. + * + * @return + */ + @Override + public IModel getNet() { + throw new RuntimeException("Not implemented."); + } + protected org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder layerConf() { - return (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) conf().getLayer(); + return (org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder) layerConfiguration; + } + + /** + * Return the configuration of this layer + * + * @return the configuration + */ + @Override + public LayerConfiguration getLayerConfiguration() { + return layerConf(); + } + + /** + * Set a new layer configuration, new init() needs to be called afterwards. + * + * @param lconf layer configuration + */ + @Override + public void setLayerConfiguration(LayerConfiguration lconf) { + } @Override @@ -123,7 +177,7 @@ public class VariationalAutoencoder implements Layer { } protected String layerId() { - String name = this.conf().getLayer().getLayerName(); + String name = this.getLayerConfiguration().getLayerName(); return "(layer name: " + (name == null ? 
"\"\"" : name) + ", layer index: " + index + ")"; } @@ -146,7 +200,7 @@ public class VariationalAutoencoder implements Layer { } @Override - public double score() { + public double getScore() { return score; } @@ -248,7 +302,7 @@ public class VariationalAutoencoder implements Layer { this.score += logPTheta / numSamples; //If we have any training listeners (for example, for UI StatsListener - pass on activations) - if (trainingListeners != null && !trainingListeners.isEmpty() && l == 0) { //Note: only doing this on the *first* sample + if (getTrainingConfig() != null && !getTrainingListeners().isEmpty() && l == 0) { //Note: only doing this on the *first* sample Map activations = new LinkedHashMap<>(); for (int i = 0; i < fwd.encoderActivations.length; i++) { activations.put("e" + i, fwd.encoderActivations[i]); @@ -259,9 +313,9 @@ public class VariationalAutoencoder implements Layer { } activations.put(VariationalAutoencoderParamInitializer.PXZ_PREFIX, reconstructionDistribution.generateAtMean(pxzDistributionPreOut)); - if (!trainingListeners.isEmpty()) { + if (!getTrainingListeners().isEmpty()) { try (MemoryWorkspace workspace = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - for (TrainingListener tl : trainingListeners) { + for (TrainingListener tl : getTrainingListeners()) { tl.onForwardPass(this, activations); } } @@ -466,13 +520,28 @@ public class VariationalAutoencoder implements Layer { } @Override - public INDArray params() { + public INDArray getModelParams() { return paramsFlattened; } + /** + * The param table + * + * @return + */ @Override - public TrainingConfig getConfig() { - return conf.getLayer(); + public Map getParamTable() { + return null; + } + + @Override + public void setParamTable(Map paramTable) { + this.params = paramTable; + } + + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + return (BaseLayerConfiguration) layerConfiguration; } @Override @@ -480,6 +549,24 @@ public class VariationalAutoencoder implements Layer { return numParams(false); } + /** + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + throw new RuntimeException("Not implemented."); + } + + @Override + public void setParams(INDArray params) { + if (params.length() != this.paramsFlattened.length()) { + throw new IllegalArgumentException("Cannot set parameters: expected parameters vector of length " + + this.paramsFlattened.length() + " but got parameters array of length " + params.length() + + " " + layerId()); + } + this.paramsFlattened.assign(params); + } + @Override public long numParams(boolean backwards) { int ret = 0; @@ -491,16 +578,6 @@ public class VariationalAutoencoder implements Layer { return ret; } - @Override - public void setParams(INDArray params) { - if (params.length() != this.paramsFlattened.length()) { - throw new IllegalArgumentException("Cannot set parameters: expected parameters vector of length " - + this.paramsFlattened.length() + " but got parameters array of length " + params.length() - + " " + layerId()); - } - this.paramsFlattened.assign(params); - } - @Override public void setParamsViewArray(INDArray params) { if (this.params != null && params.length() != numParams()) @@ -522,7 +599,7 @@ public class VariationalAutoencoder implements Layer { } this.gradientsFlattened = gradients; - this.gradientViews = conf.getLayer().initializer().getGradientsFromFlattened(conf, gradients); + this.gradientViews = layerConfiguration.initializer().getGradientsFromFlattened(this.layerConfiguration, gradients); } @Override @@ 
-538,7 +615,7 @@ public class VariationalAutoencoder implements Layer { @Override public Pair gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -548,14 +625,22 @@ public class VariationalAutoencoder implements Layer { return (int) input.size(0); } + /** + * The configuration for the neural network + * + * @return the configuration for the neural network + */ @Override - public NeuralNetConfiguration conf() { - return conf; + public NeuralNetConfiguration getNetConfiguration() { + return this.layerConfiguration.getNetConfiguration(); } + /** + * @param netConfiguration + */ @Override - public void setConf(NeuralNetConfiguration conf) { - this.conf = conf; + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { + } @Override @@ -563,23 +648,93 @@ public class VariationalAutoencoder implements Layer { return input; } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + @Override public ConvexOptimizer getOptimizer() { return optimizer; } + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(DataSet dataSet) { + + } + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { + + } + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) { + + } + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { + + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, + T... 
evaluations) { + return null; + } + @Override public INDArray getParam(String param) { return params.get(param); } @Override - public Map paramTable() { - return new LinkedHashMap<>(params); - } - - @Override - public Map paramTable(boolean backpropParamsOnly) { + public Map getParamTable(boolean backpropParamsOnly) { Map map = new LinkedHashMap<>(); for (Map.Entry e : params.entrySet()) { if (!backpropParamsOnly || !isPretrainParam(e.getKey())) { @@ -594,15 +749,10 @@ public class VariationalAutoencoder implements Layer { return true; } - @Override - public void setParamTable(Map paramTable) { - this.params = paramTable; - } - @Override public void setParam(String key, INDArray val) { - if (paramTable().containsKey(key)) { - paramTable().get(key).assign(val); + if (getParamTable().containsKey(key)) { + getParamTable().get(key).assign(val); } else { throw new IllegalArgumentException("Unknown parameter: " + key + " - " + layerId()); } @@ -630,7 +780,7 @@ public class VariationalAutoencoder implements Layer { @Override public double calcRegularizationScore(boolean backpropParamsOnly){ double scoreSum = 0.0; - for (Map.Entry e : paramTable().entrySet()) { + for (Map.Entry e : getParamTable().entrySet()) { if(backpropParamsOnly && isPretrainParam(e.getKey())) continue; List l = layerConf().getRegularizationByParam(e.getKey()); @@ -726,15 +876,6 @@ public class VariationalAutoencoder implements Layer { return f.pzxMeanPreOut; } - @AllArgsConstructor - @Data - private static class VAEFwdHelper { - private INDArray[] encoderPreOuts; - private INDArray pzxMeanPreOut; - private INDArray[] encoderActivations; - } - - private VAEFwdHelper doForward(boolean training, boolean forBackprop, LayerWorkspaceMgr workspaceMgr) { assertInputSet(false); @@ -786,49 +927,8 @@ public class VariationalAutoencoder implements Layer { } @Override - public Collection getListeners() { - if (trainingListeners == null) { - return null; - } - - return new ArrayList<>(trainingListeners); - } - - @Override - public void setListeners(TrainingListener... listeners) { - setListeners(Arrays.asList(listeners)); - } - - @Override - public void setListeners(Collection listeners) { - if (trainingListeners == null) - trainingListeners = new ArrayList<>(); - else - trainingListeners.clear(); - if (trainingListeners == null) - trainingListeners = new ArrayList<>(); - else - trainingListeners.clear(); - - if (listeners != null && !listeners.isEmpty()) { - trainingListeners.addAll(listeners); - } - } - - - /** - * This method ADDS additional TrainingListener to existing listeners - * - * @param listeners - */ - @Override - public void addListeners(TrainingListener... 
listeners) { - if (this.trainingListeners == null) { - setListeners(listeners); - return; - } - - Collections.addAll(trainingListeners, listeners); + public int getIndex() { + return index; } @Override @@ -836,21 +936,11 @@ public class VariationalAutoencoder implements Layer { this.index = index; } - @Override - public int getIndex() { - return index; - } - @Override public void setInput(INDArray input, LayerWorkspaceMgr layerWorkspaceMgr) { this.input = input; } - @Override - public void setInputMiniBatchSize(int size) { - - } - @Override public int getInputMiniBatchSize() { if (input.size(0) > Integer.MAX_VALUE) @@ -859,8 +949,8 @@ public class VariationalAutoencoder implements Layer { } @Override - public void setMaskArray(INDArray maskArray) { - this.maskArray = maskArray; + public void setInputMiniBatchSize(int size) { + } @Override @@ -868,6 +958,11 @@ public class VariationalAutoencoder implements Layer { return maskArray; } + @Override + public void setMaskArray(INDArray maskArray) { + this.maskArray = maskArray; + } + @Override public boolean isPretrainLayer() { return true; @@ -905,7 +1000,8 @@ public class VariationalAutoencoder implements Layer { if (solver == null) { try (MemoryWorkspace workspace = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().model(this).configure(conf()).listeners(getListeners()).build(); + solver = new Solver.Builder().model(this).configure(getNetConfiguration()).listeners( + getTrainingListeners()).build(); } } this.optimizer = solver.getOptimizer(); @@ -1138,4 +1234,13 @@ public class VariationalAutoencoder implements Layer { public void close(){ //No-op for individual layers } + + + @AllArgsConstructor + @Data + private static class VAEFwdHelper { + private INDArray[] encoderPreOuts; + private INDArray pzxMeanPreOut; + private INDArray[] encoderActivations; + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/wrapper/BaseWrapperLayer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/wrapper/BaseWrapperLayer.java index 80439cbc5..497b08aaf 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/wrapper/BaseWrapperLayer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/layers/wrapper/BaseWrapperLayer.java @@ -20,315 +20,415 @@ package org.deeplearning4j.nn.layers.wrapper; +import java.util.Collection; +import java.util.Map; import lombok.Data; import lombok.NonNull; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.MaskState; -import org.deeplearning4j.nn.api.TrainingConfig; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; +import org.deeplearning4j.nn.layers.AbstractLayer; import org.deeplearning4j.nn.layers.LayerHelper; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.deeplearning4j.optimize.api.ConvexOptimizer; import org.deeplearning4j.optimize.api.TrainingListener; -import org.nd4j.linalg.api.ndarray.INDArray; +import org.jetbrains.annotations.NotNull; import org.nd4j.common.primitives.Pair; - -import java.util.Collection; -import java.util.Map; +import org.nd4j.linalg.api.ndarray.INDArray; +import org.nd4j.linalg.dataset.api.DataSet; +import 
org.nd4j.linalg.dataset.api.MultiDataSet; +import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; @Data -public abstract class BaseWrapperLayer implements Layer { +public abstract class BaseWrapperLayer extends AbstractLayer { - protected Layer underlying; + protected Layer underlying; - public BaseWrapperLayer(@NonNull Layer underlying){ - this.underlying = underlying; - } + public BaseWrapperLayer(@NonNull Layer underlying) { + this.underlying = underlying; + this.setLayerConfiguration( underlying.getLayerConfiguration() ); + } - @Override - public void setCacheMode(CacheMode mode) { - underlying.setCacheMode(mode); - } + @Override + public BaseLayerConfiguration getTypedLayerConfiguration() { + return (BaseLayerConfiguration) underlying.getLayerConfiguration(); + } - @Override - public double calcRegularizationScore(boolean backpropParamsOnly){ - return underlying.calcRegularizationScore(backpropParamsOnly); - } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return underlying.updaterState(); + } - @Override - public Type type() { - return underlying.type(); - } + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(DataSet dataSet) { +underlying.fit(dataSet); + } - @Override - public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { - return underlying.backpropGradient(epsilon, workspaceMgr); - } + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { +underlying.fit(dataSet); + } - @Override - public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) { - return underlying.activate(training, workspaceMgr); - } + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) { +underlying.fit(iterator); + } - @Override - public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr workspaceMgr) { - return underlying.activate(input, training, workspaceMgr); - } + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { +underlying.fit(iterator); + } - @Override - public Collection getListeners() { - return underlying.getListeners(); - } + /** + * @param netConfiguration + */ + @Override + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { +underlying.setNetConfiguration(netConfiguration); + } - @Override - public void setListeners(TrainingListener... listeners) { - underlying.setListeners(listeners); - } - @Override - public void addListeners(TrainingListener... listener) { - underlying.addListeners(listener); - } + /** + * Get a reference to the network this layer is part of. 
+ * + * @return + */ + @Override + public IModel getNet() { + return underlying.getNet(); + } - @Override - public void fit() { - underlying.fit(); - } + /** + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + return underlying.getParams(); + } - @Override - public void update(Gradient gradient) { - underlying.update(gradient); - } + /** + * Return the configuration of this layer (which is the configuration of the underlying layer in + * this case + * + * @return the underlying layer configuration + */ + @Override + public LayerConfiguration getLayerConfiguration() { + return underlying.getLayerConfiguration(); + } - @Override - public void update(INDArray gradient, String paramType) { - underlying.update(gradient, paramType); - } + @Override + public void setLayerConfiguration(LayerConfiguration layerConfiguration) { + underlying.setLayerConfiguration(layerConfiguration); + } - @Override - public double score() { - return underlying.score(); - } + @Override + public void setCacheMode(CacheMode mode) { + underlying.setCacheMode(mode); + } - @Override - public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { - underlying.computeGradientAndScore(workspaceMgr); - } + @Override + public double calcRegularizationScore(boolean backpropParamsOnly) { + return underlying.calcRegularizationScore(backpropParamsOnly); + } - @Override - public INDArray params() { - return underlying.params(); - } + @Override + public Type type() { + return underlying.type(); + } - @Override - public long numParams() { - return underlying.numParams(); - } + @Override + public Pair backpropGradient(INDArray epsilon, + LayerWorkspaceMgr workspaceMgr) { + return underlying.backpropGradient(epsilon, workspaceMgr); + } - @Override - public long numParams(boolean backwards) { - return underlying.numParams(); - } + @Override + public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) { + return underlying.activate(training, workspaceMgr); + } - @Override - public void setParams(INDArray params) { - underlying.setParams(params); - } + @Override + public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr workspaceMgr) { + return underlying.activate(input, training, workspaceMgr); + } - @Override - public void setParamsViewArray(INDArray params) { - underlying.setParamsViewArray(params); - } + @NotNull + @Override + public Collection getTrainingListeners() { + return underlying.getTrainingListeners(); + } - @Override - public INDArray getGradientsViewArray() { - return underlying.getGradientsViewArray(); - } + @Override + public void addTrainingListeners(TrainingListener... 
listeners) { + underlying.addTrainingListeners(listeners); + } - @Override - public void setBackpropGradientsViewArray(INDArray gradients) { - underlying.setBackpropGradientsViewArray(gradients); - } + @Override + public void fit() { + underlying.fit(); + } - @Override - public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) { - underlying.fit(data, workspaceMgr); - } + @Override + public void update(Gradient gradient) { + underlying.update(gradient); + } - @Override - public Gradient gradient() { - return underlying.gradient(); - } + @Override + public void update(INDArray gradient, String paramType) { + underlying.update(gradient, paramType); + } - @Override - public Pair gradientAndScore() { - return underlying.gradientAndScore(); - } + @Override + public double getScore() { + return underlying.getScore(); + } - @Override - public int batchSize() { - return underlying.batchSize(); - } + @Override + public void computeGradientAndScore(LayerWorkspaceMgr workspaceMgr) { + underlying.computeGradientAndScore(workspaceMgr); + } - @Override - public NeuralNetConfiguration conf() { - return underlying.conf(); - } + @Override + public INDArray getModelParams() { + return underlying.getParams(); + } - @Override - public void setConf(NeuralNetConfiguration conf) { - underlying.setConf(conf); - } + @Override + public long numParams() { + return underlying.numParams(); + } - @Override - public INDArray input() { - return underlying.input(); - } + @Override + public long numParams(boolean backwards) { + return underlying.numParams(); + } - @Override - public ConvexOptimizer getOptimizer() { - return underlying.getOptimizer(); - } + @Override + public void setParams(INDArray params) { + underlying.setParams(params); + } - @Override - public INDArray getParam(String param) { - return underlying.getParam(param); - } + @Override + public void setParamsViewArray(INDArray params) { + underlying.setParamsViewArray(params); + } - @Override - public Map paramTable() { - return underlying.paramTable(); - } + @Override + public INDArray getGradientsViewArray() { + return underlying.getGradientsViewArray(); + } - @Override - public Map paramTable(boolean backpropParamsOnly) { - return underlying.paramTable(backpropParamsOnly); - } + @Override + public void setBackpropGradientsViewArray(INDArray gradients) { + underlying.setBackpropGradientsViewArray(gradients); + } - @Override - public void setParamTable(Map paramTable) { - underlying.setParamTable(paramTable); - } + @Override + public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) { + underlying.fit(data, workspaceMgr); + } - @Override - public void setParam(String key, INDArray val) { - underlying.setParam(key, val); - } + @Override + public Gradient gradient() { + return underlying.gradient(); + } - @Override - public void clear() { - underlying.clear(); - } + @Override + public Pair gradientAndScore() { + return underlying.gradientAndScore(); + } - @Override - public void applyConstraints(int iteration, int epoch) { - underlying.applyConstraints(iteration, epoch); - } + @Override + public int batchSize() { + return underlying.batchSize(); + } - @Override - public void init() { - underlying.init(); - } + @Override + public NeuralNetConfiguration getNetConfiguration() { + return underlying.getNetConfiguration(); + } - @Override - public void setListeners(Collection listeners) { - underlying.setListeners(listeners); - } + @Override + public INDArray input() { + return underlying.input(); + } - @Override - public void setIndex(int index) { - 
underlying.setIndex(index); - } + @Override + public ConvexOptimizer getOptimizer() { + return underlying.getOptimizer(); + } - @Override - public int getIndex() { - return underlying.getIndex(); - } + @Override + public INDArray getParam(String param) { + return underlying.getParam(param); + } - @Override - public int getIterationCount() { - return underlying.getIterationCount(); - } + @Override + public Map getParamTable() { + return underlying.getParamTable(); + } - @Override - public int getEpochCount() { - return underlying.getEpochCount(); - } + /** + * Setter for the param table + * + * @param paramTable Map<String, INDArray> + */ + @Override + public void setParamTable(Map paramTable) { + underlying.setParamTable(paramTable); + } - @Override - public void setIterationCount(int iterationCount) { - underlying.setIterationCount(iterationCount); - } + @Override + public Map getParamTable(boolean backpropParamsOnly) { + return underlying.getParamTable(backpropParamsOnly); + } - @Override - public void setEpochCount(int epochCount) { - underlying.setEpochCount(epochCount); - } + @Override + public void setParam(String key, INDArray val) { + underlying.setParam(key, val); + } - @Override - public void setInput(INDArray input, LayerWorkspaceMgr workspaceMgr) { - underlying.setInput(input, workspaceMgr); - } + @Override + public void clear() { + underlying.clear(); + } - @Override - public void setInputMiniBatchSize(int size) { - underlying.setInputMiniBatchSize(size); - } + @Override + public void applyConstraints(int iteration, int epoch) { + underlying.applyConstraints(iteration, epoch); + } - @Override - public int getInputMiniBatchSize() { - return underlying.getInputMiniBatchSize(); - } + @Override + public void init() { + underlying.init(); + } - @Override - public void setMaskArray(INDArray maskArray) { - underlying.setMaskArray(maskArray); - } + @Override + public int getIndex() { + return underlying.getIndex(); + } - @Override - public INDArray getMaskArray() { - return underlying.getMaskArray(); - } + @Override + public void setIndex(int index) { + underlying.setIndex(index); + } - @Override - public boolean isPretrainLayer() { - return underlying.isPretrainLayer(); - } + @Override + public int getIterationCount() { + return underlying.getIterationCount(); + } - @Override - public void clearNoiseWeightParams() { - underlying.clearNoiseWeightParams(); - } + @Override + public void setIterationCount(int iterationCount) { + underlying.setIterationCount(iterationCount); + } - @Override - public Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, int minibatchSize) { - return underlying.feedForwardMaskArray(maskArray, currentMaskState, minibatchSize); - } + @Override + public int getEpochCount() { + return underlying.getEpochCount(); + } - @Override - public void allowInputModification(boolean allow) { - underlying.allowInputModification(allow); - } + @Override + public void setEpochCount(int epochCount) { + underlying.setEpochCount(epochCount); + } - @Override - public LayerHelper getHelper() { - return underlying.getHelper(); - } + @Override + public void setInput(INDArray input, LayerWorkspaceMgr workspaceMgr) { + underlying.setInput(input, workspaceMgr); + } - @Override - public TrainingConfig getConfig() { - return underlying.getConfig(); - } + @Override + public int getInputMiniBatchSize() { + return underlying.getInputMiniBatchSize(); + } - @Override - public boolean updaterDivideByMinibatch(String paramName) { - return 
underlying.updaterDivideByMinibatch(paramName); - } + @Override + public void setInputMiniBatchSize(int size) { + underlying.setInputMiniBatchSize(size); + } - @Override - public void close(){ - //No-op for individual layers - } + @Override + public INDArray getMaskArray() { + return underlying.getMaskArray(); + } + + @Override + public void setMaskArray(INDArray maskArray) { + underlying.setMaskArray(maskArray); + } + + @Override + public boolean isPretrainLayer() { + return underlying.isPretrainLayer(); + } + + @Override + public void clearNoiseWeightParams() { + underlying.clearNoiseWeightParams(); + } + + @Override + public Pair feedForwardMaskArray(INDArray maskArray, + MaskState currentMaskState, int minibatchSize) { + return underlying.feedForwardMaskArray(maskArray, currentMaskState, minibatchSize); + } + + @Override + public void allowInputModification(boolean allow) { + underlying.allowInputModification(allow); + } + + @Override + public LayerHelper getHelper() { + return underlying.getHelper(); + } + + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + return underlying.getTrainingConfig(); + } + + @Override + public boolean updaterDivideByMinibatch(String paramName) { + return underlying.updaterDivideByMinibatch(paramName); + } + + @Override + public void close() { + //No-op for individual layers + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/multilayer/MultiLayerNetwork.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/multilayer/MultiLayerNetwork.java index f590a1caa..2b27c0179 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/multilayer/MultiLayerNetwork.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/multilayer/MultiLayerNetwork.java @@ -20,31 +20,38 @@ package org.deeplearning4j.nn.multilayer; - +import com.fasterxml.jackson.annotation.JsonIdentityInfo; +import com.fasterxml.jackson.annotation.ObjectIdGenerators; +import java.io.*; +import java.util.*; +import java.util.stream.Collectors; import lombok.Getter; import lombok.NonNull; import lombok.Setter; import lombok.extern.slf4j.Slf4j; import lombok.val; +import net.brutex.ai.dnn.api.IModel; +import net.brutex.ai.dnn.networks.ArtificialNeuralNetwork; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; import org.bytedeco.javacpp.Pointer; import org.deeplearning4j.datasets.iterator.MultiDataSetWrapperIterator; import org.deeplearning4j.exception.DL4JException; import org.deeplearning4j.exception.DL4JInvalidInputException; -import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.api.*; +import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.api.layers.IOutputLayer; import org.deeplearning4j.nn.api.layers.RecurrentLayer; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.inputs.InputType; -import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; -import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.Bidirectional; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.graph.ComputationGraph; +import org.deeplearning4j.nn.layers.BaseLayer; import org.deeplearning4j.nn.layers.FrozenLayer; import 
org.deeplearning4j.nn.layers.FrozenLayerWithBackprop; import org.deeplearning4j.nn.layers.LayerHelper; @@ -58,8 +65,12 @@ import org.deeplearning4j.optimize.api.ConvexOptimizer; import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.optimize.solvers.accumulation.GradientsAccumulator; import org.deeplearning4j.util.*; +import org.jetbrains.annotations.NotNull; import org.nd4j.adapters.OutputAdapter; import org.nd4j.common.base.Preconditions; +import org.nd4j.common.primitives.Pair; +import org.nd4j.common.primitives.Triple; +import org.nd4j.common.util.OneTimeLogger; import org.nd4j.evaluation.IEvaluation; import org.nd4j.evaluation.classification.Evaluation; import org.nd4j.evaluation.classification.ROC; @@ -88,4057 +99,4710 @@ import org.nd4j.linalg.heartbeat.reports.Task; import org.nd4j.linalg.heartbeat.utils.EnvironmentUtils; import org.nd4j.linalg.heartbeat.utils.TaskUtils; import org.nd4j.linalg.indexing.NDArrayIndex; -import org.nd4j.common.primitives.Pair; -import org.nd4j.common.primitives.Triple; import org.nd4j.linalg.schedule.ISchedule; import org.nd4j.linalg.util.FeatureUtil; import org.nd4j.linalg.workspace.ND4JWorkspaceException; import org.nd4j.linalg.workspace.WorkspaceUtils; -import org.nd4j.common.util.OneTimeLogger; - -import java.io.*; -import java.util.*; - +/** + * Artificial Neural Network An artificial neural network (1) takes some input data, and (2) + * transforms this input data by calculating a weighted sum over the inputs and (3) applies a + * non-linear function to this transformation to calculate an intermediate state. The three steps + * above constitute what is known as a layer, and the transformative function is often referred to + * as a unit. The intermediate states—often termed features—are used as the input into another + * layer. + * + *

Through repetition of these steps, the artificial neural network learns multiple layers of + * non-linear features, which it then combines in a final layer to create a prediction. + * + *
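+ * <p>For illustration only, the per-layer transformation described above written with ND4J;
+ * the names {@code x}, {@code W} and {@code b} are placeholders for a layer's input, weight
+ * matrix and bias row vector:
+ * <pre>{@code
+ * INDArray weightedSum = x.mmul(W).addRowVector(b);    // (2) weighted sum over the inputs
+ * INDArray features    = Transforms.relu(weightedSum); // (3) non-linear unit -> features
+ * }</pre>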

The neural network learns by generating an error signal that measures the difference between + * the predictions of the network and the desired values and then using this error signal to change + * the weights (or parameters) so that predictions get more accurate. + */ @Slf4j -public class MultiLayerNetwork implements Serializable, Classifier, Layer, NeuralNetwork { +@JsonIdentityInfo(generator = ObjectIdGenerators.IntSequenceGenerator.class, property = "@id") +public class MultiLayerNetwork extends ArtificialNeuralNetwork + implements Serializable, Classifier, Layer, ITrainableLayer { - //the hidden neural network layers (including output layer) - protected Layer[] layers; - protected LinkedHashMap layerMap = new LinkedHashMap<>(); + /** + * Workspace for working memory for a single layer: forward pass and backward pass Note that this + * is opened/closed once per op (activate/backpropGradient call) + */ + protected static final String WS_LAYER_WORKING_MEM = "WS_LAYER_WORKING_MEM"; + /** + * Workspace for storing all layers' activations - used only to store activations (layer inputs) + * as part of backprop Not used for inference + */ + protected static final String WS_ALL_LAYERS_ACT = "WS_ALL_LAYERS_ACT"; + /** + * Next 2 workspaces: used for: (a) Inference: holds activations for one layer only (b) Backprop: + * holds activation gradients for one layer only In both cases, they are opened and closed on + * every second layer + */ + protected static final String WS_LAYER_ACT_1 = "WS_LAYER_ACT_1"; - //Current training data: input features and labels - protected INDArray input, labels; + protected static final String WS_LAYER_ACT_2 = "WS_LAYER_ACT_2"; + /** Workspace for output methods that use OutputAdapter */ + protected static final String WS_OUTPUT_MEM = "WS_OUTPUT_MEM"; + /** Workspace for working memory in RNNs - opened and closed once per RNN time step */ + protected static final String WS_RNN_LOOP_WORKING_MEM = "WS_RNN_LOOP_WORKING_MEM"; - protected boolean initCalled = false; - protected Collection trainingListeners = new ArrayList<>(); + protected static final WorkspaceConfiguration WS_ALL_LAYERS_ACT_CONFIG = + WorkspaceConfiguration.builder() + .initialSize(0) + .overallocationLimit(0.05) + .policyLearning(LearningPolicy.FIRST_LOOP) + .policyReset(ResetPolicy.BLOCK_LEFT) + .policySpill(SpillPolicy.REALLOCATE) + .policyAllocation(AllocationPolicy.OVERALLOCATE) + .build(); + protected static final WorkspaceConfiguration WS_RNN_LOOP_WORKING_MEM_CONFIG = + WorkspaceConfiguration.builder() + .initialSize(0) + .overallocationLimit(0.05) + .policyReset(ResetPolicy.BLOCK_LEFT) + .policyAllocation(AllocationPolicy.OVERALLOCATE) + .policySpill(SpillPolicy.REALLOCATE) + .policyLearning(LearningPolicy.FIRST_LOOP) + .build(); + // the hidden neural network layers (including output layer) + protected Layer[] layers; + // Current training data: input features and labels + protected INDArray input, labels; + protected boolean initCalled = false; + protected Collection trainingListeners = new ArrayList<>(); + protected Gradient gradient; + protected double score; + @Setter protected boolean initDone = false; + protected INDArray flattenedParams; // Params for all layers are a view/subset of this array - protected NeuralNetConfiguration defaultConfiguration; - protected MultiLayerConfiguration layerWiseConfigurations; - protected Gradient gradient; - protected double score; - @Setter - protected boolean initDone = false; - protected INDArray flattenedParams; //Params for all layers are a 
view/subset of this array - @Getter - protected transient INDArray flattenedGradients; //Gradients for all layers are a view/subset of this array + @Getter + protected transient INDArray + flattenedGradients; // Gradients for all layers are a view/subset of this array - protected boolean clearTbpttState = true; //Mainly for unit testing (should be enabled otherwise) - protected transient ThreadLocal lastEtlTime = new ThreadLocal<>(); - protected INDArray mask; + protected boolean clearTbpttState = true; // Mainly for unit testing (should be enabled otherwise) + protected transient ThreadLocal lastEtlTime = new ThreadLocal<>(); + protected INDArray mask; + protected int layerIndex; // For LayerConfiguration.get/setIndex() + protected transient Solver solver; // Used to call optimizers during backprop + // Workspaces for CUDNN. Pass to LayerWorkspaceMgr for re-use in cudnn helpers + @Getter protected transient Map helperWorkspaces = new HashMap<>(); + protected WorkspaceConfiguration WS_LAYER_WORKING_MEM_CONFIG; + protected WorkspaceConfiguration WS_LAYER_ACT_X_CONFIG; - protected int layerIndex; //For Layer.get/setIndex() + public MultiLayerNetwork(@NotNull NeuralNetConfiguration conf) { + super(conf); - protected transient Solver solver; //Used to call optimizers during backprop - //Workspaces for CUDNN. Pass to LayerWorkspaceMgr for re-use in cudnn helpers - @Getter - protected transient Map helperWorkspaces = new HashMap<>(); + // Working memory: should learn over course of: (a) full forward pass, and (b) full backward + // pass + // Working memory should be opened once per layer and once per preprocessor, for each of forward + // and backward passes + int numWorkingMem = + 2 * (conf.getFlattenedLayerConfigurations().size() + conf.getInputPreProcessors().size()); + WS_LAYER_WORKING_MEM_CONFIG = getLayerWorkingMemWSConfig(numWorkingMem); + WS_LAYER_ACT_X_CONFIG = + getLayerActivationWSConfig(conf.getFlattenedLayerConfigurations().size()); + } + public MultiLayerNetwork(@NotNull NeuralNetBaseBuilderConfiguration conf) { + this((NeuralNetConfiguration) conf); + } - /** - * Workspace for working memory for a single layer: forward pass and backward pass - * Note that this is opened/closed once per op (activate/backpropGradient call) - */ - protected static final String WS_LAYER_WORKING_MEM = "WS_LAYER_WORKING_MEM"; - /** - * Workspace for storing all layers' activations - used only to store activations (layer inputs) as part of backprop - * Not used for inference - */ - protected static final String WS_ALL_LAYERS_ACT = "WS_ALL_LAYERS_ACT"; - /** - * Next 2 workspaces: used for: - * (a) Inference: holds activations for one layer only - * (b) Backprop: holds activation gradients for one layer only - * In both cases, they are opened and closed on every second layer - */ - protected static final String WS_LAYER_ACT_1 = "WS_LAYER_ACT_1"; - protected static final String WS_LAYER_ACT_2 = "WS_LAYER_ACT_2"; + /** + * Initialize the network based on the configuration (a NeuralNetConfiguration in JSON format) and + * parameters array + * + * @param conf the configuration json + * @param params the parameters for the network + */ + public MultiLayerNetwork(String conf, INDArray params) { + this(NeuralNetConfiguration.fromJson(conf)); + init(); + setParameters(params); + } - /** - * Workspace for output methods that use OutputAdapter - */ - protected static final String WS_OUTPUT_MEM = "WS_OUTPUT_MEM"; + /** + * Initialize the network based on the configuration and parameters array + * + * @param conf the 
configuration + * @param params the parameters + */ + public MultiLayerNetwork(NeuralNetConfiguration conf, INDArray params) { + this(conf); + init(); + setParameters(params); + } - /** - * Workspace for working memory in RNNs - opened and closed once per RNN time step - */ - protected static final String WS_RNN_LOOP_WORKING_MEM = "WS_RNN_LOOP_WORKING_MEM"; + protected static WorkspaceConfiguration getLayerWorkingMemWSConfig(int numWorkingMemCycles) { + return WorkspaceConfiguration.builder() + .initialSize(0) + .overallocationLimit(0.02) + .policyLearning(LearningPolicy.OVER_TIME) + .cyclesBeforeInitialization(numWorkingMemCycles) + .policyReset(ResetPolicy.BLOCK_LEFT) + .policySpill(SpillPolicy.REALLOCATE) + .policyAllocation(AllocationPolicy.OVERALLOCATE) + .build(); + } + protected static WorkspaceConfiguration getLayerActivationWSConfig(int numLayers) { + // Activations memory: opened once per layer - for every second layer (preprocessors are within + // the loop). + // Technically we could set learning to numLayers / 2, but will set to numLayers for simplicity, + // and also to + // account for a backward pass + return WorkspaceConfiguration.builder() + .initialSize(0) + .overallocationLimit(0.02) + .policyLearning(LearningPolicy.OVER_TIME) + .cyclesBeforeInitialization(numLayers) + .policyReset(ResetPolicy.BLOCK_LEFT) + .policySpill(SpillPolicy.REALLOCATE) + .policyAllocation(AllocationPolicy.OVERALLOCATE) + .build(); + } - protected WorkspaceConfiguration WS_LAYER_WORKING_MEM_CONFIG; + /** + * Restore a MultiLayerNetwork to a file, saved using {@link #save(File)} or {@link + * ModelSerializer} + * + * @param f File to load the network from + * @param loadUpdater If true: load the updater if it is available (i.e., the state array for + * momentum/Adam/rmsprop etc) - use false if no further training is required, or + * true if further training will be undertaken + * @see ModelSerializer ModelSerializer for more details (and saving/loading via streams) + */ + public static MultiLayerNetwork load(File f, boolean loadUpdater) throws IOException { + return ModelSerializer.restoreMultiLayerNetwork(f, loadUpdater); + } - protected static final WorkspaceConfiguration WS_ALL_LAYERS_ACT_CONFIG = WorkspaceConfiguration.builder() - .initialSize(0) - .overallocationLimit(0.05) - .policyLearning(LearningPolicy.FIRST_LOOP) - .policyReset(ResetPolicy.BLOCK_LEFT) - .policySpill(SpillPolicy.REALLOCATE) - .policyAllocation(AllocationPolicy.OVERALLOCATE) - .build(); + /** + * Get a reference to this neural network. + * + * @return + */ + @Override + public IModel getNet() { + return this; + } - protected WorkspaceConfiguration WS_LAYER_ACT_X_CONFIG; + /** + * Return the configuration of this layer + * + * @return the configuration + */ + @Override + public LayerConfiguration getLayerConfiguration() { + // TODO + throw new RuntimeException( + "getLayerConfiguration cannot be called on a MultiLayerNetwork. This function is here because of inheritance from Layer (which should be fixed)."); + } - protected static final WorkspaceConfiguration WS_RNN_LOOP_WORKING_MEM_CONFIG = WorkspaceConfiguration.builder() - .initialSize(0).overallocationLimit(0.05).policyReset(ResetPolicy.BLOCK_LEFT) - .policyAllocation(AllocationPolicy.OVERALLOCATE).policySpill(SpillPolicy.REALLOCATE) - .policyLearning(LearningPolicy.FIRST_LOOP).build(); + /** + * Set a new layer configuration, new init() needs to be called afterwards. 
+ * + * @param lconf layer configuration + */ + @Override + public void setLayerConfiguration(LayerConfiguration lconf) { + throw new RuntimeException("setLayerConfiguration has no effect on a MultiLayerNetwork"); + } - - public MultiLayerNetwork(MultiLayerConfiguration conf) { - this.layerWiseConfigurations = conf; - this.defaultConfiguration = conf.getConf(0).clone(); - - //Working memory: should learn over course of: (a) full forward pass, and (b) full backward pass - //Working memory should be opened once per layer and once per preprocessor, for each of forward and backward passes - int numWorkingMem = 2 * (layerWiseConfigurations.getConfs().size() + layerWiseConfigurations.getInputPreProcessors().size()); - WS_LAYER_WORKING_MEM_CONFIG = getLayerWorkingMemWSConfig(numWorkingMem); - WS_LAYER_ACT_X_CONFIG = getLayerActivationWSConfig(layerWiseConfigurations.getConfs().size()); + /** + * This method sets specified CacheMode for all layers within network + * + * @param mode + */ + public void setCacheMode(CacheMode mode) { + if (mode == null) { + mode = CacheMode.NONE; } - protected static WorkspaceConfiguration getLayerWorkingMemWSConfig(int numWorkingMemCycles){ - return WorkspaceConfiguration.builder() - .initialSize(0) - .overallocationLimit(0.02) - .policyLearning(LearningPolicy.OVER_TIME) - .cyclesBeforeInitialization(numWorkingMemCycles) - .policyReset(ResetPolicy.BLOCK_LEFT) - .policySpill(SpillPolicy.REALLOCATE) - .policyAllocation(AllocationPolicy.OVERALLOCATE) + for (Layer layer : layers) { + layer.setCacheMode(mode); + } + } + + /** + * Get the last ETL time. This in informational, and is the amount of time in milliseconds that + * was required to obtain the last DataSet/MultiDataSet during fitting. A value consistently above + * 0 may indicate a data feeding bottleneck, or no asynchronous data prefetching (async prefetch + * is enabled by default) + * + * @return The last ETL time in milliseconds, if avaliable (or 0 if not) + */ + public long getLastEtlTime() { + Long time = lastEtlTime.get(); + return time == null ? 0L : time; + } + + /** + * Set the last ETL time in milliseconds, for informational/reporting purposes. Generally used + * internally. + * + * @param time ETL time + */ + public void setLastEtlTime(long time) { + lastEtlTime.set(time); + } + + /** Perform layerwise pretraining for one epoch - see {@link #pretrain(DataSetIterator, int)} */ + public void pretrain(DataSetIterator iter) { + pretrain(iter, 1); + } + + /** + * Perform layerwise unsupervised training on all pre-trainable layers in the network (VAEs, + * Autoencoders, etc), for the specified number of epochs each. For example, if numEpochs=3, then + * layer 0 will be fit for 3 epochs, followed by layer 1 for 3 epochs, and so on.
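+ * <p>A minimal usage sketch ({@code conf} and {@code trainIter} are placeholders for a
+ * NeuralNetConfiguration and a DataSetIterator over the training data):
+ * <pre>{@code
+ * MultiLayerNetwork net = new MultiLayerNetwork(conf);
+ * net.init();
+ * net.pretrain(trainIter, 3); // layer 0 for 3 epochs, then layer 1 for 3 epochs, ...
+ * }</pre>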
+ * Note that pretraining will be performed on one layer after the other. To perform unsupervised + * training on a single layer, use {@link #pretrainLayer(int, DataSetIterator)} + * + * @param iter Training data + */ + public void pretrain(DataSetIterator iter, int numEpochs) { + if (flattenedGradients == null) { + initGradientsView(); + } + + for (int i = 0; i < getnLayers(); i++) { + pretrainLayer(i, iter, numEpochs); + } + } + + /** Fit for one epoch - see {@link #pretrainLayer(int, DataSetIterator, int)} */ + public void pretrainLayer(int layerIdx, DataSetIterator iter) { + pretrainLayer(layerIdx, iter, 1); + } + + /** + * Perform layerwise unsupervised training on a single pre-trainable layer in the network (VAEs, + * Autoencoders, etc) for the specified number of epochs
+ * If the specified layer index (0 to numLayers - 1) is not a pretrainable layer, this is a no-op. + * + * @param layerIdx Index of the layer to train (0 to numLayers-1) + * @param iter Training data + * @param numEpochs Number of epochs to fit the specified layer for + */ + public void pretrainLayer(int layerIdx, DataSetIterator iter, int numEpochs) { + Preconditions.checkState( + numEpochs > 0, "Number of epochs (%s) must be a positive number", numEpochs); + + if (flattenedGradients == null) { + initGradientsView(); + } + if (layerIdx >= layers.length) { + throw new IllegalArgumentException( + "Cannot pretrain layer: layerIdx (" + + layerIdx + + ") >= numLayers (" + + layers.length + + ")"); + } + + Layer layer = layers[layerIdx]; + if (!layer.isPretrainLayer()) { + return; + } + + if (numEpochs > 1 && !iter.resetSupported()) { + throw new IllegalStateException( + "Cannot fit multiple epochs (" + + numEpochs + + ") on an iterator that doesn't support resetting"); + } + + if (!iter.hasNext() && iter.resetSupported()) { + iter.reset(); + } + + log.info( + "Starting unsupervised training on layer " + layerIdx + " for " + numEpochs + " epochs"); + for (int i = 0; i < numEpochs; i++) { + if (i > 0) { + iter.reset(); + } + + while (iter.hasNext()) { + DataSet next = iter.next(); + input = next.getFeatures(); + pretrainLayer(layerIdx, input); + } + } + + int ec = getLayer(layerIdx).getNetConfiguration().getEpochCount() + 1; + getLayer(layerIdx).getNetConfiguration().setEpochCount(ec); + } + + /** + * Perform layerwise unsupervised training on a single pre-trainable layer in the network (VAEs, + * Autoencoders, etc)
+ * If the specified layer index (0 to numLayers - 1) is not a pretrainable layer, this is a no-op. + * + * @param layerIdx Index of the layer to train (0 to numLayers-1) + * @param features Training data array + */ + public void pretrainLayer(int layerIdx, INDArray features) { + setInput(features); + setLayerMaskArrays(null, null); + + if (flattenedGradients == null) { + initGradientsView(); + } + if (layerIdx >= layers.length) { + throw new IllegalArgumentException( + "Cannot pretrain layer: layerIdx (" + + layerIdx + + ") >= numLayers (" + + layers.length + + ")"); + } + + LayerWorkspaceMgr workspaceMgr; + if (getNetConfiguration().getTrainingWorkspaceMode() == WorkspaceMode.NONE) { + workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + workspaceMgr = + LayerWorkspaceMgr.builder() + .defaultWorkspace(WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + } + workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); + + Layer layer = layers[layerIdx]; + if (!layer.isPretrainLayer()) { + return; + } + + // Do forward pass to the layer to be pretrained + INDArray outputOfPrevLayer; + if (layerIdx == 0) { + outputOfPrevLayer = input; + } else { + // Yes, this part of training - but we'll do forward psas as inference mode when doing + // layerwise training + // to effectively freeze earlier layers and not apply dropout etc + outputOfPrevLayer = + outputOfLayerDetached( + false, FwdPassType.STANDARD, layerIndex - 1, features, null, null, null); + } + + try (MemoryWorkspace ws = workspaceMgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { + if (getNetConfiguration().getInputPreProcess(layerIdx) != null) { + + if (input.size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + outputOfPrevLayer = + getNetConfiguration() + .getInputPreProcess(layerIdx) + .preProcess( + outputOfPrevLayer, + (int) input.size(0), + LayerWorkspaceMgr.noWorkspaces(helperWorkspaces)); + } + + layer.fit(outputOfPrevLayer, workspaceMgr); + } + } + + @Override + public int batchSize() { + // In 99+% of cases, the input and labels dimension 0 size should be identical + // The only real exceptions: space to batch, and batch to space layers + // In those cases, we should base it on the labels size, as this impacts gradient calculation + if (input.size(0) > Integer.MAX_VALUE || labels.size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + return labels == null ? (int) input.size(0) : (int) labels.size(0); + } + + @Override + public INDArray input() { + return input; + } + + @Override + public ConvexOptimizer getOptimizer() { + return solver.getOptimizer(); + } + + /** + * Get one parameter array for the network.
+ * In MultiLayerNetwork, parameters are keyed like "0_W" and "0_b" to mean "weights of layer index + * 0" and "biases of layer index 0" respectively. Numbers increment sequentially, and the suffixes + * ("W", "b" etc.) depend on the layer type, and are defined in the relevant parameter + * initializers for each layer.
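+ * <p>For example (illustrative only; {@code net} is a placeholder for an initialized network
+ * whose layer 0 has weights and biases):
+ * <pre>{@code
+ * INDArray w0 = net.getParam("0_W"); // weight matrix of layer index 0
+ * INDArray b0 = net.getParam("0_b"); // bias vector of layer index 0
+ * }</pre>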
+ * Note that the returned INDArrays are views of the underlying network parameters, so + * modifications of the returned arrays will impact the parameters of the network. + * + * @param param the key of the parameter + * @return The specified parameter array for the network + * @see #getParamTable() paramTable() method, for a map of all parameters + */ + @Override + public INDArray getParam(String param) { + // Get params for MultiLayerNetwork sub layers. + int idx = param.indexOf('_'); + if (idx == -1) { + throw new IllegalStateException( + "Invalid param key: does not have layer separator: \"" + param + "\""); + } + int layerIdx = Integer.parseInt(param.substring(0, idx)); + String newKey = param.substring(idx + 1); + + return layers[layerIdx].getParam(newKey); + } + + /** + * Returns a map of all parameters in the network as per {@link #getParamTable()}.
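+ * <p>Illustrative sketch ({@code net} is a placeholder for an initialized network): iterate
+ * over the flattened parameter map, whose keys follow the "layerIndex_paramName" convention:
+ * <pre>{@code
+ * for (Map.Entry<String, INDArray> e : net.paramTable(false).entrySet()) {
+ *   System.out.println(e.getKey() + " -> " + Arrays.toString(e.getValue().shape()));
+ * }
+ * }</pre>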
+ * Optionally (with backpropParamsOnly=true) only the 'backprop' parameters are returned - that + * is, any parameters involved only in unsupervised layerwise pretraining not standard + * inference/backprop are excluded from the returned list. + * + * @param backpropParamsOnly If true, return backprop params only. If false: return all params + * @return Parameters for the network + */ + public Map paramTable(boolean backpropParamsOnly) { + // Get all parameters from all layers + Map allParams = new LinkedHashMap<>(); + for (int i = 0; i < layers.length; i++) { + Map paramMap = layers[i].getParamTable(backpropParamsOnly); + for (Map.Entry entry : paramMap.entrySet()) { + String newKey = i + "_" + entry.getKey(); + allParams.put(newKey, entry.getValue()); + } + } + return allParams; + } + + /** Intended for internal use */ + @Override + public boolean updaterDivideByMinibatch(String paramName) { + int idx = paramName.indexOf('_'); + int layerIdx = Integer.parseInt(paramName.substring(0, idx)); + String subName = paramName.substring(idx + 1); + return ((BaseLayer) getLayer(layerIdx)).updaterDivideByMinibatch(subName); + } + + /** + * Set the values of a single parameter. See {@link #setParamTable(Map)} and {@link + * #getParam(String)} for more details. + * + * @param key the key of the parameter to set + * @param val the new values for the parameter + */ + @Override + public void setParam(String key, INDArray val) { + // Set params for MultiLayerNetwork sub layers. + int idx = key.indexOf('_'); + if (idx == -1) { + throw new IllegalStateException( + "Invalid param key: not have layer separator: \"" + key + "\""); + } + int layerIdx = Integer.parseInt(key.substring(0, idx)); + String newKey = key.substring(idx + 1); + + layers[layerIdx].setParam(newKey, val); + } + + /** + * Initialize the MultiLayerNetwork. This should be called once before the network is used. This + * is functionally equivalent to calling {@code init(null, false)}. + * + * @see MultiLayerNetwork#init(INDArray, boolean) + */ + public void init() { + init(null, false); + } + + /** + * Initialize the MultiLayerNetwork, optionally with an existing parameters array. If an existing + * parameters array is specified, it will be used (and the values will not be modified) in the + * network; if no parameters array is specified, parameters will be initialized randomly according + * to the network configuration. + * + * @param parameters Network parameter. May be null. If null: randomly initialize. + * @param cloneParametersArray Whether the parameter array (if any) should be cloned, or used + * directly + */ + public void init(INDArray parameters, boolean cloneParametersArray) { + if (initCalled) { + log.trace( + "Initialisation in {} has already been called. Ignoring additional call to init().", + getClass().getSimpleName()); + return; + } + + /** + * Initialize the neural network configuration first. This also triggers inheritance of + * configuration setting where needed. + */ + getNetConfiguration().setNeuralNet(this); + getNetConfiguration() + .init(); // we cannot do this in constructor, as the config might be attached later. + + DataType netDtype = getNetConfiguration().getDataType(); + if (parameters != null && parameters.dataType() != netDtype) { + Preconditions.checkState( + parameters.rank() == 2 && parameters.size(0) == 1, + "Invalid parameters array: should be rank 2 with shape [1,numParams]. 
Got %ndShape", + parameters); + if (cloneParametersArray) { + try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { + parameters = parameters.castTo(netDtype); + } + } else { + throw new IllegalStateException( + "Error initializing network: Network datatype is set to " + + netDtype + + " but provided array has datatype " + + parameters.dataType() + + " with cloneParametersArray argument" + + " set to false. Cannot initialize net with specified datatype array if that array does not match network datatype"); + } + } + /** Set default Training and Inference Workspace modes unless set already */ + if (getNetConfiguration().getTrainingWorkspaceMode() == null) { + getNetConfiguration().setTrainingWorkspaceMode(WorkspaceMode.NONE); + } + if (getNetConfiguration().getInferenceWorkspaceMode() == null) { + getNetConfiguration().setInferenceWorkspaceMode(WorkspaceMode.NONE); + } + /** set default Cache mode, unless set already */ + if (getNetConfiguration().getCacheMode() == null) { + getNetConfiguration().setCacheMode(CacheMode.NONE); + } + + OneTimeLogger.info( + log, // Todo: Why not SLF4J? + "Starting MultiLayerNetwork with WorkspaceModes set to [training: {}; inference: {}], cacheMode set to [{}]", + getNetConfiguration().getTrainingWorkspaceMode(), + getNetConfiguration().getInferenceWorkspaceMode(), + getNetConfiguration().getCacheMode()); + + int nLayers = getNetConfiguration().getFlattenedLayerConfigurations().size(); + if (nLayers < 1) { + throw new IllegalStateException("Unable to create network: number of layers is less than 1"); + } + + /** Initialize the array of Layers for this network using the number of LayerConfigurations */ + if (this.layers == null || this.layers[0] == null) { + if (this.layers == null) { + this.layers = new Layer[nLayers]; + } + + // First: Work out total length of params + long paramLength = 0; + val nParamsPerLayer = new long[nLayers]; + for (int i = 0; i < nLayers; i++) { + LayerConfiguration layer_conf = + getNetConfiguration().getFlattenedLayerConfigurations().get(i); + // Test if Layer type has parameters (is inherited from BaseLayerConfiguration rather then + // LayerConfiguration + if (layer_conf instanceof BaseLayerConfiguration) + ((BaseLayerConfiguration) layer_conf).setDataType(netDtype); + + nParamsPerLayer[i] = layer_conf.initializer().numParams(layer_conf); + paramLength += nParamsPerLayer[i]; + } + log.debug( + "Neural Network {} is initializes with a total number of {} parameters from {} layers.", + getClass().getSimpleName(), + paramLength, + nLayers); + + // Create parameters array, if required + boolean initializeParams; + if (parameters != null) { + if (!parameters.isRowVectorOrScalar()) { + throw new IllegalArgumentException("Invalid parameters: should be a row vector"); + } + if (parameters.length() != paramLength) { + throw new IllegalArgumentException( + "Invalid parameters: expected length " + + paramLength + + ", got length " + + parameters.length()); + } + + if (cloneParametersArray) { + flattenedParams = parameters.dup(); + } else { + flattenedParams = parameters; + } + + initializeParams = false; + } else if (paramLength > 0) { + flattenedParams = Nd4j.create(netDtype, 1, paramLength); + initializeParams = true; + } else { + // Edge case: 0 params in network + flattenedParams = null; + initializeParams = false; + } + + // Set RNG seed, for repeatability between initializations when set + if (initializeParams) { + Nd4j.getRandom().setSeed(getNetConfiguration().getSeed()); + } + + // construct multi-layer + 
long paramCountSoFar = 0; + for (int i = 0; i < nLayers; i++) { + INDArray paramsView; + if (nParamsPerLayer[i] > 0) { + paramsView = + flattenedParams.get( + NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(paramCountSoFar, paramCountSoFar + nParamsPerLayer[i])); + } else { + paramsView = null; + } + paramCountSoFar += nParamsPerLayer[i]; + @NonNull + LayerConfiguration lc = getNetConfiguration().getFlattenedLayerConfigurations().get(i); + layers[i] = + lc.instantiate( + lc.getNetConfiguration(), + trainingListeners, + i, + paramsView, + initializeParams, + netDtype); + } + initCalled = true; + } + + // Set parameters in MultiLayerNetwork.getNetConfiguration() for later use in + // BaseOptimizer.setupSearchState() etc + getNetConfiguration().clearNetWideVariable(); + List variables = getNetConfiguration().netWideVariables(false); + for (int i = 0; i < layers.length; i++) { + if (layers[i] == null) { + throw new IllegalStateException( + "Encountered null layer during initialization for layer " + + i + + ": " + + layers[i].getClass().getSimpleName() + + " initialization " + + "returned null layer?"); + } + for (String s : layers[i].getLayerConfiguration().getVariables()) { + variables.add(i + "_" + s); + } + } + + // now we init solver & optimizer + if (solver == null) { + try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) .build(); + solver.initOptimizer(); + } } - protected static WorkspaceConfiguration getLayerActivationWSConfig(int numLayers){ - //Activations memory: opened once per layer - for every second layer (preprocessors are within the loop). - //Technically we could set learning to numLayers / 2, but will set to numLayers for simplicity, and also to - // account for a backward pass - return WorkspaceConfiguration.builder() - .initialSize(0) - .overallocationLimit(0.02) - .policyLearning(LearningPolicy.OVER_TIME) - .cyclesBeforeInitialization(numLayers) - .policyReset(ResetPolicy.BLOCK_LEFT) - .policySpill(SpillPolicy.REALLOCATE) - .policyAllocation(AllocationPolicy.OVERALLOCATE) + // Mark that input modification is allowed. + // TODO When is it safe to NOT skip the very first layer? It's not always safe... + // For example dropout + iterating over List that is used for multiple epochs... + for (int i = 1; i < layers.length; i++) { + layers[i].allowInputModification(true); + } + + synchronizeIterEpochCounts(); + } + + /** + * This method allows you to specificy GradientsAccumulator instance to be used with this model + *
+ *
+ * PLEASE NOTE: Do not use this method unless you understand how to use GradientsAccumulator and + * updates sharing.
+ * PLEASE NOTE: Do not use this method on standalone model + * + * @param accumulator Gradient accumulator to use for the network + */ + public void setGradientsAccumulator(GradientsAccumulator accumulator) { + if (!isInitCalled()) { + init(); + } + + if (solver == null) { + try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) .build(); + } } - /** - * This method sets specified CacheMode for all layers within network - * - * @param mode - */ - public void setCacheMode(CacheMode mode) { - if (mode == null) - mode = CacheMode.NONE; + solver.getOptimizer().setGradientsAccumulator(accumulator); + } - for (Layer layer : layers) { - layer.setCacheMode(mode); - } - } + public boolean isInitCalled() { + return initCalled; + } - /** - * Set the last ETL time in milliseconds, for informational/reporting purposes. Generally used internally. - * @param time ETL time - */ - public void setLastEtlTime(long time) { - lastEtlTime.set(time); - } - - /** - * Get the last ETL time. This in informational, and is the amount of time in milliseconds that was required - * to obtain the last DataSet/MultiDataSet during fitting. - * A value consistently above 0 may indicate a data feeding bottleneck, or no asynchronous data prefetching (async - * prefetch is enabled by default) - * @return The last ETL time in milliseconds, if avaliable (or 0 if not) - */ - public long getLastEtlTime() { - Long time = lastEtlTime.get(); - return time == null ? 0L : time; - } - - /** - * Initialize the network based on the configuration (a MultiLayerConfiguration in JSON format) and parameters array - * - * @param conf the configuration json - * @param params the parameters for the network - */ - public MultiLayerNetwork(String conf, INDArray params) { - this(MultiLayerConfiguration.fromJson(conf)); + /** + * This method: initializes the flattened gradients array (used in backprop) and sets the + * appropriate subset in all layers. As a general rule, this shouldn't ever need to be called + * manually when doing training via fit(DataSet) or fit(DataSetIterator) + */ + public void initGradientsView() { + try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + if (layers == null) { init(); - setParameters(params); + } + + int nLayers = layers.length; + + // First: Work out total length of params + long paramLength = 0; + val nParamsPerLayer = new long[nLayers]; + for (int i = 0; i < nLayers; i++) { + LayerConfiguration layerConfiguration = + getNetConfiguration().getFlattenedLayerConfigurations().get(i); + nParamsPerLayer[i] = + layerConfiguration + .initializer() + .numParams(layerConfiguration); // TODO better initialisation + paramLength += nParamsPerLayer[i]; + } + + if (paramLength > 0) { + flattenedGradients = + Nd4j.create( + flattenedParams.dataType(), + new long[] {1, paramLength}, + 'f'); // No need to initialize, as each layer will do it each iteration anyway + } + + long paramsSoFar = 0; + for (int i = 0; i < layers.length; i++) { + if (nParamsPerLayer[i] == 0) { + continue; // This layer doesn't have any parameters... 
+ } + INDArray thisLayerGradView = + flattenedGradients.get( + NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(paramsSoFar, paramsSoFar + nParamsPerLayer[i])); + layers[i].setBackpropGradientsViewArray(thisLayerGradView); + paramsSoFar += nParamsPerLayer[i]; + } + } + } + + protected INDArray activationFromPrevLayer( + int curr, INDArray input, boolean training, LayerWorkspaceMgr mgr) { + if (getNetConfiguration().getInputPreProcess(curr) != null) { + input = + getNetConfiguration() + .getInputPreProcess(curr) + .preProcess(input, getInputMiniBatchSize(), mgr); } + INDArray ret = layers[curr].activate(input, training, mgr); + return ret; + } - /** - * Initialize the network based on the configuration and parameters array - * - * @param conf the configuration - * @param params the parameters - */ - public MultiLayerNetwork(MultiLayerConfiguration conf, INDArray params) { - this(conf); - init(); - setParameters(params); + /** + * Calculate activation for few layers at once. Suitable for autoencoder partial activation. + * + *
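+ * <p>As an illustrative sketch of the autoencoder example given below ({@code net} and
+ * {@code features} are placeholders), the encoder half would be run as:
+ * <pre>{@code
+ * INDArray encoded = net.activateSelectedLayers(0, 4, features); // layers 0..4 inclusive
+ * }</pre>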

In example: in 10-layer deep autoencoder, layers 0 - 4 inclusive are used for encoding part, + * and layers 5-9 inclusive are used for decoding part. + * + * @param from first layer to be activated, inclusive + * @param to last layer to be activated, inclusive + * @return the activation from the last layer + */ + public INDArray activateSelectedLayers(int from, int to, INDArray input) { + if (input == null) { + throw new IllegalStateException("Unable to perform activation; no input found"); + } + if (from < 0 || from >= layers.length || from >= to) { + throw new IllegalStateException("Unable to perform activation; FROM is out of layer space"); + } + if (to < 1 || to >= layers.length) { + throw new IllegalStateException("Unable to perform activation; TO is out of layer space"); } + try { + LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(helperWorkspaces); // TODO - protected void intializeConfigurations() { - if (layerWiseConfigurations == null) - layerWiseConfigurations = new MultiLayerConfiguration.Builder().build(); - - if (layers == null) - layers = new Layer[getnLayers()]; - - if (defaultConfiguration == null) - defaultConfiguration = new NeuralNetConfiguration.Builder().build(); + INDArray res = input; + for (int l = from; l <= to; l++) { + res = this.activationFromPrevLayer(l, res, false, mgr); + } + return res; + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; } + } + /** + * Compute all layer activations, from input to output of the output layer. Note that the input is + * included in the list: thus feedForward(in,train).get(0) is the inputs, .get(1) is the + * activations of layer 0, and so on. + * + * @param train Training: if true, perform forward pass/inference at training time. Usually, + * inference is performed with train = false. This impacts whether dropout etc is applied or + * not. + * @return The list of activations for each layer, including the input + */ + public List feedForward(INDArray input, boolean train) { + setInput(input); + return feedForward(train); + } - /** - * Perform layerwise pretraining for one epoch - see {@link #pretrain(DataSetIterator, int)} - */ - public void pretrain(DataSetIterator iter) { - pretrain(iter, 1); + /** + * Compute activations from input to output of the output layer. As per {@link + * #feedForward(INDArray, boolean)} but using the inputs that have previously been set using + * {@link #setInput(INDArray)} + * + * @return the list of activations for each layer + */ + public List feedForward(boolean train) { + try { + return ffToLayerActivationsDetached( + train, FwdPassType.STANDARD, false, layers.length - 1, input, mask, null, true); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; } + } - /** - * Perform layerwise unsupervised training on all pre-trainable layers in the network (VAEs, Autoencoders, etc), for the specified - * number of epochs each. For example, if numEpochs=3, then layer 0 will be fit for 3 epochs, followed by layer 1 - * for 3 epochs, and so on.
- * Note that pretraining will be performed on one layer after the other. To perform unsupervised training on a single layer, - * use {@link #pretrainLayer(int, DataSetIterator)} - * - * @param iter Training data - */ - public void pretrain(DataSetIterator iter, int numEpochs){ - if (flattenedGradients == null) { - initGradientsView(); - } - - for (int i = 0; i < getnLayers(); i++) { - pretrainLayer(i, iter, numEpochs); - } + /** + * Perform feed-forward, optionally (not) clearing the layer input arrays.
+ * Note: when using clearInputs=false, there can be some performance and memory overhead: this is + * because the arrays are defined outside of workspaces (which are enabled by default) - + * otherwise, old/invalidated arrays could still be accessed after calling this method. + * Consequently: Don't use clearInputs=false unless you have a use case that requires them to + * remain after feed-forward has been completed + * + * @param train training mode (true) or test mode (false) + * @param clearInputs If false: don't clear the layer inputs + * @return Activations from feed-forward + */ + public List feedForward(boolean train, boolean clearInputs) { + try { + return ffToLayerActivationsDetached( + train, FwdPassType.STANDARD, false, layers.length - 1, input, mask, null, clearInputs); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; } + } - /** - * Fit for one epoch - see {@link #pretrainLayer(int, DataSetIterator, int)} - */ - public void pretrainLayer(int layerIdx, DataSetIterator iter) { - pretrainLayer(layerIdx, iter, 1); + /** + * Compute the activations from the input to the specified layer.
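A minimal sketch of the clearInputs variant just defined, assuming 'net' is an already-initialized MultiLayerNetwork and 'features' is one minibatch; class name is illustrative, imports follow stock DL4J conventions.

import java.util.List;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class FeedForwardClearInputsSketch {
  public static List<INDArray> activationsKeepInputs(MultiLayerNetwork net, INDArray features) {
    net.setInput(features);
    // clearInputs=false keeps each layer's input array around after the call, at the
    // memory cost described in the Javadoc above (arrays are scoped out of workspaces).
    // Use clearInputs=true unless the inputs are really needed afterwards.
    return net.feedForward(false, false);   // train=false, clearInputs=false
  }
}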
+ * To compute activations for all layers, use feedForward(...) methods
+ * Note: output list includes the original input. So list.get(0) is always the original input, and + * list.get(i+1) is the activations of the ith layer. + * + * @param layerNum Index of the last layer to calculate activations for. Layers are zero-indexed. + * feedForwardToLayer(i,input) will return the activations for layers 0..i (inclusive) + * @param input Input to the network + * @return list of activations. + */ + public List feedForwardToLayer(int layerNum, INDArray input) { + try { + return ffToLayerActivationsDetached( + false, FwdPassType.STANDARD, false, layerNum, input, mask, null, true); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; } + } - /** - * Perform layerwise unsupervised training on a single pre-trainable layer in the network (VAEs, Autoencoders, etc) - * for the specified number of epochs
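A minimal sketch of the indexing convention described above, assuming 'net' is an already-initialized MultiLayerNetwork and 'features' is one minibatch; class name is illustrative.

import java.util.List;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class FeedForwardToLayerSketch {
  public static INDArray activationOfLayer(MultiLayerNetwork net, INDArray features, int layerIdx) {
    // Returns activations for layers 0..layerIdx (inclusive), with the raw input at index 0
    List<INDArray> acts = net.feedForwardToLayer(layerIdx, features);
    return acts.get(layerIdx + 1);   // list.get(i + 1) == activation of layer i
  }
}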
- * If the specified layer index (0 to numLayers - 1) is not a pretrainable layer, this is a no-op. - * - * @param layerIdx Index of the layer to train (0 to numLayers-1) - * @param iter Training data - * @param numEpochs Number of epochs to fit the specified layer for - */ - public void pretrainLayer(int layerIdx, DataSetIterator iter, int numEpochs) { - Preconditions.checkState(numEpochs > 0, "Number of epochs (%s) must be a positive number", numEpochs); - - if (flattenedGradients == null) { - initGradientsView(); - } - if (layerIdx >= layers.length) { - throw new IllegalArgumentException( - "Cannot pretrain layer: layerIdx (" + layerIdx + ") >= numLayers (" + layers.length + ")"); - } - - Layer layer = layers[layerIdx]; - if (!layer.isPretrainLayer()) - return; - - if(numEpochs > 1 && !iter.resetSupported()) - throw new IllegalStateException("Cannot fit multiple epochs (" + numEpochs + ") on an iterator that doesn't support resetting"); - - if (!iter.hasNext() && iter.resetSupported()) { - iter.reset(); - } - - log.info("Starting unsupervised training on layer " + layerIdx + " for " + numEpochs + " epochs"); - for(int i=0; i 0) - iter.reset(); - - while (iter.hasNext()) { - DataSet next = iter.next(); - input = next.getFeatures(); - pretrainLayer(layerIdx, input); - } - } - - int ec = getLayer(layerIdx).conf().getEpochCount() + 1; - getLayer(layerIdx).conf().setEpochCount(ec); + /** + * Compute the activations from the input to the specified layer.
+ * To compute activations for all layers, use feedForward(...) methods
+ * Note: output list includes the original input. So list.get(0) is always the original input, and + * list.get(i+1) is the activations of the ith layer. + * + * @param layerNum Index of the last layer to calculate activations for. Layers are zero-indexed. + * feedForwardToLayer(i,input) will return the activations for layers 0..i (inclusive) + * @param input Input to the network + * @param train true for training, false for test (i.e., false if using network after training) + * @return list of activations. + */ + public List feedForwardToLayer(int layerNum, INDArray input, boolean train) { + try { + int layerVertexIdx = layers[layerNum].getIndex(); + return ffToLayerActivationsDetached( + train, FwdPassType.STANDARD, false, layerVertexIdx, input, mask, null, true); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; } + } - /** - * Perform layerwise unsupervised training on a single pre-trainable layer in the network (VAEs, Autoencoders, etc)
- * If the specified layer index (0 to numLayers - 1) is not a pretrainable layer, this is a no-op. - * - * @param layerIdx Index of the layer to train (0 to numLayers-1) - * @param features Training data array - */ - public void pretrainLayer(int layerIdx, INDArray features) { - setInput(features); - setLayerMaskArrays(null, null); + /** + * Compute the activations from the input to the specified layer, using the currently set input + * for the network.
+ * To compute activations for all layers, use feedForward(...) methods
+ * Note: output list includes the original input. So list.get(0) is always the original input, and + * list.get(i+1) is the activations of the ith layer. + * + * @param layerNum Index of the last layer to calculate activations for. Layers are zero-indexed. + * feedForwardToLayer(i,input) will return the activations for layers 0..i (inclusive) + * @param train true for training, false for test (i.e., false if using network after training) + * @return list of activations. + */ + public List feedForwardToLayer(int layerNum, boolean train) { + try { + return ffToLayerActivationsDetached( + train, FwdPassType.STANDARD, false, layerNum, input, mask, null, true); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } - if (flattenedGradients == null) { - initGradientsView(); - } - if (layerIdx >= layers.length) { - throw new IllegalArgumentException( - "Cannot pretrain layer: layerIdx (" + layerIdx + ") >= numLayers (" + layers.length + ")"); + protected void validateArrayWorkspaces( + @NonNull LayerWorkspaceMgr mgr, + @NonNull INDArray array, + @NonNull ArrayType arrayType, + int layerIdx, + boolean isPreprocessor, + String op) { + try { + mgr.validateArrayLocation(arrayType, array, false, layerIdx > 0); + } catch (ND4JWorkspaceException e) { + String layerName = layers[layerIdx].getLayerConfiguration().getLayerName(); + String clazz; + if (isPreprocessor) { + clazz = getNetConfiguration().getInputPreProcess(layerIdx).getClass().getName(); + } else { + clazz = layers[layerIdx].getClass().getName(); + } + throw new IllegalStateException( + op + + ": array (" + + arrayType + + ") workspace validation failed (" + + (isPreprocessor ? "preprocessor" : "layer ") + + layerIdx + + (layerName != null ? " - layer name \"" + layerName + "\"" : "") + + " - class: " + + clazz + + ") - array is defined in incorrect workspace", + e); + } + } + + /** + * Feed-forward through the network - returning all array activations in a list, detached from any + * workspace. Note that no workspace should be active externally when calling this method (an + * exception will be thrown if a workspace is open externally) + * + * @param train Training mode (true) or test/inference mode (false) + * @param fwdPassType Type of forward pass to perform (STANDARD or RNN_ACTIVATE_WITH_STORED_STATE + * only) + * @param storeLastForTBPTT ONLY used if fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE + * @param layerIndex Index (inclusive) to stop forward pass at. For all layers, use numLayers-1 + * @param input Input to the network + * @param fMask Feature mask array. May be null. + * @param lMask Label mask array. May be null. + * @param clearInputs Whether the layer inputs should be cleared + * @return List of activations (including the input), detached from any workspace + */ + protected synchronized List ffToLayerActivationsDetached( + boolean train, + @NonNull FwdPassType fwdPassType, + boolean storeLastForTBPTT, + int layerIndex, + @NonNull INDArray input, + INDArray fMask, + INDArray lMask, + boolean clearInputs) { + setInput(input); + setLayerMaskArrays(fMask, lMask); + + // Verify that no workspace is open externally + WorkspaceUtils.assertNoWorkspacesOpen( + "Expected no workspace active in ffToLayerActivationsDetached"); + + LayerWorkspaceMgr workspaceMgr; + WorkspaceMode wsm = + (train + ? 
getNetConfiguration().getTrainingWorkspaceMode() + : getNetConfiguration().getInferenceWorkspaceMode()); + if (wsm == WorkspaceMode.NONE) { + workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + workspaceMgr = + LayerWorkspaceMgr.builder() + .noWorkspaceFor(ArrayType.ACTIVATIONS) + .with(ArrayType.INPUT, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + if (input.isAttached()) { + // Don't leverage out of async DataSetIterator workspaces + workspaceMgr.setNoLeverageOverride(input.data().getParentWorkspace().getId()); + } + + if (!clearInputs) { + workspaceMgr.setScopedOutFor(ArrayType.INPUT); + } + } + workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); + + List out = new ArrayList<>(); + out.add( + workspaceMgr.leverageTo( + ArrayType.INPUT, + input)); // Should be unnecessary (and no op), if layer is implemented correctly + + for (int i = 0; i <= layerIndex; i++) { + try (MemoryWorkspace wsFFWorking = + workspaceMgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { + if (getNetConfiguration().getInputPreProcess(i) != null) { + input = + getNetConfiguration() + .getInputPreProcess(i) + .preProcess(input, getInputMiniBatchSize(), workspaceMgr); + // Validation: Exception if invalid (bad preprocessor implementation) + validateArrayWorkspaces( + workspaceMgr, + input, + ArrayType.ACTIVATIONS, + i, + true, + "Feed forward to layer (inference)"); } - LayerWorkspaceMgr workspaceMgr; - if(layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ - workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); + if (fwdPassType == FwdPassType.STANDARD) { + input = layers[i].activate(input, train, workspaceMgr); + } else if (fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE) { + if (layers[i] instanceof RecurrentLayer) { + input = + ((RecurrentLayer) layers[i]) + .rnnActivateUsingStoredState(input, train, storeLastForTBPTT, workspaceMgr); + } else if (layers[i] instanceof BaseWrapperLayer + && ((BaseWrapperLayer) layers[i]).getUnderlying() instanceof RecurrentLayer) { + RecurrentLayer rl = (RecurrentLayer) ((BaseWrapperLayer) layers[i]).getUnderlying(); + input = rl.rnnActivateUsingStoredState(input, train, storeLastForTBPTT, workspaceMgr); + } else if (layers[i] instanceof MultiLayerNetwork) { + List temp = + ((MultiLayerNetwork) layers[i]) + .rnnActivateUsingStoredState(input, train, storeLastForTBPTT); + input = temp.get(temp.size() - 1); + } else { + input = layers[i].activate(input, train, workspaceMgr); + } } else { - workspaceMgr = LayerWorkspaceMgr.builder() - .defaultWorkspace(WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); + throw new IllegalStateException( + "Forward pass type not supported for this method: " + fwdPassType); } - workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - Layer layer = layers[layerIdx]; - if (!layer.isPretrainLayer()) - return; + // Validation: Exception if invalid (bad layer implementation) + validateArrayWorkspaces( + workspaceMgr, + input, + ArrayType.ACTIVATIONS, + i, + false, + "Feed forward to layer (inference)"); - //Do forward pass to the layer to be pretrained - INDArray outputOfPrevLayer; - if(layerIdx == 0) { - outputOfPrevLayer = input; + out.add(input); + } + if (clearInputs) { + layers[i].clear(); 
+ } + } + + return out; + } + + /** + * Feed-forward through the network at training time - returning a list of all activations in a + * workspace (WS_ALL_LAYERS_ACT) if workspaces are enabled for training; or detached if no + * workspaces are used.
+ * Note: if using workspaces for training, this method requires that WS_ALL_LAYERS_ACT is open + * externally.
+ * If using NO workspaces, this method requires that no external workspace is open.
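A minimal sketch of toggling the workspace modes that drive this behaviour, assuming 'net' is an already-initialized MultiLayerNetwork; the setter names are taken from the error message used later in this class (getNetConfiguration().setTraining/InferenceWorkspaceMode(...)) and are otherwise an assumption.

import org.deeplearning4j.nn.conf.WorkspaceMode;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;

public class WorkspaceModeSketch {
  public static void disableWorkspaces(MultiLayerNetwork net) {
    // With WorkspaceMode.NONE the feed-forward helpers expect NO external workspace
    // to be open; with workspaces enabled, the training-time helper above expects
    // WS_ALL_LAYERS_ACT to be open externally (see Javadoc).
    net.getNetConfiguration().setTrainingWorkspaceMode(WorkspaceMode.NONE);
    net.getNetConfiguration().setInferenceWorkspaceMode(WorkspaceMode.NONE);
  }
}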
+ * Note that this method does NOT clear the inputs to each layer - instead, they are in the + * WS_ALL_LAYERS_ACT workspace for use in later backprop. + * + * @param layerIndex Index (inclusive) to stop forward pass at. For all layers, use numLayers-1 + * @param fwdPassType Type of forward pass to perform (STANDARD or RNN_ACTIVATE_WITH_STORED_STATE + * only) + * @param storeLastForTBPTT ONLY used if fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE + * @param input Input to network + * @param fMask Feature mask array. May be null + * @param lMask Label mask aray. May be null. + * @return + */ + protected synchronized List ffToLayerActivationsInWs( + int layerIndex, + @NonNull FwdPassType fwdPassType, + boolean storeLastForTBPTT, + @NonNull INDArray input, + INDArray fMask, + INDArray lMask) { + setInput(input); + setLayerMaskArrays(fMask, lMask); + + LayerWorkspaceMgr workspaceMgr; + if (getNetConfiguration().getTrainingWorkspaceMode() == WorkspaceMode.NONE) { + WorkspaceUtils.assertNoWorkspacesOpen( + "Expected no workspace active in ffToLayerActivationsInWs when training workspace is set to NONE"); + workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + workspaceMgr = + LayerWorkspaceMgr.builder() + .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + if (input.isAttached()) { + // Don't leverage out of async DataSetIterator workspaces + workspaceMgr.setNoLeverageOverride(input.data().getParentWorkspace().getId()); + } + + if (getNetConfiguration().getCacheMode() != CacheMode.NONE) { + // For now: store cache mode activations in activations workspace + workspaceMgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG); + workspaceMgr.setWorkspace( + ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG); + } + + WorkspaceUtils.assertOpenAndActive( + WS_ALL_LAYERS_ACT, + "ffToLayerActivationsInWs method requires workspace WS_ALL_LAYERS_ACT to be open"); + } + workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); + + List out = new ArrayList<>(); + out.add(workspaceMgr.leverageTo(ArrayType.INPUT, input)); // Probably unnecessary usually + + boolean traceLog = log.isTraceEnabled(); + + for (int i = 0; i <= layerIndex; i++) { + try (MemoryWorkspace wsFFWorking = + workspaceMgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { + if (getNetConfiguration().getInputPreProcess(i) != null) { + input = + getNetConfiguration() + .getInputPreProcess(i) + .preProcess(input, getInputMiniBatchSize(), workspaceMgr); + // Validation: Exception if invalid (bad preprocessor implementation) + validateArrayWorkspaces( + workspaceMgr, + input, + ArrayType.ACTIVATIONS, + i, + true, + "Feed forward to layer (training)"); + } + + if (traceLog) { + log.trace("About to forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); + } + + if (fwdPassType == FwdPassType.STANDARD) { + input = layers[i].activate(input, true, workspaceMgr); + } else if (fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE) { + if (layers[i] instanceof RecurrentLayer) { + input = + ((RecurrentLayer) layers[i]) + .rnnActivateUsingStoredState(input, true, storeLastForTBPTT, workspaceMgr); + } else if (layers[i] instanceof BaseWrapperLayer + && ((BaseWrapperLayer) 
layers[i]).getUnderlying() instanceof RecurrentLayer) { + RecurrentLayer rl = (RecurrentLayer) ((BaseWrapperLayer) layers[i]).getUnderlying(); + input = rl.rnnActivateUsingStoredState(input, true, storeLastForTBPTT, workspaceMgr); + } else if (layers[i] instanceof MultiLayerNetwork) { + List temp = + ((MultiLayerNetwork) layers[i]) + .rnnActivateUsingStoredState(input, true, storeLastForTBPTT); + input = temp.get(temp.size() - 1); + } else { + input = layers[i].activate(input, true, workspaceMgr); + } } else { - //Yes, this part of training - but we'll do forward psas as inference mode when doing layerwise training - // to effectively freeze earlier layers and not apply dropout etc - outputOfPrevLayer = outputOfLayerDetached(false, FwdPassType.STANDARD, layerIndex-1, features, null, null, null); + throw new IllegalStateException( + "FwdPassType not supported for this method: " + fwdPassType); } - try(MemoryWorkspace ws = workspaceMgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { - if (layerWiseConfigurations.getInputPreProcess(layerIdx) != null) { + if (input == null) { + throw new IllegalStateException("LayerConfiguration " + i + " returned null activations"); + } - if (input.size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - outputOfPrevLayer = layerWiseConfigurations.getInputPreProcess(layerIdx).preProcess(outputOfPrevLayer, (int) input.size(0), - LayerWorkspaceMgr.noWorkspaces(helperWorkspaces)); + // Validation: Exception if invalid (bad layer implementation) + validateArrayWorkspaces( + workspaceMgr, + input, + ArrayType.ACTIVATIONS, + i, + false, + "Feed forward to layer (training)"); + if (layers[i].input() == null) { + log.error( + "Input for layer {} at index {} cannot be null.", + layers[i].getLayerConfiguration().getLayerName(), + i); + throw new RuntimeException("Layer input is null."); + } + validateArrayWorkspaces( + workspaceMgr, + layers[i].input(), + ArrayType.INPUT, + i, + false, + "Feed forward to layer (training)"); + + out.add(input); + + if (traceLog) { + log.trace("Completed forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); + } + } + } + + return out; + } + + /** + * Provide the output of the specified layer, detached from any workspace. This is most commonly + * used at inference/test time, and is more memory efficient than {@link + * #ffToLayerActivationsDetached(boolean, FwdPassType, boolean, int, INDArray, INDArray, INDArray, + * boolean)} and {@link #ffToLayerActivationsInWs(int, FwdPassType, boolean, INDArray, INDArray, + * INDArray)}.
+ * This method clears all layer inputs. + * + *
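A minimal sketch of the usual public path into this detached-output logic, assuming 'net' is an already-initialized MultiLayerNetwork and 'features' is one minibatch; output(INDArray, boolean) is the standard DL4J inference entry point and is assumed to be unchanged by this refactor.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class InferenceOutputSketch {
  public static INDArray predict(MultiLayerNetwork net, INDArray features) {
    // train=false: inference mode; the returned array is detached from any workspace
    return net.output(features, false);
  }
}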

NOTE: in general, no workspaces should be activated externally for this method! This method + * handles the workspace activation as required + * + * @param train Training mode (true) or test/inference mode (false) + * @param fwdPassType Type of forward pass to perform (STANDARD, RNN_TIMESTEP or + * RNN_ACTIVATE_WITH_STORED_STATE) + * @param layerIndex Index (inclusive) to stop forward pass at. For all layers, use numLayers-1 + * @param input Input to the network + * @param featureMask Input/feature mask array. May be null. + * @param labelsMask Labels mask array. May be null + * @param outputWorkspace Optional - if provided, outputs should be placed in this workspace. + * NOTE: this workspace must be open + * @return Output of the specified layer, detached from any workspace + */ + protected INDArray outputOfLayerDetached( + boolean train, + @NonNull FwdPassType fwdPassType, + int layerIndex, + @NonNull INDArray input, + INDArray featureMask, + INDArray labelsMask, + MemoryWorkspace outputWorkspace) { + setInput(input); + setLayerMaskArrays(featureMask, labelsMask); + + /* + Idea here: we want to minimize memory, and return only the final array + Approach to do this: keep activations in memory only as long as we need them. + In MultiLayerNetwork, the output activations of layer X are used as input to layer X+1 + Which means: the workspace for layer X has to be open for both layers X and X+1 forward pass. + + Here, we'll use two workspaces for activations: + 1. For even index layers, activations WS that opens on start of even layer fwd pass, closes at end of odd layer fwd pass + 2. For odd index layers, activations WS that opens on start of odd layer fwd pass, closes at end of even layer fwd pass + + Additionally, we'll reconfigure the workspace manager for the *final* layer, so that we don't have to detach + */ + if (outputWorkspace == null || outputWorkspace instanceof DummyWorkspace) { + WorkspaceUtils.assertNoWorkspacesOpen( + "Expected no workspace active in outputOfLayerDetached", true); + } else { + Preconditions.checkState( + outputWorkspace.isScopeActive(), + "Workspace \"" + + outputWorkspace.getId() + + "\" was provided for the network/layer outputs. When provided, this workspace must be opened before " + + "calling the output method; furthermore, closing the workspace is the responsibility of the user"); + } + + LayerWorkspaceMgr mgrEven; + LayerWorkspaceMgr mgrOdd; + + WorkspaceMode wsm = + train + ? getNetConfiguration().getTrainingWorkspaceMode() + : getNetConfiguration().getInferenceWorkspaceMode(); + if (wsm == WorkspaceMode.NONE) { + mgrEven = LayerWorkspaceMgr.noWorkspaces(); + mgrOdd = mgrEven; + + // Check for external workspace - doesn't make sense to have one with workspace mode NONE + if (outputWorkspace != null && !(outputWorkspace instanceof DummyWorkspace)) { + throw new IllegalStateException( + "Workspace \"" + + outputWorkspace.getId() + + "\" was provided for the network/layer outputs, however " + + (train ? "training" : "inference") + + " workspace mode is set to NONE. Cannot put output activations into the specified workspace if" + + "workspaces are disabled for the network. 
use getNetConfiguration().setTraining/InferenceWorkspaceMode(WorkspaceMode.ENABLED)"); + } + } else { + mgrEven = + LayerWorkspaceMgr.builder() + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.ACTIVATIONS, WS_LAYER_ACT_1, WS_LAYER_ACT_X_CONFIG) + .with( + ArrayType.INPUT, + WS_LAYER_ACT_2, + WS_LAYER_ACT_X_CONFIG) // Inputs should always be in the previous WS + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + mgrOdd = + LayerWorkspaceMgr.builder() + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.ACTIVATIONS, WS_LAYER_ACT_2, WS_LAYER_ACT_X_CONFIG) + .with( + ArrayType.INPUT, + WS_LAYER_ACT_1, + WS_LAYER_ACT_X_CONFIG) // Inputs should always be in the previous WS + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + } + mgrEven.setHelperWorkspacePointers(helperWorkspaces); + mgrOdd.setHelperWorkspacePointers(helperWorkspaces); + + MemoryWorkspace wsActCloseNext = null; + MemoryWorkspace temp = null; + MemoryWorkspace initialWorkspace = Nd4j.getMemoryManager().getCurrentWorkspace(); + + boolean traceLog = log.isTraceEnabled(); + + Throwable t = null; + try { + for (int i = 0; i <= layerIndex; i++) { + LayerWorkspaceMgr mgr = (i % 2 == 0 ? mgrEven : mgrOdd); + + if (traceLog) { + log.trace("About to forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); + } + + // Edge case: for first layer with dropout, inputs can't be in previous workspace (as it + // hasn't been opened yet) + // Hence: put inputs in working memory + if (i == 0 && wsm != WorkspaceMode.NONE) { + mgr.setWorkspace(ArrayType.INPUT, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG); + } + + try (MemoryWorkspace wsFFWorking = + mgr.notifyScopeEntered( + ArrayType.FF_WORKING_MEM)) { // Working memory: opened/closed once per layer + // Activations workspaces: opened/closed every second layer. + // So mgrEven (WS_LAYER_ACT_1) open at start of 0, 2, 4, 8; closed at end of 1, 3, 5, 7 + // etc + // and mgrOdd (WS_LAYER_ACT_2) opened at start of 1, 3, 5, 7; closed at end of 2, 4, 6, 8 + // etc + temp = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS); + + // Note that because we're opening activation workspaces not in a simple nested order, + // we'll manually + // override the previous workspace setting. 
Otherwise, when we close these workspaces, the + // "current" + // workspace may be set to the incorrect one + temp.setPreviousWorkspace(initialWorkspace); + + if (i == 0 && input.isAttached()) { + // Don't leverage out of async DataSetIterator workspaces + mgr.setNoLeverageOverride(input.data().getParentWorkspace().getId()); + } + + if (getNetConfiguration().getInputPreProcess(i) != null) { + input = + getNetConfiguration() + .getInputPreProcess(i) + .preProcess(input, getInputMiniBatchSize(), mgr); + // Validation: Exception if invalid (bad preprocessor implementation) + validateArrayWorkspaces( + mgr, input, ArrayType.ACTIVATIONS, i, true, "Output of layer (inference)"); + } + + if (i == layerIndex) { + if (outputWorkspace != null && !(outputWorkspace instanceof DummyWorkspace)) { + // Place activations in user-specified workspace + mgr.setWorkspace( + ArrayType.ACTIVATIONS, + outputWorkspace.getId(), + outputWorkspace.getWorkspaceConfiguration()); + } else { + // Final activations: should be detached + mgr.setScopedOutFor(ArrayType.ACTIVATIONS); } + } - layer.fit(outputOfPrevLayer, workspaceMgr); - } - } + if (fwdPassType == FwdPassType.STANDARD) { + // Standard feed-forward case + if (i > 0 + && ConvolutionUtils.layerHasConvolutionLayout(layers[i - 1].getLayerConfiguration()) + && ConvolutionUtils.layerHasConvolutionLayout(layers[i].getLayerConfiguration())) { - @Override - public int batchSize() { - //In 99+% of cases, the input and labels dimension 0 size should be identical - //The only real exceptions: space to batch, and batch to space layers - //In those cases, we should base it on the labels size, as this impacts gradient calculation - if (input.size(0) > Integer.MAX_VALUE || labels.size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - return labels == null ? (int) input.size(0) : (int)labels.size(0); - } - - @Override - public NeuralNetConfiguration conf() { - return defaultConfiguration; - } - - @Override - public void setConf(NeuralNetConfiguration conf) { - throw new UnsupportedOperationException(); - } - - @Override - public INDArray input() { - return input; - } - - @Override - public ConvexOptimizer getOptimizer() { - return solver.getOptimizer(); - } - - /** - * Get one parameter array for the network.
- * In MultiLayerNetwork, parameters are keyed like "0_W" and "0_b" to mean "weights of layer index 0" and "biases - * of layer index 0" respectively. Numbers increment sequentially, and the suffixes ("W", "b" etc) depend on the - * layer type, and are defined in the relevant parameter initializers for each layer.
- * Note that the returned INDArrays are views of the underlying network parameters, so modifications of the returned - * arrays will impact the parameters of the network. - * - * @param param the key of the parameter - * @return The specified parameter array for the network - * @see #paramTable() paramTable() method, for a map of all parameters - */ - @Override - public INDArray getParam(String param) { - //Get params for MultiLayerNetwork sub layers. - int idx = param.indexOf('_'); - if (idx == -1) - throw new IllegalStateException("Invalid param key: does not have layer separator: \"" + param + "\""); - int layerIdx = Integer.parseInt(param.substring(0, idx)); - String newKey = param.substring(idx + 1); - - return layers[layerIdx].getParam(newKey); - } - - /** - * Return a map of all parameters in the network. Parameter names are as described in {@link #getParam(String)}. - * As per {@link #getParam(String)} the returned arrays are views - modifications to these will impact - * the underlying network parameters - * @return A map of all parameters in the network - */ - @Override - public Map paramTable() { - return paramTable(false); - } - - /** - * Returns a map of all parameters in the network as per {@link #paramTable()}.
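A minimal sketch of the "layerIndex_paramName" keying described above, assuming these accessors remain reachable on the network after this refactor (e.g. via a base class) and that layer 0 has a weight parameter "W", such as a dense layer; class name is illustrative.

import java.util.Arrays;
import java.util.Map;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class ParamTableSketch {
  public static void printParamShapes(MultiLayerNetwork net) {
    INDArray w0 = net.getParam("0_W");   // weights of layer 0 (a view, not a copy)
    System.out.println("0_W shape: " + Arrays.toString(w0.shape()));

    Map<String, INDArray> table = net.paramTable();
    for (Map.Entry<String, INDArray> e : table.entrySet()) {
      System.out.println(e.getKey() + " -> " + Arrays.toString(e.getValue().shape()));
    }
  }
}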
- * Optionally (with backpropParamsOnly=true) only the 'backprop' parameters are returned - that is, any parameters - * involved only in unsupervised layerwise pretraining not standard inference/backprop are excluded from the returned list. - * @param backpropParamsOnly If true, return backprop params only. If false: return all params - * @return Parameters for the network - */ - public Map paramTable(boolean backpropParamsOnly) { - //Get all parameters from all layers - Map allParams = new LinkedHashMap<>(); - for (int i = 0; i < layers.length; i++) { - Map paramMap = layers[i].paramTable(backpropParamsOnly); - for (Map.Entry entry : paramMap.entrySet()) { - String newKey = i + "_" + entry.getKey(); - allParams.put(newKey, entry.getValue()); - } - } - return allParams; - } - - /** - * Intended for internal use - */ - @Override - public boolean updaterDivideByMinibatch(String paramName) { - int idx = paramName.indexOf('_'); - int layerIdx = Integer.parseInt(paramName.substring(0, idx)); - String subName = paramName.substring(idx+1); - return getLayer(layerIdx).updaterDivideByMinibatch(subName); - } - - /** - * Set the parameters of the netowrk. Note that the parameter keys must match the format as described in {@link #getParam(String)} - * and {@link #paramTable()}. Note that the values of the parameters used as an argument to this method are copied - - * i.e., it is safe to later modify/reuse the values in the provided paramTable without this impacting the network. - * - * @param paramTable Parameters to set - */ - @Override - public void setParamTable(Map paramTable) { - Map currParamTable = paramTable(); - if (!currParamTable.keySet().equals(paramTable.keySet())) { - throw new IllegalArgumentException("Cannot set param table: parameter keys do not match.\n" + "Current: " - + currParamTable.keySet() + "\nTo set: " + paramTable.keySet()); - } - - for (String s : paramTable.keySet()) { - INDArray curr = currParamTable.get(s); - INDArray toSet = paramTable.get(s); - if (!Arrays.equals(curr.shape(), toSet.shape())) { - throw new IllegalArgumentException("Cannot set parameter table: parameter \"" + s + "\" shapes " - + "do not match. Current = " + Arrays.toString(curr.shape()) + ", to set = " - + Arrays.toString(toSet.shape())); - } - } - - //Now that we've checked ALL params (to avoid leaving net in half-modified state) - for (String s : paramTable.keySet()) { - INDArray curr = currParamTable.get(s); - INDArray toSet = paramTable.get(s); - curr.assign(toSet); - } - } - - /** - * Set the values of a single parameter. See {@link #setParamTable(Map)} and {@link #getParam(String)} for more - * details. - * @param key the key of the parameter to set - * @param val the new values for the parameter - */ - @Override - public void setParam(String key, INDArray val) { - //Set params for MultiLayerNetwork sub layers. - int idx = key.indexOf('_'); - if (idx == -1) - throw new IllegalStateException("Invalid param key: not have layer separator: \"" + key + "\""); - int layerIdx = Integer.parseInt(key.substring(0, idx)); - String newKey = key.substring(idx + 1); - - layers[layerIdx].setParam(newKey, val); - } - - /** - * Get the configuration for the network - * @return Network configuration - */ - public MultiLayerConfiguration getLayerWiseConfigurations() { - return layerWiseConfigurations; - } - - /** - * This method is intended for internal/developer use only. 
- */ - public void setLayerWiseConfigurations(MultiLayerConfiguration layerWiseConfigurations) { - this.layerWiseConfigurations = layerWiseConfigurations; - } - - /** - * Initialize the MultiLayerNetwork. This should be called once before the network is used. - * This is functionally equivalent to calling {@code init(null, false)}. - * @see MultiLayerNetwork#init(INDArray, boolean) - */ - public void init() { - init(null, false); - } - - /** - * Initialize the MultiLayerNetwork, optionally with an existing parameters array. - * If an existing parameters array is specified, it will be used (and the values will not be modified) in the network; - * if no parameters array is specified, parameters will be initialized randomly according to the network configuration. - * - * @param parameters Network parameter. May be null. If null: randomly initialize. - * @param cloneParametersArray Whether the parameter array (if any) should be cloned, or used directly - */ - public void init(INDArray parameters, boolean cloneParametersArray) { - if (layerWiseConfigurations == null || layers == null) - intializeConfigurations(); - if (initCalled) - return; - - DataType netDtype = getLayerWiseConfigurations().getDataType(); - if(parameters != null && parameters.dataType() != netDtype){ - Preconditions.checkState(parameters.rank() == 2 && parameters.size(0) == 1, "Invalid parameters array: should be rank 2 with shape [1,numParams]. Got %ndShape", parameters); - if(cloneParametersArray){ - try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { - parameters = parameters.castTo(netDtype); + CNN2DFormat preLayerFormat = + ConvolutionUtils.getFormatForLayer(layers[i - 1].getLayerConfiguration()); + CNN2DFormat currLayerFormat = + ConvolutionUtils.getFormatForLayer(layers[i].getLayerConfiguration()); + if (preLayerFormat != currLayerFormat) { + // NHWC case + if (preLayerFormat == CNN2DFormat.NCHW) { + input = input.permute(0, 3, 1, 2); } - } else { - throw new IllegalStateException("Error initializing network: Network datatype is set to " + netDtype - + " but provided array has datatype " + parameters.dataType() + " with cloneParametersArray argument" + - " set to false. 
Cannot initialize net with specified datatype array if that array does not match network datatype"); - } - } + // NCHW case + else if (preLayerFormat == CNN2DFormat.NHWC) { + input = input.permute(0, 2, 3, 1); - - if (layerMap == null) - layerMap = new LinkedHashMap<>(); - - if (layerWiseConfigurations.getTrainingWorkspaceMode() == null) - layerWiseConfigurations.setTrainingWorkspaceMode(WorkspaceMode.NONE); - - if (layerWiseConfigurations.getInferenceWorkspaceMode() == null) - layerWiseConfigurations.setInferenceWorkspaceMode(WorkspaceMode.NONE); - - if (layerWiseConfigurations.getCacheMode() == null) - layerWiseConfigurations.setCacheMode(CacheMode.NONE); - - OneTimeLogger.info(log, "Starting MultiLayerNetwork with WorkspaceModes set to [training: {}; inference: {}], cacheMode set to [{}]", - layerWiseConfigurations.getTrainingWorkspaceMode(), - layerWiseConfigurations.getInferenceWorkspaceMode(), - layerWiseConfigurations.getCacheMode()); - - int nLayers = getnLayers(); - - if (nLayers < 1) - throw new IllegalStateException("Unable to create network: number of layers is less than 1"); - - if (this.layers == null || this.layers[0] == null) { - if (this.layers == null) - this.layers = new Layer[nLayers]; - - //First: Work out total length of params - long paramLength = 0; - val nParamsPerLayer = new long[nLayers]; - for (int i = 0; i < nLayers; i++) { - NeuralNetConfiguration conf = layerWiseConfigurations.getConf(i); - conf.getLayer().setDataType(netDtype); - nParamsPerLayer[i] = conf.getLayer().initializer().numParams(conf); - paramLength += nParamsPerLayer[i]; - } - - //Create parameters array, if required - boolean initializeParams; - if (parameters != null) { - if (!parameters.isRowVectorOrScalar()) - throw new IllegalArgumentException("Invalid parameters: should be a row vector"); - if (parameters.length() != paramLength) - throw new IllegalArgumentException("Invalid parameters: expected length " + paramLength - + ", got length " + parameters.length()); - - if (cloneParametersArray) - flattenedParams = parameters.dup(); - else - flattenedParams = parameters; - - initializeParams = false; - } else if(paramLength > 0){ - flattenedParams = Nd4j.create(netDtype, 1, paramLength); - initializeParams = true; - } else { - //Edge case: 0 params in network - flattenedParams = null; - initializeParams = false; - } - - //Set RNG seed, for repeatability between initializations when set - if (initializeParams) { - Nd4j.getRandom().setSeed(getDefaultConfiguration().getSeed()); - } - - // construct multi-layer - long paramCountSoFar = 0; - for (int i = 0; i < nLayers; i++) { - INDArray paramsView; - if (nParamsPerLayer[i] > 0) { - paramsView = flattenedParams.get(NDArrayIndex.interval(0,0,true), - NDArrayIndex.interval(paramCountSoFar, paramCountSoFar + nParamsPerLayer[i])); } else { - paramsView = null; + throw new IllegalStateException( + "No CNN2DDataFormat type found for previous layer!"); } - paramCountSoFar += nParamsPerLayer[i]; + } - NeuralNetConfiguration conf = layerWiseConfigurations.getConf(i); - layers[i] = conf.getLayer().instantiate(conf, trainingListeners, i, paramsView, initializeParams, netDtype); - layerMap.put(conf.getLayer().getLayerName(), layers[i]); - } - initCalled = true; - } + input = layers[i].activate(input, train, mgr); + } else if (i > 0 + && Convolution1DUtils.hasRnnDataFormat(layers[i - 1].getLayerConfiguration()) + && Convolution1DUtils.hasRnnDataFormat(layers[i].getLayerConfiguration())) { + RNNFormat preLayerFormat = + 
Convolution1DUtils.getRnnFormatFromLayer(layers[i - 1].getLayerConfiguration()); + RNNFormat currLayerFormat = + Convolution1DUtils.getRnnFormatFromLayer(layers[i].getLayerConfiguration()); + // permute for next layer + if (preLayerFormat != currLayerFormat) { + input = input.permute(0, 2, 1); + } - //Set parameters in MultiLayerNetwork.defaultConfiguration for later use in BaseOptimizer.setupSearchState() etc - defaultConfiguration.clearVariables(); - List variables = defaultConfiguration.variables(false); - for (int i = 0; i < layers.length; i++) { - if(layers[i] == null){ - throw new IllegalStateException("Encountered null layer during initialization for layer " + i + - ": " + layerWiseConfigurations.getConf(i).getLayer().getClass().getSimpleName() + " initialization " + - "returned null layer?"); - } + input = layers[i].activate(input, train, mgr); - for (String s : layers[i].conf().variables()) { - variables.add(i + "_" + s); - } - } - - // now we init solver & optimizer - if (solver == null) { - try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); - solver.initOptimizer(); - } - } - - //Mark that input modification is allowed. - //TODO When is it safe to NOT skip the very first layer? It's not always safe... - // For example dropout + iterating over List that is used for multiple epochs... - for( int i=1; i - *
- * PLEASE NOTE: Do not use this method unless you understand how to use GradientsAccumulator & updates sharing.
- * PLEASE NOTE: Do not use this method on standalone model - * - * @param accumulator Gradient accumulator to use for the network - */ - public void setGradientsAccumulator(GradientsAccumulator accumulator) { - if (!isInitCalled()) - init(); - - if (solver == null) { - try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this) - .build(); - } - } - - solver.getOptimizer().setGradientsAccumulator(accumulator); - } - - public boolean isInitCalled() { - return initCalled; - } - - /** - * This method: initializes the flattened gradients array (used in backprop) and sets the appropriate subset in all layers. - * As a general rule, this shouldn't ever need to be called manually when doing training via fit(DataSet) or fit(DataSetIterator) - */ - public void initGradientsView() { - try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - if (layers == null) - init(); - - int nLayers = layers.length; - - //First: Work out total length of params - long paramLength = 0; - val nParamsPerLayer = new long[nLayers]; - for (int i = 0; i < nLayers; i++) { - NeuralNetConfiguration conf = layerWiseConfigurations.getConf(i); - nParamsPerLayer[i] = conf.getLayer().initializer().numParams(conf); - paramLength += nParamsPerLayer[i]; - } - - if(paramLength > 0) { - flattenedGradients = Nd4j.create(flattenedParams.dataType(), new long[]{1, paramLength}, 'f'); //No need to initialize, as each layer will do it each iteration anyway - } - - long paramsSoFar = 0; - for (int i = 0; i < layers.length; i++) { - if (nParamsPerLayer[i] == 0) - continue; //This layer doesn't have any parameters... - INDArray thisLayerGradView = flattenedGradients.get(NDArrayIndex.interval(0,0,true), - NDArrayIndex.interval(paramsSoFar, paramsSoFar + nParamsPerLayer[i])); - layers[i].setBackpropGradientsViewArray(thisLayerGradView); - paramsSoFar += nParamsPerLayer[i]; - } - } - } - - protected INDArray activationFromPrevLayer(int curr, INDArray input, boolean training, LayerWorkspaceMgr mgr) { - if (getLayerWiseConfigurations().getInputPreProcess(curr) != null) { - input = getLayerWiseConfigurations().getInputPreProcess(curr).preProcess(input, getInputMiniBatchSize(), mgr); - } - - INDArray ret = layers[curr].activate(input, training, mgr); - return ret; - } - - /** - * Calculate activation for few layers at once. Suitable for autoencoder partial activation. - * - * In example: in 10-layer deep autoencoder, layers 0 - 4 inclusive are used for encoding part, and layers 5-9 inclusive are used for decoding part. 
- * - * @param from first layer to be activated, inclusive - * @param to last layer to be activated, inclusive - * @return the activation from the last layer - */ - public INDArray activateSelectedLayers(int from, int to, INDArray input) { - if (input == null) - throw new IllegalStateException("Unable to perform activation; no input found"); - if (from < 0 || from >= layers.length || from >= to) - throw new IllegalStateException("Unable to perform activation; FROM is out of layer space"); - if (to < 1 || to >= layers.length) - throw new IllegalStateException("Unable to perform activation; TO is out of layer space"); - - try { - LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(helperWorkspaces); //TODO - - INDArray res = input; - for (int l = from; l <= to; l++) { - res = this.activationFromPrevLayer(l, res, false, mgr); - } - return res; - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - /** - * Compute all layer activations, from input to output of the output layer. - * Note that the input is included in the list: thus feedForward(in,train).get(0) is the inputs, - * .get(1) is the activations of layer 0, and so on. - * - * @param train Training: if true, perform forward pass/inference at training time. Usually, inference is performed - * with train = false. This impacts whether dropout etc is applied or not. - * @return The list of activations for each layer, including the input - */ - public List feedForward(INDArray input, boolean train) { - setInput(input); - return feedForward(train); - } - - /** - * Compute activations from input to output of the output layer. - * As per {@link #feedForward(INDArray, boolean)} but using the inputs that have previously been set using {@link #setInput(INDArray)} - * - * @return the list of activations for each layer - */ - public List feedForward(boolean train) { - try { - return ffToLayerActivationsDetached(train, FwdPassType.STANDARD, false, layers.length-1, - input, mask, null, true); - } catch (OutOfMemoryError e) { - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - /** - * Perform feed-forward, optionally (not) clearing the layer input arrays.
- * Note: when using clearInputs=false, there can be some performance and memory overhead: this is because the arrays are - * defined outside of workspaces (which are enabled by default) - otherwise, old/invalidated arrays could still be - * accessed after calling this method. Consequently: Don't use clearInputs=false unless you have a use case that - * requires them to remain after feed-forward has been completed - * - * @param train training mode (true) or test mode (false) - * @param clearInputs If false: don't clear the layer inputs - * @return Activations from feed-forward - */ - public List feedForward(boolean train, boolean clearInputs){ - try{ - return ffToLayerActivationsDetached(train, FwdPassType.STANDARD, false, layers.length-1, input, mask, null, clearInputs); - } catch (OutOfMemoryError e) { - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - /** Compute the activations from the input to the specified layer.
- * To compute activations for all layers, use feedForward(...) methods
- * Note: output list includes the original input. So list.get(0) is always the original input, and - * list.get(i+1) is the activations of the ith layer. - * @param layerNum Index of the last layer to calculate activations for. Layers are zero-indexed. - * feedForwardToLayer(i,input) will return the activations for layers 0..i (inclusive) - * @param input Input to the network - * @return list of activations. - */ - public List feedForwardToLayer(int layerNum, INDArray input) { - try{ - return ffToLayerActivationsDetached(false, FwdPassType.STANDARD, false, layerNum, input, mask, null, true); - } catch (OutOfMemoryError e) { - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - /** Compute the activations from the input to the specified layer.
- * To compute activations for all layers, use feedForward(...) methods
- * Note: output list includes the original input. So list.get(0) is always the original input, and - * list.get(i+1) is the activations of the ith layer. - * @param layerNum Index of the last layer to calculate activations for. Layers are zero-indexed. - * feedForwardToLayer(i,input) will return the activations for layers 0..i (inclusive) - * @param input Input to the network - * @param train true for training, false for test (i.e., false if using network after training) - * @return list of activations. - */ - public List feedForwardToLayer(int layerNum, INDArray input, boolean train) { - try { - int layerVertexIdx = layers[layerNum].getIndex(); - return ffToLayerActivationsDetached(train, FwdPassType.STANDARD, false, layerVertexIdx, input, mask, null, true); - } catch (OutOfMemoryError e) { - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - /** Compute the activations from the input to the specified layer, using the currently set input for the network.
- * To compute activations for all layers, use feedForward(...) methods
- * Note: output list includes the original input. So list.get(0) is always the original input, and - * list.get(i+1) is the activations of the ith layer. - * @param layerNum Index of the last layer to calculate activations for. Layers are zero-indexed. - * feedForwardToLayer(i,input) will return the activations for layers 0..i (inclusive) - * @param train true for training, false for test (i.e., false if using network after training) - * @return list of activations. - */ - public List feedForwardToLayer(int layerNum, boolean train) { - try { - return ffToLayerActivationsDetached(train, FwdPassType.STANDARD, false, layerNum, input, mask, null, true); - } catch (OutOfMemoryError e) { - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - - protected void validateArrayWorkspaces(LayerWorkspaceMgr mgr, INDArray array, ArrayType arrayType, int layerIdx, - boolean isPreprocessor, String op){ - try{ - mgr.validateArrayLocation(arrayType, array, false, layerIdx > 0); - } catch (ND4JWorkspaceException e){ - String layerName = layers[layerIdx].conf().getLayer().getLayerName(); - String clazz; - if(isPreprocessor){ - clazz = layerWiseConfigurations.getInputPreProcess(layerIdx).getClass().getName(); } else { - clazz = layers[layerIdx].getClass().getName(); + input = layers[i].activate(input, train, mgr); } - throw new IllegalStateException(op + ": array (" + arrayType + ") workspace validation failed (" + - (isPreprocessor ? "preprocessor" : "layer ") + layerIdx + (layerName != null ? " - layer name \"" + - layerName + "\"" : "") + " - class: " + clazz + ") - array is defined in incorrect workspace", e); - } - } - - /** - * Feed-forward through the network - returning all array activations in a list, detached from any workspace. - * Note that no workspace should be active externally when calling this method (an exception will be thrown - * if a workspace is open externally) - * - * @param train Training mode (true) or test/inference mode (false) - * @param fwdPassType Type of forward pass to perform (STANDARD or RNN_ACTIVATE_WITH_STORED_STATE only) - * @param storeLastForTBPTT ONLY used if fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE - * @param layerIndex Index (inclusive) to stop forward pass at. For all layers, use numLayers-1 - * @param input Input to the network - * @param fMask Feature mask array. May be null. - * @param lMask Label mask array. May be null. - * @param clearInputs Whether the layer inputs should be cleared - * @return List of activations (including the input), detached from any workspace - */ - protected synchronized List ffToLayerActivationsDetached(boolean train, @NonNull FwdPassType fwdPassType, - boolean storeLastForTBPTT, int layerIndex, @NonNull INDArray input, - INDArray fMask, INDArray lMask, boolean clearInputs){ - setInput(input); - setLayerMaskArrays(fMask, lMask); - - //Verify that no workspace is open externally - WorkspaceUtils.assertNoWorkspacesOpen("Expected no workspace active in ffToLayerActivationsDetached"); - - LayerWorkspaceMgr workspaceMgr; - WorkspaceMode wsm = (train ? 
layerWiseConfigurations.getTrainingWorkspaceMode() : layerWiseConfigurations.getInferenceWorkspaceMode()); - if(wsm == WorkspaceMode.NONE){ - workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - workspaceMgr = LayerWorkspaceMgr.builder() - .noWorkspaceFor(ArrayType.ACTIVATIONS) - .with(ArrayType.INPUT, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - if(input.isAttached()){ - //Don't leverage out of async DataSetIterator workspaces - workspaceMgr.setNoLeverageOverride(input.data().getParentWorkspace().getId()); - } - - if(!clearInputs){ - workspaceMgr.setScopedOutFor(ArrayType.INPUT); - } - } - workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - - List out = new ArrayList<>(); - out.add(workspaceMgr.leverageTo(ArrayType.INPUT, input)); //Should be unnecessary (and no op), if layer is implemented correctly - - for( int i=0; i<=layerIndex; i++ ){ - try(MemoryWorkspace wsFFWorking = workspaceMgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)){ - if (getLayerWiseConfigurations().getInputPreProcess(i) != null) { - input = getLayerWiseConfigurations().getInputPreProcess(i).preProcess(input, getInputMiniBatchSize(), workspaceMgr); - //Validation: Exception if invalid (bad preprocessor implementation) - validateArrayWorkspaces(workspaceMgr, input, ArrayType.ACTIVATIONS, i, true, "Feed forward to layer (inference)"); - } - - if(fwdPassType == FwdPassType.STANDARD){ - input = layers[i].activate(input, train, workspaceMgr); - } else if (fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE) { - if (layers[i] instanceof RecurrentLayer) { - input = ((RecurrentLayer) layers[i]).rnnActivateUsingStoredState(input, train, - storeLastForTBPTT, workspaceMgr); - } else if(layers[i] instanceof BaseWrapperLayer && ((BaseWrapperLayer)layers[i]).getUnderlying() instanceof RecurrentLayer) { - RecurrentLayer rl = (RecurrentLayer) ((BaseWrapperLayer)layers[i]).getUnderlying(); - input = rl.rnnActivateUsingStoredState(input, train,storeLastForTBPTT, workspaceMgr); - } else if (layers[i] instanceof MultiLayerNetwork) { - List temp = ((MultiLayerNetwork) layers[i]).rnnActivateUsingStoredState(input, train, storeLastForTBPTT); - input = temp.get(temp.size() - 1); - } else { - input = layers[i].activate(input, train, workspaceMgr); - } - } else { - throw new IllegalStateException("Forward pass type not supported for this method: " + fwdPassType); - } - - //Validation: Exception if invalid (bad layer implementation) - validateArrayWorkspaces(workspaceMgr, input, ArrayType.ACTIVATIONS, i, false, "Feed forward to layer (inference)"); - - out.add(input); - } - if(clearInputs) { - layers[i].clear(); - } - } - - return out; - } - - /** - * Feed-forward through the network at training time - returning a list of all activations in a workspace (WS_ALL_LAYERS_ACT) - * if workspaces are enabled for training; or detached if no workspaces are used.
- * Note: if using workspaces for training, this method requires that WS_ALL_LAYERS_ACT is open externally.
- * If using NO workspaces, requires that no external workspace is open
- * Note that this method does NOT clear the inputs to each layer - instead, they are in the WS_ALL_LAYERS_ACT workspace - * for use in later backprop. - * - * @param layerIndex Index (inclusive) to stop forward pass at. For all layers, use numLayers-1 - * @param fwdPassType Type of forward pass to perform (STANDARD or RNN_ACTIVATE_WITH_STORED_STATE only) - * @param storeLastForTBPTT ONLY used if fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE - * @param input Input to network - * @param fMask Feature mask array. May be null - * @param lMask Label mask aray. May be null. - * @return - */ - protected synchronized List ffToLayerActivationsInWs(int layerIndex, @NonNull FwdPassType fwdPassType, boolean storeLastForTBPTT, - @NonNull INDArray input, INDArray fMask, INDArray lMask){ - setInput(input); - setLayerMaskArrays(fMask, lMask); - - LayerWorkspaceMgr workspaceMgr; - if(layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ - WorkspaceUtils.assertNoWorkspacesOpen("Expected no workspace active in ffToLayerActivationsInWs when training workspace is set to NONE"); - workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - workspaceMgr = LayerWorkspaceMgr.builder() - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - if(input.isAttached()){ - //Don't leverage out of async DataSetIterator workspaces - workspaceMgr.setNoLeverageOverride(input.data().getParentWorkspace().getId()); - } - - if(layerWiseConfigurations.getCacheMode() != CacheMode.NONE){ - //For now: store cache mode activations in activations workspace - workspaceMgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG); - workspaceMgr.setWorkspace(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG); - } - - WorkspaceUtils.assertOpenAndActive(WS_ALL_LAYERS_ACT, "ffToLayerActivationsInWs method requires workspace WS_ALL_LAYERS_ACT to be open"); - } - workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - - List out = new ArrayList<>(); - out.add(workspaceMgr.leverageTo(ArrayType.INPUT, input)); //Probably unnecessary usually - - boolean traceLog = log.isTraceEnabled(); - - for( int i = 0; i <=layerIndex; i++) { - try(MemoryWorkspace wsFFWorking = workspaceMgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)){ - if (getLayerWiseConfigurations().getInputPreProcess(i) != null) { - input = getLayerWiseConfigurations().getInputPreProcess(i).preProcess(input, getInputMiniBatchSize(), workspaceMgr); - //Validation: Exception if invalid (bad preprocessor implementation) - validateArrayWorkspaces(workspaceMgr, input, ArrayType.ACTIVATIONS, i, true, "Feed forward to layer (training)"); - } - - if(traceLog){ - log.trace("About to forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); - } - - if(fwdPassType == FwdPassType.STANDARD){ - input = layers[i].activate(input, true, workspaceMgr); - } else if(fwdPassType == FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE){ - if (layers[i] instanceof RecurrentLayer) { - input = ((RecurrentLayer) layers[i]).rnnActivateUsingStoredState(input, true, storeLastForTBPTT, workspaceMgr); - }else if(layers[i] instanceof BaseWrapperLayer && ((BaseWrapperLayer)layers[i]).getUnderlying() instanceof RecurrentLayer) { - RecurrentLayer rl = 
(RecurrentLayer) ((BaseWrapperLayer)layers[i]).getUnderlying(); - input = rl.rnnActivateUsingStoredState(input, true, storeLastForTBPTT, workspaceMgr); - } else if (layers[i] instanceof MultiLayerNetwork) { - List temp = ((MultiLayerNetwork) layers[i]).rnnActivateUsingStoredState(input, true, storeLastForTBPTT); - input = temp.get(temp.size() - 1); - } else { - input = layers[i].activate(input, true, workspaceMgr); - } - } else { - throw new IllegalStateException("FwdPassType not supported for this method: " + fwdPassType); - } - - if(input == null){ - throw new IllegalStateException("Layer " + i + " returned null activations"); - } - - //Validation: Exception if invalid (bad layer implementation) - validateArrayWorkspaces(workspaceMgr, input, ArrayType.ACTIVATIONS, i, false, "Feed forward to layer (training)"); - validateArrayWorkspaces(workspaceMgr, layers[i].input(), ArrayType.INPUT, i, false, "Feed forward to layer (training)"); - - out.add(input); - - if(traceLog){ - log.trace("Completed forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); - } - } - } - - return out; - } - - /** - * Provide the output of the specified layer, detached from any workspace. This is most commonly used at inference/test - * time, and is more memory efficient than {@link #ffToLayerActivationsDetached(boolean, FwdPassType, boolean, int, INDArray, INDArray, INDArray, boolean)} - * and {@link #ffToLayerActivationsInWs(int, FwdPassType, boolean, INDArray, INDArray, INDArray)}.
- * This method clears all layer inputs. - * - * NOTE: in general, no workspaces should be activated externally for this method! - * This method handles the workspace activation as required - * - * @param train Training mode (true) or test/inference mode (false) - * @param fwdPassType Type of forward pass to perform (STANDARD, RNN_TIMESTEP or RNN_ACTIVATE_WITH_STORED_STATE) - * @param layerIndex Index (inclusive) to stop forward pass at. For all layers, use numLayers-1 - * @param input Input to the network - * @param featureMask Input/feature mask array. May be null. - * @param labelsMask Labels mask array. May be null - * @param outputWorkspace Optional - if provided, outputs should be placed in this workspace. NOTE: this workspace - * must be open - * @return Output of the specified layer, detached from any workspace - */ - protected INDArray outputOfLayerDetached(boolean train, @NonNull FwdPassType fwdPassType, int layerIndex, @NonNull INDArray input, - INDArray featureMask, INDArray labelsMask, MemoryWorkspace outputWorkspace){ - setInput(input); - setLayerMaskArrays(featureMask, labelsMask); - - /* - Idea here: we want to minimize memory, and return only the final array - Approach to do this: keep activations in memory only as long as we need them. - In MultiLayerNetwork, the output activations of layer X are used as input to layer X+1 - Which means: the workspace for layer X has to be open for both layers X and X+1 forward pass. - - Here, we'll use two workspaces for activations: - 1. For even index layers, activations WS that opens on start of even layer fwd pass, closes at end of odd layer fwd pass - 2. For odd index layers, activations WS that opens on start of odd layer fwd pass, closes at end of even layer fwd pass - - Additionally, we'll reconfigure the workspace manager for the *final* layer, so that we don't have to detach - */ - if(outputWorkspace == null || outputWorkspace instanceof DummyWorkspace) { - WorkspaceUtils.assertNoWorkspacesOpen("Expected no workspace active in outputOfLayerDetached", true); - } else { - Preconditions.checkState(outputWorkspace.isScopeActive(), "Workspace \"" + outputWorkspace.getId() + - "\" was provided for the network/layer outputs. When provided, this workspace must be opened before " + - "calling the output method; furthermore, closing the workspace is the responsibility of the user"); - } - - LayerWorkspaceMgr mgrEven; - LayerWorkspaceMgr mgrOdd; - - WorkspaceMode wsm = train ? layerWiseConfigurations.getTrainingWorkspaceMode() : layerWiseConfigurations.getInferenceWorkspaceMode(); - if(wsm == WorkspaceMode.NONE){ - mgrEven = LayerWorkspaceMgr.noWorkspaces(); - mgrOdd = mgrEven; - - //Check for external workspace - doesn't make sense to have one with workspace mode NONE - if(outputWorkspace != null && !(outputWorkspace instanceof DummyWorkspace)){ - throw new IllegalStateException("Workspace \"" + outputWorkspace.getId() + - "\" was provided for the network/layer outputs, however " + (train ? "training" : "inference") + - " workspace mode is set to NONE. Cannot put output activations into the specified workspace if" + - "workspaces are disabled for the network. 
use getConfiguration().setTraining/InferenceWorkspaceMode(WorkspaceMode.ENABLED)"); - } - } else { - mgrEven = LayerWorkspaceMgr.builder() - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.ACTIVATIONS, WS_LAYER_ACT_1, WS_LAYER_ACT_X_CONFIG) - .with(ArrayType.INPUT, WS_LAYER_ACT_2, WS_LAYER_ACT_X_CONFIG) //Inputs should always be in the previous WS - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - mgrOdd = LayerWorkspaceMgr.builder() - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.ACTIVATIONS, WS_LAYER_ACT_2, WS_LAYER_ACT_X_CONFIG) - .with(ArrayType.INPUT, WS_LAYER_ACT_1, WS_LAYER_ACT_X_CONFIG) //Inputs should always be in the previous WS - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - } - mgrEven.setHelperWorkspacePointers(helperWorkspaces); - mgrOdd.setHelperWorkspacePointers(helperWorkspaces); - - MemoryWorkspace wsActCloseNext = null; - MemoryWorkspace temp = null; - MemoryWorkspace initialWorkspace = Nd4j.getMemoryManager().getCurrentWorkspace(); - - boolean traceLog = log.isTraceEnabled(); - - Throwable t = null; - try { - for (int i = 0; i <= layerIndex; i++) { - LayerWorkspaceMgr mgr = (i % 2 == 0 ? mgrEven : mgrOdd); - - if (traceLog) { - log.trace("About to forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); - } - - //Edge case: for first layer with dropout, inputs can't be in previous workspace (as it hasn't been opened yet) - //Hence: put inputs in working memory - if (i == 0 && wsm != WorkspaceMode.NONE) { - mgr.setWorkspace(ArrayType.INPUT, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG); - } - - try (MemoryWorkspace wsFFWorking = mgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { //Working memory: opened/closed once per layer - //Activations workspaces: opened/closed every second layer. - //So mgrEven (WS_LAYER_ACT_1) open at start of 0, 2, 4, 8; closed at end of 1, 3, 5, 7 etc - //and mgrOdd (WS_LAYER_ACT_2) opened at start of 1, 3, 5, 7; closed at end of 2, 4, 6, 8 etc - temp = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS); - - //Note that because we're opening activation workspaces not in a simple nested order, we'll manually - // override the previous workspace setting. 
Otherwise, when we close these workspaces, the "current" - // workspace may be set to the incorrect one - temp.setPreviousWorkspace(initialWorkspace); - - - if (i == 0 && input.isAttached()) { - //Don't leverage out of async DataSetIterator workspaces - mgr.setNoLeverageOverride(input.data().getParentWorkspace().getId()); - } - - if (getLayerWiseConfigurations().getInputPreProcess(i) != null) { - input = getLayerWiseConfigurations().getInputPreProcess(i).preProcess(input, getInputMiniBatchSize(), mgr); - //Validation: Exception if invalid (bad preprocessor implementation) - validateArrayWorkspaces(mgr, input, ArrayType.ACTIVATIONS, i, true, "Output of layer (inference)"); - } - - if (i == layerIndex) { - if (outputWorkspace != null && !(outputWorkspace instanceof DummyWorkspace)) { - //Place activations in user-specified workspace - mgr.setWorkspace(ArrayType.ACTIVATIONS, outputWorkspace.getId(), outputWorkspace.getWorkspaceConfiguration()); - } else { - //Final activations: should be detached - mgr.setScopedOutFor(ArrayType.ACTIVATIONS); - } - } - - if (fwdPassType == FwdPassType.STANDARD) { - //Standard feed-forward case - if(i > 0 && ConvolutionUtils.layerHasConvolutionLayout(layers[i - 1].conf().getLayer()) - && ConvolutionUtils.layerHasConvolutionLayout(layers[i].conf().getLayer())) { - - CNN2DFormat preLayerFormat = ConvolutionUtils.getFormatForLayer(layers[i - 1].conf().getLayer()); - CNN2DFormat currLayerFormat = ConvolutionUtils.getFormatForLayer(layers[i].conf().getLayer()); - if(preLayerFormat != currLayerFormat) { - //NHWC case - if(preLayerFormat == CNN2DFormat.NCHW) { - input = input.permute(0,3,1,2); - } - //NCHW case - else if(preLayerFormat == CNN2DFormat.NHWC) { - input = input.permute(0,2,3,1); - - } - else - throw new IllegalStateException("No CNN2DDataFormat type found for previous layer!"); - } - - input = layers[i].activate(input, train, mgr); - } else if(i > 0 && Convolution1DUtils.hasRnnDataFormat(layers[i - 1].conf().getLayer()) - && Convolution1DUtils.hasRnnDataFormat(layers[i].conf().getLayer())) { - RNNFormat preLayerFormat = Convolution1DUtils.getRnnFormatFromLayer(layers[i - 1].conf().getLayer()); - RNNFormat currLayerFormat = Convolution1DUtils.getRnnFormatFromLayer(layers[i].conf().getLayer()); - //permute for next layer - if(preLayerFormat != currLayerFormat) { - input = input.permute(0,2,1); - } - - input = layers[i].activate(input, train, mgr); - - - } else - input = layers[i].activate(input, train, mgr); - } else if (fwdPassType == FwdPassType.RNN_TIMESTEP) { - //rnnTimeStep case - if (layers[i] instanceof RecurrentLayer) { - input = ((RecurrentLayer) layers[i]).rnnTimeStep(reshapeTimeStepInput(input), mgr); - } else if (layers[i] instanceof BaseWrapperLayer && ((BaseWrapperLayer) layers[i]).getUnderlying() instanceof RecurrentLayer) { - RecurrentLayer rl = ((RecurrentLayer) ((BaseWrapperLayer) layers[i]).getUnderlying()); - input = rl.rnnTimeStep(reshapeTimeStepInput(input), mgr); - } else if (layers[i] instanceof MultiLayerNetwork) { - input = ((MultiLayerNetwork) layers[i]).rnnTimeStep(reshapeTimeStepInput(input)); - } else { - input = layers[i].activate(input, false, mgr); - } - } else { - throw new IllegalArgumentException("Unsupported forward pass type for this method: " + fwdPassType); - } - layers[i].clear(); - //Validation: Exception if invalid (bad layer implementation) - validateArrayWorkspaces(mgr, input, ArrayType.ACTIVATIONS, i, false, "Output of layer (inference)"); - - if (wsActCloseNext != null) { - wsActCloseNext.close(); - } - 
wsActCloseNext = temp; - temp = null; - } - - if (traceLog) { - log.trace("Completed forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); - } - - //Edge case: for first layer with dropout, inputs can't be in previous workspace (as it hasn't been opened yet) - //Hence: put inputs in working memory -> set back to default for next use of workspace mgr - if (i == 0 && wsm != WorkspaceMode.NONE) { - mgr.setWorkspace(ArrayType.INPUT, WS_LAYER_ACT_2, WS_LAYER_ACT_X_CONFIG); //Inputs should always be in the previous WS - } - } - } catch (Throwable t2){ - t = t2; - } finally { - if(wsActCloseNext != null){ - try { - wsActCloseNext.close(); - } catch (Throwable t2){ - if(t != null){ - log.error("Encountered second exception while trying to close workspace after initial exception"); - log.error("Original exception:", t); - throw t2; - } - } - } - if(temp != null){ - //Should only be non-null on exception - while(temp.isScopeActive()){ - //For safety, should never occur in theory: a single close() call may not be sufficient, if - // workspace scope was borrowed and not properly closed when exception occurred - try{ - temp.close(); - } catch (Throwable t2){ - if(t != null){ - log.error("Encountered second exception while trying to close workspace after initial exception"); - log.error("Original exception:", t); - throw t2; - } - } - } - } - - Nd4j.getMemoryManager().setCurrentWorkspace(initialWorkspace); - - if(t != null){ - if(t instanceof RuntimeException){ - throw ((RuntimeException)t); - } - throw new RuntimeException("Error during neural network forward pass", t); - } - - if(outputWorkspace == null || outputWorkspace instanceof DummyWorkspace) { - WorkspaceUtils.assertNoWorkspacesOpen("Expected no workspace active at the end of outputOfLayerDetached", true); - } else { - Preconditions.checkState(outputWorkspace.isScopeActive(), "Expected output workspace to still be open" + - "at end of outputOfLayerDetached, but it is closed. This suggests an implementation or layer workspace problem"); - } - } - - return input; - } - - private INDArray reshapeTimeStepInput(INDArray input) { - if (input.rank() == 2) { // dynamically reshape to 3D input with one time-step. - long[] inShape = input.shape(); - input = input.reshape(inShape[0], inShape[1], 1); - } - return input; - } - - /** - * Compute activations of all layers from input (inclusive) to output of the final/output layer. - * Equivalent to calling {@link #feedForward(boolean)} with train=false - * - * @return the list of activations for each layer, including the input - */ - public List feedForward() { - return feedForward(false); - } - - /** - * Compute activations of all layers from input (inclusive) to output of the final/output layer. - * Equivalent to calling {@link #feedForward(INDArray, boolean)} with train = false - * - * @return the list of activations for each layer, including the input - */ - public List feedForward(INDArray input) { - if (input == null) - throw new IllegalStateException("Unable to perform feed forward; no input found"); - setInput(input); - return feedForward(); - } - - /** - * Compute the activations from the input to the output layer, given mask arrays (that may be null) - * The masking arrays are used in situations such an one-to-many and many-to-one rucerrent neural network (RNN) - * designs, as well as for supporting time series of varying lengths within the same minibatch for RNNs. 
- * Other than mask arrays, this is equivalent to calling {@link #feedForward(INDArray, boolean)} with train = false - */ - public List feedForward(INDArray input, INDArray featuresMask, INDArray labelsMask) { - setLayerMaskArrays(featuresMask, labelsMask); - List list = feedForward(input); - clearLayerMaskArrays(); - return list; - } - - - @Override - public Gradient gradient() { - return gradient; - } - - @Override - public Pair gradientAndScore() { - return new Pair<>(gradient(), score()); - } - - - /** - * Clone the MultiLayerNetwork - * @return A cloned MultiLayerNetwork with a copy of the configuration, parameters and updater identical to the current network. - */ - @Override - public MultiLayerNetwork clone() { - if(!initCalled) - init(); - MultiLayerConfiguration conf = this.layerWiseConfigurations.clone(); - MultiLayerNetwork ret = new MultiLayerNetwork(conf); - ret.init(this.params().dup(), false); - - if (solver != null) { - //If solver is null: updater hasn't been initialized -> getUpdater call will force initialization, however - Updater u = this.getUpdater(); - INDArray updaterState = u.getStateViewArray(); - if (updaterState != null) { - ret.getUpdater().setStateViewArray(ret, updaterState.dup(), false); - } - } - - if (hasAFrozenLayer()) { - //correct layers to frozen layers - Layer[] clonedLayers = ret.getLayers(); - for (int i = 0; i < layers.length; i++) { - if (layers[i] instanceof FrozenLayer) { - clonedLayers[i] = new FrozenLayer(ret.getLayer(i)); - } - } - ret.setLayers(clonedLayers); - } - return ret; - } - - protected boolean hasAFrozenLayer() { - for (int i = 0; i < layers.length - 1; i++) { - if (layers[i] instanceof FrozenLayer) - return true; - } - return false; - } - - - /** - * @deprecated To be removed. Use {@link #params()} instead - */ - @Deprecated - public INDArray params(boolean backwardOnly) { - return params(); - } - - - /** - * Returns a 1 x m vector where the vector is composed of a flattened vector of all of the parameters in the network.
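As a usage note for the feedForward(...) overloads above: the following is a minimal sketch of how they are typically called from client code. The class and variable names (FeedForwardSketch, net, features) are illustrative only, and an already initialized MultiLayerNetwork is assumed.

    import java.util.List;
    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.api.ndarray.INDArray;

    class FeedForwardSketch {
        // Print the shape of every activation returned by feedForward(INDArray).
        // Index 0 is the network input itself; index i is the output of layer i-1.
        static void printActivationShapes(MultiLayerNetwork net, INDArray features) {
            List<INDArray> acts = net.feedForward(features); // inference mode (train = false)
            for (int i = 0; i < acts.size(); i++) {
                System.out.println(i + ": " + java.util.Arrays.toString(acts.get(i).shape()));
            }
        }
    }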
- * See {@link #getParam(String)} and {@link #paramTable()} for a more useful/interpretable representation of the parameters.
- * Note that the parameter vector is not a copy, and changes to the returned INDArray will impact the network parameters. - * - * @return the parameters for this neural net - */ - @Override - public INDArray params() { - return flattenedParams; - } - - /** - * Set the parameters for this model. - * This expects a linear ndarray which will then be unpacked internally relative to the expected ordering of the model.
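A small sketch of the parameter-vector round trip implied by the params()/setParams(INDArray) contract above; note the dup(), since params() returns a view rather than a copy. The class name is illustrative and net is assumed to be an initialized MultiLayerNetwork.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.api.ndarray.INDArray;

    class ParamsSketch {
        static void snapshotAndRestore(MultiLayerNetwork net) {
            INDArray snapshot = net.params().dup(); // dup(): params() is a view of the flattened parameters
            // ... train or perturb the network here ...
            net.setParams(snapshot);                // restore; expects a flat vector of length numParams()
        }
    }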
- * See also: {@link #setParamTable(Map)} and {@link #setParam(String, INDArray)} - * - * @param params the parameters for the model - */ - @Override - public void setParams(INDArray params) { - if (flattenedParams == params) { - return; //No op - } - - if (flattenedParams != null && params.length() == flattenedParams.length()) { - if (params != flattenedParams) { - flattenedParams.assign(params); - } - } else { - if (flattenedParams == null) - flattenedParams = params.dup(); - int idx = 0; - for (int i = 0; i < getLayers().length; i++) { - Layer layer = getLayer(i); - long range = layer.numParams(); - if (range <= 0) - continue; //Some layers: no parameters (subsampling, etc) - INDArray get = params.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(idx, range + idx)); - layer.setParams(get); - idx += range; - } - } - } - - @Override - public void setParamsViewArray(INDArray params) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - @Override - public INDArray getGradientsViewArray() { - return flattenedGradients; - } - - @Override - public void setBackpropGradientsViewArray(INDArray gradients) { - int paramsSoFar = 0; - for (Layer layer : layers) { - if (layer.numParams() == 0) - continue; - layer.setBackpropGradientsViewArray(gradients.get(NDArrayIndex.interval(0,0,true), - NDArrayIndex.interval(paramsSoFar, paramsSoFar + layer.numParams()))); - paramsSoFar += layer.numParams(); - } - } - - @Override - public TrainingConfig getConfig() { - throw new UnsupportedOperationException("Not supported"); - } - - /** - * Returns the number of parameters in the network - * - * @return The number of parameters - */ - @Override - public long numParams() { - if(!isInitCalled()) - init(); - return flattenedParams == null ? 0 : flattenedParams.length(); //Maybe nul for 0 params net - } - - /** - * Returns the number of parameters in the network - * - * @param backwards If true: exclude any parameters uned only in unsupervised layerwise training (such as the decoder - * parameters in an autoencoder) - * @return The number of parameters - */ - @Override - public long numParams(boolean backwards) { - int length = 0; - for (int i = 0; i < layers.length; i++) - length += layers[i].numParams(backwards); - - return length; - } - - /** - * Sets the input and labels and returns the F1 score for the prediction with respect to the true labels - * - * @param data the data to score - * @return the score for the given input,label pairs - */ - @Override - public double f1Score(org.nd4j.linalg.dataset.api.DataSet data) { - return f1Score(data.getFeatures(), data.getLabels()); - } - - /** - * Perform minibatch training on all minibatches in the DataSetIterator, for the specified number of epochs. - * Equvalent to calling {@link #fit(DataSetIterator)} numEpochs times in a loop - * - * @param iterator Training data (DataSetIterator). Iterator must support resetting - * @param numEpochs Number of training epochs, >= 1 - */ - public void fit(@NonNull DataSetIterator iterator, int numEpochs){ - Preconditions.checkArgument(numEpochs > 0, "Number of epochs much be > 0. Got numEpochs = %s", numEpochs); - Preconditions.checkArgument(numEpochs == 1 || iterator.resetSupported(), "Cannot perform multiple epochs training using" + - "iterator thas does not support resetting (iterator.resetSupported() returned false)"); - - for(int i=0; i - * Note that this method does not do layerwise pretraining.
- * For pretraining, use the pretrain method: {@link #pretrain(DataSetIterator)}
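A usage sketch for the single-epoch and multi-epoch fit overloads documented here; trainData may be any DataSetIterator, and resetting must be supported when training for more than one epoch. Names are illustrative.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

    class FitSketch {
        static void train(MultiLayerNetwork net, DataSetIterator trainData, int numEpochs) {
            // Multi-epoch fitting requires trainData.resetSupported() == true
            net.fit(trainData, numEpochs); // equivalent to calling net.fit(trainData) numEpochs times
        }
    }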
- * @param iterator Training data (DataSetIterator) - */ - @Override - public void fit(DataSetIterator iterator) { - try{ - fitHelper(iterator); - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - private synchronized void fitHelper(DataSetIterator iterator){ - // we're wrapping all iterators into AsyncDataSetIterator to provide background prefetch - where appropriate - DataSetIterator iter; - boolean destructable = false; - if (iterator.asyncSupported()) { - iter = new AsyncDataSetIterator(iterator, Math.min(Nd4j.getAffinityManager().getNumberOfDevices() * 2, 2), true); - destructable = true; - } else { - iter = iterator; - } - - for (TrainingListener tl : trainingListeners) { - tl.onEpochStart(this); - } - - LayerWorkspaceMgr workspaceMgr; - if(getLayerWiseConfigurations().getTrainingWorkspaceMode() == WorkspaceMode.NONE){ - workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - workspaceMgr = LayerWorkspaceMgr.builder() - .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_BP_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - //Note for updater working memory, we have the option to re-use WS_ALL_LAYERS_ACT or FF/BP_WORKING_MEM - // as these should be closed by the time updaters are executed - //Generally, WS_ALL_LAYERS_ACT will be the larger of the two, so we'll use this - .with(ArrayType.UPDATER_WORKING_MEM, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .build(); - } - workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - - update(TaskUtils.buildTask(iter)); - if (!iter.hasNext() && iter.resetSupported()) { - iter.reset(); - } - long time1 = System.currentTimeMillis(); - while (iter.hasNext()) { - - DataSet next = iter.next(); - long time2 = System.currentTimeMillis(); - - lastEtlTime.set((time2 - time1)); - - if (next.getFeatures() == null || next.getLabels() == null) - break; - - // TODO: basically we want to wrap internals of this loop into workspace - - - boolean hasMaskArrays = next.hasMaskArrays(); - - if (layerWiseConfigurations.getBackpropType() == BackpropType.TruncatedBPTT) { - doTruncatedBPTT(next.getFeatures(), next.getLabels(), next.getFeaturesMaskArray(), - next.getLabelsMaskArray(), workspaceMgr); - } else { - if (hasMaskArrays) - setLayerMaskArrays(next.getFeaturesMaskArray(), next.getLabelsMaskArray()); - - setInput(next.getFeatures()); - setLabels(next.getLabels()); - - if (solver == null) { - try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this) - .build(); - } - } - - //TODO CACHE - solver.optimize(workspaceMgr); - } - - if (hasMaskArrays) - clearLayerMaskArrays(); - - time1 = System.currentTimeMillis(); - synchronizeIterEpochCounts(); - } - - if (!trainingListeners.isEmpty()) { - for (TrainingListener tl : trainingListeners) { - tl.onEpochEnd(this); - } - } - - clearLayersStates(); - - if (destructable) - ((AsyncDataSetIterator) iter).shutdown(); - - incrementEpochCount(); - } - - /** - * Calculate parameter gradients and input activation gradients given the input and labels, and 
optionally mask arrays - * - * @param features Features for gradient calculation - * @param label Labels for gradient - * @param fMask Features mask array (may be null) - * @param labelMask Label mask array (may be null) - * @return A pair of gradient arrays: parameter gradients (in Gradient object) and input activation gradients - */ - public Pair calculateGradients(@NonNull INDArray features, @NonNull INDArray label, - INDArray fMask, INDArray labelMask) { - try{ - return calculateGradientsHelper(features, label, fMask, labelMask); - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - private Pair calculateGradientsHelper(INDArray features, INDArray label, INDArray fMask, - INDArray labelMask){ - setInput(features); - setLabels(label); - setLayerMaskArrays(fMask, labelMask); - - LayerWorkspaceMgr mgr; - if(layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ - mgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - mgr = LayerWorkspaceMgr.builder() - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_BP_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - if(layerWiseConfigurations.getCacheMode() != null){ - //For now: store cache mode activations in activations workspace - mgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG); - } - } - mgr.setHelperWorkspacePointers(helperWorkspaces); - - //Calculate activations (which are stored in each layer, and used in backprop) - try(MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS)) { - //First: do a feed-forward through the network - //Note that we don't actually need to do the full forward pass through the output layer right now; but we do - // need the input to the output layer to be set (such that backprop can be done) - List activations = ffToLayerActivationsInWs(layers.length - 2, FwdPassType.STANDARD, false, input, mask, fMask); - if (!trainingListeners.isEmpty()) { - //TODO: We possibly do want output layer activations in some cases here... - for (TrainingListener tl : trainingListeners) { - tl.onForwardPass(this, activations); - } - } - INDArray inputToOutputLayer = activations.get(activations.size() - 1); - if (layerWiseConfigurations.getInputPreProcess(layers.length - 1) != null) { - inputToOutputLayer = layerWiseConfigurations.getInputPreProcess(layers.length - 1) - .preProcess(inputToOutputLayer, getInputMiniBatchSize(), mgr); - //Validate activations location - } - getOutputLayer().setInput(inputToOutputLayer, mgr); - - Pair p = calcBackpropGradients(null, true, false, true); - if(p.getSecond() != null){ - p.setSecond( p.getSecond().detach()); - } - return p; - } - } - - /** Calculate gradients and errors. Used in two places: - * (a) backprop (for standard multi layer network learning) - * (b) backpropGradient (layer method, for when MultiLayerNetwork is used as a layer) - * @param epsilon Errors (technically errors .* activations). Not used if withOutputLayer = true - * @param withOutputLayer if true: assume last layer is output layer, and calculate errors based on labels. 
In this - * case, the epsilon input is not used (may/should be null). - * If false: calculate backprop gradients - * @param returnInputActGrad If true: terun the input activation gradients (detached). False: don't return - * @return Gradients and the error (epsilon) at the input - */ - protected Pair calcBackpropGradients(INDArray epsilon, boolean withOutputLayer, boolean tbptt, - boolean returnInputActGrad) { - if (flattenedGradients == null) { - initGradientsView(); - } - String multiGradientKey; - Gradient gradient = new DefaultGradient(flattenedGradients); - - LayerWorkspaceMgr mgrEven; - LayerWorkspaceMgr mgrOdd; - - if(layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ - mgrEven = LayerWorkspaceMgr.noWorkspaces(); - mgrOdd = mgrEven; - WorkspaceUtils.assertNoWorkspacesOpen("Expected no workspace active in calcBackpropGradients when " + - "training workspace is set to none"); - } else { - /* - Workspaces for backprop in MLN share some features with outputOfLayerDetached, in terms of the - "two alternating workspaces" idea (but for activation gradients here, instead of activations there). - - Workspace design for backprop: - First: we calculate all activations, and ensure they are in WS_ALL_LAYERS_ACT. We assume this is done - EXTERNALLY to this method - Then: we iterate backwards over layers. - - Activations gradient workspaces: opened/closed every second layer. - mgrEven (WS_LAYER_ACT_1) activation grad WS opens at start of 8, 4, 2, 0; closed at end of 7, 5, 3, 1 etc - mgrOdd (WS_LAYER_ACT_2) activation grad WS opens at start of 7, 3, 5, 1; closed at end of 6, 4, 2, 0 etc - - */ - - mgrEven = LayerWorkspaceMgr.builder() - //Activations in context of backprop (preOut methods etc) are not used outside of the layer itself - .with(ArrayType.ACTIVATIONS, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) //Usually not required here. Exception: OutputLayer dropout - .with(ArrayType.ACTIVATION_GRAD, WS_LAYER_ACT_1, WS_LAYER_ACT_X_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_BP_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - mgrOdd = LayerWorkspaceMgr.builder() - //Activations in context of backprop (preOut methods etc) are not used outside of the layer itself - .with(ArrayType.ACTIVATIONS, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) //Usually not required here. 
Exception: OutputLayer dropout - .with(ArrayType.ACTIVATION_GRAD, WS_LAYER_ACT_2, WS_LAYER_ACT_X_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_BP_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - if(epsilon == null) { - //If epsilon is non-null: external errors use case -> inputs are already detached - WorkspaceUtils.assertOpenActiveAndCurrent(WS_ALL_LAYERS_ACT, "calcBackpropGradients method requires workspace WS_ALL_LAYERS_ACT" + - " to be open when workspaces are used"); - } - } - mgrEven.setHelperWorkspacePointers(helperWorkspaces); - mgrOdd.setHelperWorkspacePointers(helperWorkspaces); - - //calculate and apply the backward gradient for every layer - /* - * Skip the output layer for the indexing and just loop backwards updating the coefficients for each layer. - * (when withOutputLayer == true) - * - * Activate applies the activation function for each layer and sets that as the input for the following layer. - * - * Typical literature contains most trivial case for the error calculation: wT * weights - * This interpretation transpose a few things to get mini batch because ND4J is rows vs columns organization for params - */ - int numLayers = getnLayers(); - //Store gradients is a list; used to ensure iteration order in DefaultGradient linked hash map. i.e., layer 0 first instead of output layer - LinkedList> gradientList = new LinkedList<>(); - - - Pair currPair = null; - MemoryWorkspace wsActGradCloseNext = null; - MemoryWorkspace wsActGradTemp = null; - MemoryWorkspace initialWorkspace = Nd4j.getMemoryManager().getCurrentWorkspace(); - - boolean traceLog = log.isTraceEnabled(); - - Throwable t = null; - try { - for (int i = layers.length - 1; i >= 0; i--) { - if (layers[i] instanceof FrozenLayer) { - break; - } - - if (traceLog) { - log.trace("About to backprop: {} - {}", i, layers[i].getClass().getSimpleName()); - } - - LayerWorkspaceMgr workspaceMgr = (i % 2 == 0 ? mgrEven : mgrOdd); - - if (withOutputLayer && i == layers.length - 1) { - if (!(getOutputLayer() instanceof IOutputLayer)) { - log.warn("Warning: final layer isn't output layer. You cannot use backprop without an output layer."); - return null; - } - - IOutputLayer outputLayer = (IOutputLayer) getOutputLayer(); - if (labels == null && outputLayer.needsLabels()) - throw new IllegalStateException("No labels found"); - outputLayer.setLabels(labels); - } - - //Open activation gradients WS *then* BP working memory, so BP working memory is opened last for use in layers - wsActGradTemp = workspaceMgr.notifyScopeEntered(ArrayType.ACTIVATION_GRAD); - try (MemoryWorkspace wsBPWorking = workspaceMgr.notifyScopeEntered(ArrayType.BP_WORKING_MEM)) { - - //Note that because we're opening activation workspaces not in a simple nested order, we'll manually - // override the previous workspace setting. Otherwise, when we close these workspaces, the "current" - // workspace may be set to the incorrect one - wsActGradTemp.setPreviousWorkspace(initialWorkspace); - wsBPWorking.setPreviousWorkspace(initialWorkspace); - - INDArray eps = (i == layers.length - 1 ? 
epsilon : currPair.getRight()); //eps is null for OutputLayer - - if (!tbptt) { - //Standard case - currPair = layers[i].backpropGradient(eps, workspaceMgr); - } else { - //TBPTT gradient - if (layers[i] instanceof RecurrentLayer) { - currPair = ((RecurrentLayer) layers[i]).tbpttBackpropGradient(currPair.getSecond(), - layerWiseConfigurations.getTbpttBackLength(), workspaceMgr); - } else { - currPair = layers[i].backpropGradient(currPair.getSecond(), workspaceMgr); - } - } - - if (currPair.getSecond() != null) { - //Edge case: may be null for Embedding layer, for example - validateArrayWorkspaces(workspaceMgr, currPair.getSecond(), ArrayType.ACTIVATION_GRAD, i, - false, "Backprop"); - } - - for (Map.Entry entry : currPair.getFirst().gradientForVariable().entrySet()) { - String origName = entry.getKey(); - multiGradientKey = i + "_" + origName; - gradientList.addLast(new Triple<>(multiGradientKey, entry.getValue(), - currPair.getFirst().flatteningOrderForVariable(origName))); - } - if (getLayerWiseConfigurations().getInputPreProcess(i) != null) { - currPair = new Pair<>(currPair.getFirst(), - this.layerWiseConfigurations.getInputPreProcess(i) - .backprop(currPair.getSecond(), getInputMiniBatchSize(), workspaceMgr)); - if (i > 0 && currPair.getSecond() != null) { - validateArrayWorkspaces(workspaceMgr, currPair.getSecond(), ArrayType.ACTIVATION_GRAD, i, - true, "Backprop"); - } - } - - if (i == 0) { - if (returnInputActGrad && currPair.getSecond() != null) { - currPair.setSecond(currPair.getSecond().detach()); - } else { - currPair.setSecond(null); - } - } - - if (wsActGradCloseNext != null) { - wsActGradCloseNext.close(); - } - wsActGradCloseNext = wsActGradTemp; - wsActGradTemp = null; - } - - if (traceLog) { - log.trace("Completed backprop: {} - {}", i, layers[i].getClass().getSimpleName()); - } - } - } catch (Throwable thr ){ - t = thr; - } finally { - if(wsActGradCloseNext != null){ - try { - wsActGradCloseNext.close(); - } catch (Throwable t2){ - if(t != null){ - log.error("Encountered second exception while trying to close workspace after initial exception"); - log.error("Original exception:", t); - throw t2; - } - } - } - if(wsActGradTemp != null) { - //Should only be non-null on exception - try { - wsActGradTemp.close(); - } catch (Throwable t2) { - if (t != null) { - log.error("Encountered second exception while trying to close workspace after initial exception"); - log.error("Original exception:", t); - throw t2; - } - } - } - Nd4j.getMemoryManager().setCurrentWorkspace(initialWorkspace); - - if(t != null){ - if(t instanceof RuntimeException){ - throw ((RuntimeException)t); - } - throw new RuntimeException("Error during neural network forward pass", t); - } - } - - if (layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE) { - WorkspaceUtils.assertNoWorkspacesOpen("Expected no workspace active in calcBackpropGradients when " + - "training workspace is set to none"); - } else { - if(epsilon == null) { - //If epsilon != null: external errors use case (inputs are detached instead) - WorkspaceUtils.assertOpenActiveAndCurrent(WS_ALL_LAYERS_ACT, "calcBackpropGradients: WS_ALL_LAYERS_ACT is no" + - " longer the currently open/active workspace"); - } - } - - //Add gradients to Gradients (map), in correct order - for (Triple triple : gradientList) { - gradient.setGradientFor(triple.getFirst(), triple.getSecond(), triple.getThird()); - } - - return new Pair<>(gradient, currPair.getSecond()); - } - - protected void doTruncatedBPTT(INDArray input, INDArray labels, INDArray 
featuresMaskArray, - INDArray labelsMaskArray, LayerWorkspaceMgr workspaceMgr) { - if (input.rank() != 3 || labels.rank() != 3) { - log.warn("Cannot do truncated BPTT with non-3d inputs or labels. Expect input with shape [miniBatchSize,nIn,timeSeriesLength], got " - + Arrays.toString(input.shape()) + "\tand labels with shape " - + Arrays.toString(labels.shape())); - return; - } - if (input.size(2) != labels.size(2)) { - log.warn("Input and label time series have different lengths: {} input length, {} label length", - input.size(2), labels.size(2)); - return; - } - - int fwdLen = layerWiseConfigurations.getTbpttFwdLength(); - update(TaskUtils.buildTask(input, labels)); - val timeSeriesLength = input.size(2); - long nSubsets = timeSeriesLength / fwdLen; - if (timeSeriesLength % fwdLen != 0) - nSubsets++; //Example: 100 fwdLen with timeSeriesLength=120 -> want 2 subsets (1 of size 100, 1 of size 20) - - rnnClearPreviousState(); - - for (int i = 0; i < nSubsets; i++) { - long startTimeIdx = (long) i * fwdLen; - long endTimeIdx = startTimeIdx + fwdLen; - if (endTimeIdx > timeSeriesLength) - endTimeIdx = timeSeriesLength; - - if (startTimeIdx > Integer.MAX_VALUE || endTimeIdx > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - INDArray[] subsets = getSubsetsForTbptt((int) startTimeIdx, (int) endTimeIdx, input, labels, - featuresMaskArray, labelsMaskArray); - - setInput(subsets[0]); - setLabels(subsets[1]); - setLayerMaskArrays(subsets[2], subsets[3]); - - if (solver == null) { - try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this) - .build(); - } - } - solver.optimize(workspaceMgr); - - //Finally, update the state of the RNN layers: - updateRnnStateWithTBPTTState(); - } - - rnnClearPreviousState(); - clearLayerMaskArrays(); - } - - private INDArray[] getSubsetsForTbptt(int startTimeIdx, int endTimeIdx, INDArray input, INDArray labels, - INDArray fMask, INDArray lMask ){ - INDArray[] out = new INDArray[4]; - out[0] = input.get(NDArrayIndex.all(), NDArrayIndex.all(), - NDArrayIndex.interval(startTimeIdx, endTimeIdx)); - out[1] = labels.get(NDArrayIndex.all(), NDArrayIndex.all(), - NDArrayIndex.interval(startTimeIdx, endTimeIdx)); - - if (fMask != null) { - out[2] = fMask.get(NDArrayIndex.all(), - NDArrayIndex.interval(startTimeIdx, endTimeIdx)); - } - if (lMask != null) { - out[3] = lMask.get(NDArrayIndex.all(), - NDArrayIndex.interval(startTimeIdx, endTimeIdx)); - } - - return out; - } - - /** - * Intended for internal/developer use - */ - public void updateRnnStateWithTBPTTState() { - for (int i = 0; i < layers.length; i++) { + } else if (fwdPassType == FwdPassType.RNN_TIMESTEP) { + // rnnTimeStep case if (layers[i] instanceof RecurrentLayer) { - RecurrentLayer l = ((RecurrentLayer) layers[i]); - l.rnnSetPreviousState(l.rnnGetTBPTTState()); + input = ((RecurrentLayer) layers[i]).rnnTimeStep(reshapeTimeStepInput(input), mgr); + } else if (layers[i] instanceof BaseWrapperLayer + && ((BaseWrapperLayer) layers[i]).getUnderlying() instanceof RecurrentLayer) { + RecurrentLayer rl = ((RecurrentLayer) ((BaseWrapperLayer) layers[i]).getUnderlying()); + input = rl.rnnTimeStep(reshapeTimeStepInput(input), mgr); } else if (layers[i] instanceof MultiLayerNetwork) { - ((MultiLayerNetwork) layers[i]).updateRnnStateWithTBPTTState(); - } - } - } - - /** - * Get the {@link TrainingListener}s set for this network, if any - * @return listeners set for this network - */ - public Collection 
getListeners() { - return trainingListeners; - } - - /** - * @deprecated Use {@link #getListeners()} - */ - @Deprecated - public Collection getTrainingListeners() { - return trainingListeners; - } - - @Override - public void setListeners(Collection listeners) { - if (layers == null) { - init(); - } - for (Layer layer : layers) { - layer.setListeners(listeners); - } - - if (solver != null) { - solver.setListeners(listeners); - } - - this.trainingListeners.clear(); - if (listeners != null) { - this.trainingListeners.addAll(listeners); - } - } - - /** - * This method ADDS additional TrainingListener to existing listeners - * - * @param listeners - */ - @Override - public void addListeners(TrainingListener... listeners) { - Collections.addAll(trainingListeners, listeners); - - // fixme this is wrong, since it removes existing listeners from the solver - if (solver != null) { - solver.setListeners(this.trainingListeners); - } - } - - @Override - public void setListeners(TrainingListener... listeners) { - Collection cListeners = new ArrayList<>(); - //Check: user might have done setListeners(null) thinking this would clear the current listeners. - //This results in an TrainingListener[1] with a single null value -> results in a NPE later - if (listeners != null && listeners.length > 0) { - for (TrainingListener i : listeners) { - if (i != null) - cListeners.add(i); - } - } - setListeners(cListeners); - } - - /** - * Usable only for classification networks in conjunction with OutputLayer. Cannot be used with RnnOutputLayer, - * CnnLossLayer, or networks used for regression.
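For the listener-management methods just above (setListeners / addListeners), a brief sketch; ScoreIterationListener is a standard DL4J training listener, and the print frequencies used here are arbitrary.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.deeplearning4j.optimize.listeners.ScoreIterationListener;

    class ListenerSketch {
        static void attach(MultiLayerNetwork net) {
            net.setListeners(new ScoreIterationListener(10));  // replaces any existing listeners
            net.addListeners(new ScoreIterationListener(100)); // adds to the existing listener set
        }
    }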
- * To get the raw output activations of the output layer, use {@link #output(INDArray)} or similar.
- *
- * Equivalent to argmax(this.output(input)): Returns the predicted class indices corresponding to the predictions - * for each example in the features array. - * - * @param d The input features to perform inference on - * @return The predicted class index for each example - */ - @Override - public int[] predict(INDArray d) { - INDArray output = output(d, Layer.TrainingMode.TEST); - - if (d.size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - - Preconditions.checkState(output.rank() == 2, "predict(INDArray) method can only be used on rank 2 output - got array with rank %s", output.rank()); - return output.argMax(1).toIntVector(); - } - - /** - * As per {@link #predict(INDArray)} but the returned values are looked up from the list of label names - * in the provided DataSet - */ - @Override - public List predict(org.nd4j.linalg.dataset.api.DataSet dataSet) { - Preconditions.checkState(dataSet.getLabelNamesList() != null, "This method can only be used when the DataSet contains a label name list"); - int[] intRet = predict(dataSet.getFeatures()); - List ret = new ArrayList<>(); - for (int i = 0; i < intRet.length; i++) { - ret.add(i, dataSet.getLabelName(intRet[i])); - } - return ret; - } - - /** - * Fit the model for one iteration on the provided data - * - * @param data the examples to classify (one example in each row) - * @param labels the example labels(a binary outcome matrix) - */ - @Override - public void fit(INDArray data, INDArray labels) { - fit(data, labels, null, null); - } - - /** - * Fit the model for one iteration on the provided data - * - * @param features the examples to classify (one example in each row) - * @param labels the example labels(a binary outcome matrix) - * @param featuresMask The mask array for the features (used for variable length time series, etc). May be null. - * @param labelsMask The mask array for the labels (used for variable length time series, etc). May be null. 
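A minimal classification sketch for predict(INDArray) as documented above, assuming a classification network whose output layer produces rank-2 activations; names are illustrative.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.api.ndarray.INDArray;

    class PredictSketch {
        static void classify(MultiLayerNetwork net, INDArray features) {
            int[] classes = net.predict(features); // argmax over the output activations, one index per example
            for (int c : classes) {
                System.out.println("predicted class: " + c);
            }
        }
    }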
- */ - public synchronized void fit(INDArray features, INDArray labels, INDArray featuresMask, INDArray labelsMask) { - try{ - fitHelper(features, labels, featuresMask, labelsMask); - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - private void fitHelper(INDArray features, INDArray labels, INDArray featuresMask, INDArray labelsMask){ - if(numParams() == 0) { - //No op: can't fit a network with 0 parameters - return; - } - - setInput(features); - setLabels(labels); - this.setLayerMaskArrays(featuresMask, labelsMask); - update(TaskUtils.buildTask(features, labels)); - - LayerWorkspaceMgr workspaceMgr; - if(layerWiseConfigurations.getTrainingWorkspaceMode() == null){ - workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - workspaceMgr = LayerWorkspaceMgr.builder() - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - //Note for updater working memory, we have the option to re-use WS_ALL_LAYERS_ACT or FF/BP_WORKING_MEM - // these should be closed by the time updaters are executed - //Generally, WS_ALL_LAYERS_ACT will be the larger of the two, so we'll use this - .with(ArrayType.UPDATER_WORKING_MEM, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .build(); - } - workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); - - if (layerWiseConfigurations.getBackpropType() == BackpropType.TruncatedBPTT) { - doTruncatedBPTT(features, labels, featuresMask, labelsMask, workspaceMgr); - } else { - if (solver == null) { - try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); - } - } - //TODO CACHE WORKSPACE, IF USED??? - solver.optimize(workspaceMgr); - } - - clearLayerMaskArrays(); - clearLayersStates(); - synchronizeIterEpochCounts(); - } - - @Override - public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr){ - throw new UnsupportedOperationException("Not supported: use pretrainLayer"); - } - - - /** - * Fit the model for one iteration on the provided data - * - * @param data the data to train on - */ - @Override - public void fit(org.nd4j.linalg.dataset.api.DataSet data) { - fit(data.getFeatures(), data.getLabels(), data.getFeaturesMaskArray(), data.getLabelsMaskArray()); - } - - /** - * Fit the model for one iteration on the provided data - * - * @param examples the examples to classify (one example in each row) - * @param labels the labels for each example (the number of labels must match - */ - @Override - public void fit(INDArray examples, int[] labels) { - org.deeplearning4j.nn.conf.layers.OutputLayer layerConf = - (org.deeplearning4j.nn.conf.layers.OutputLayer) getOutputLayer().conf().getLayer(); - - if (layerConf.getNOut() > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - fit(examples, FeatureUtil.toOutcomeMatrix(labels, (int) layerConf.getNOut())); - } - - - /** - * Perform inference on the provided input/features - i.e., perform forward pass using the provided input/features - * and return the output of the final layer. - * - * @param input Input to the network - * @param train whether the output is test or train. This mainly affect hyper parameters such as dropout and - * batch normalization, which have different behaviour for test vs. 
train - * @return The network predictions - i.e., the activations of the final layer - */ - public INDArray output(INDArray input, TrainingMode train) { - return output(input, train == TrainingMode.TRAIN); - } - - /** - * Perform inference on the provided input/features - i.e., perform forward pass using the provided input/features - * and return the output of the final layer. - * - * @param input Input to the network - * @param train whether the output is test or train. This mainly affects hyperparameters such as dropout and - * batch normalization, which have different behaviour for test vs. train - * @return The network predictions - i.e., the activations of the final layer - */ - public INDArray output(INDArray input, boolean train) { - return output(input, train, null, null); - } - - /** - * Calculate the output of the network, with masking arrays. The masking arrays are used in situations such - * as one-to-many and many-to-one recurrent neural network (RNN) designs, as well as for supporting time series - * of varying lengths within the same minibatch. - */ - public INDArray output(INDArray input, boolean train, INDArray featuresMask, INDArray labelsMask) { - return output(input, train, featuresMask, labelsMask, null); - } - - /** - * Get the network output, which is optionally placed in the specified memory workspace.
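For the plain output(...) overloads above, a minimal inference sketch; the meaning of the returned activations (e.g. softmax class probabilities) depends on the configured output layer.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.api.ndarray.INDArray;

    class OutputSketch {
        static INDArray infer(MultiLayerNetwork net, INDArray features) {
            // train = false: inference mode (dropout disabled, batch norm uses running statistics)
            return net.output(features, false); // activations of the final layer, detached from any workspace
        }
    }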
- * If no memory workspace is provided, the output will be detached (not in any workspace).
- * If a memory workspace is provided, the output activation array (i.e., the INDArray returned by this method) - * will be placed in the specified workspace. This workspace must be opened by the user before calling this method - - * and the user is responsible for (a) closing this workspace, and (b) ensuring the output array is not used out - * of scope (i.e., not used after closing the workspace to which it belongs - as this is likely to cause either - * an exception when used, or a crash). - * - * @param input Input to the network - * @param train True for train, false otherwise - * @param outputWorkspace May be null. If not null: the workspace MUST be opened before calling this method. - * @return The output/activations from the network (either detached or in the specified workspace if provided) - */ - public INDArray output(INDArray input, boolean train, MemoryWorkspace outputWorkspace) { - return output(input, train, null, null, outputWorkspace); - } - - /** - * Get the network output, which is optionally placed in the specified memory workspace.
- * If no memory workspace is provided, the output will be detached (not in any workspace).
- * If a memory workspace is provided, the output activation array (i.e., the INDArray returned by this method) - * will be placed in the specified workspace. This workspace must be opened by the user before calling this method - - * and the user is responsible for (a) closing this workspace, and (b) ensuring the output array is not used out - * of scope (i.e., not used after closing the workspace to which it belongs - as this is likely to cause either - * an exception when used, or a crash). - * - * @param input Input to the network - * @param train True for train, false otherwise - * @param outputWorkspace May be null. If not null: the workspace MUST be opened before calling this method. - * @return The output/activations from the network (either detached or in the specified workspace if provided) - */ - public synchronized INDArray output(INDArray input, boolean train, INDArray featuresMask, INDArray labelsMask, MemoryWorkspace outputWorkspace) { - try { - return outputOfLayerDetached(train, FwdPassType.STANDARD, layers.length - 1, input, featuresMask, labelsMask, outputWorkspace); - } catch (OutOfMemoryError e) { - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - /** - * This method uses provided OutputAdapter to return custom object built from INDArray - * - * PLEASE NOTE: This method uses dedicated Workspace for output generation to avoid redundant allocations - * - * @param inputs Input arrays to the netwonk - * @param inputMasks Optional input mask arrays (may be null) - * @param labelMasks Optional label mask arrays (may be null - * @param outputAdapter OutputAdapter instance - * @param T extends Object - * @return T instance produced by OutputAdapter - */ - public synchronized T output(@NonNull INDArray inputs, INDArray inputMasks, INDArray labelMasks, @NonNull OutputAdapter outputAdapter) { - try (val ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(WS_ALL_LAYERS_ACT_CONFIG, WS_OUTPUT_MEM)) { - if (outputAdapter instanceof ModelAdapter) - return ((ModelAdapter) outputAdapter).apply(this, new INDArray[]{inputs}, new INDArray[]{ inputMasks}, new INDArray[]{labelMasks}); - else - return outputAdapter.apply(output(inputs, false, inputMasks, labelMasks, ws)); - } - } - - /** - * Perform inference on the provided input/features - i.e., perform forward pass using the provided input/features - * and return the output of the final layer. Equivalent to {@link #output(INDArray, boolean)} with train=false - i.e., - * this method is used for inference. - * - * @param input Input to the network - * @return The network predictions - i.e., the activations of the final layer - */ - public INDArray output(INDArray input) { - return output(input, TrainingMode.TEST); - } - - /** - * Generate the output for all examples/batches in the input iterator, and concatenate them into a single array. - * See {@link #output(INDArray)}
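A hedged sketch of the workspace-scoped output path described above. The workspace id ("SKETCH_OUTPUT_WS") and the default WorkspaceConfiguration are placeholders; the essential points from the javadoc are that the caller opens the workspace before calling output(...) and must not use the returned array after closing it.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.api.memory.MemoryWorkspace;
    import org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration;
    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.factory.Nd4j;

    class WorkspaceOutputSketch {
        static void infer(MultiLayerNetwork net, INDArray features) {
            WorkspaceConfiguration conf = WorkspaceConfiguration.builder().build(); // placeholder config
            try (MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(conf, "SKETCH_OUTPUT_WS")) {
                INDArray out = net.output(features, false, null, null, ws);
                // 'out' lives inside the workspace: consume it here, before the workspace closes
                System.out.println(out.sumNumber());
            }
        }
    }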
- * NOTE 1: The output array can require a considerable amount of memory for iterators with a large number of examples
- * NOTE 2: This method cannot be used for variable length time series outputs, as this would require padding arrays - * for some outputs, or returning a mask array (which cannot be done with this method). For variable length time - * series applications, use one of the other output methods. This method also cannot be used with fully convolutional - * networks with different output sizes (for example, segmentation on different input image sizes). - * - * - * @param iterator Data to pass through the network - * @return output for all examples in the iterator, concatenated into a - */ - public INDArray output(DataSetIterator iterator, boolean train) { - List outList = new ArrayList<>(); - long[] firstOutputShape = null; - while (iterator.hasNext()) { - DataSet next = iterator.next(); - INDArray features = next.getFeatures(); - - if (features == null) - continue; - - INDArray fMask = next.getFeaturesMaskArray(); - INDArray lMask = next.getLabelsMaskArray(); - INDArray output = this.output(features, train, fMask, lMask); - outList.add(output); - if(firstOutputShape == null){ - firstOutputShape = output.shape(); + input = ((MultiLayerNetwork) layers[i]).rnnTimeStep(reshapeTimeStepInput(input)); } else { - //Validate that shapes are the same (may not be, for some RNN variable length time series applications) - long[] currShape = output.shape(); - Preconditions.checkState(firstOutputShape.length == currShape.length, "Error during forward pass:" + - "different minibatches have different output array ranks - first minibatch shape %s, last minibatch shape %s", firstOutputShape, currShape); - for( int i=1; i - * This is equivalent to {@link #score(DataSet, boolean)} with training==false. - * @param data the data to score - * @return the score for the given input,label pairs - * @see #score(DataSet, boolean) - */ - public double score(DataSet data) { - return score(data, false); - } - - /** - * Sets the input and labels and calculates the score (value of the output layer loss function plus l1/l2 if applicable) - * for the prediction with respect to the true labels
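A short usage sketch contrasting the two score(DataSet[, boolean]) overloads described here; testData is an arbitrary DataSet and the names are illustrative.

    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.nd4j.linalg.dataset.DataSet;

    class ScoreSketch {
        static void report(MultiLayerNetwork net, DataSet testData) {
            double evalLoss  = net.score(testData);        // same as score(testData, false): inference-time score
            double trainLoss = net.score(testData, true);  // scored as during training (dropout etc. active)
            System.out.println("eval loss = " + evalLoss + ", training-mode loss = " + trainLoss);
        }
    }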
- * @param data data to calculate score for - * @param training If true: score during training. If false: score at test time. This can affect the application of - * certain features, such as dropout and dropconnect (which are applied at training time only) - * @return the score (value of the loss function) - */ - public double score(DataSet data, boolean training) { - try{ - return scoreHelper(data, training); - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - private double scoreHelper(DataSet data, boolean training){ - boolean hasMaskArray = data.hasMaskArrays(); - if (hasMaskArray) - setLayerMaskArrays(data.getFeaturesMaskArray(), data.getLabelsMaskArray()); - - if (!(getOutputLayer() instanceof IOutputLayer)) { - throw new IllegalStateException("Cannot calculate score if final layer is not an instance of IOutputLayer. " + - "Final layer is of type: " + getOutputLayer().getClass()); + if (wsActCloseNext != null) { + wsActCloseNext.close(); + } + wsActCloseNext = temp; + temp = null; } - WorkspaceMode wsm = (training ? layerWiseConfigurations.getTrainingWorkspaceMode() : layerWiseConfigurations.getInferenceWorkspaceMode()); - LayerWorkspaceMgr mgr; - if(wsm == WorkspaceMode.NONE){ - mgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - mgr = LayerWorkspaceMgr.builder() - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - //TODO we can probably optimize this - .noWorkspaceFor(ArrayType.ACTIVATIONS) - .noWorkspaceFor(ArrayType.INPUT) - .build(); - } - mgr.setHelperWorkspacePointers(helperWorkspaces); - - INDArray inputToOutputLayer = outputOfLayerDetached(training, FwdPassType.STANDARD,layers.length-2, data.getFeatures(), - data.getFeaturesMaskArray(), data.getLabelsMaskArray(), null); - - if (data.getFeatures().size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - IOutputLayer ol = (IOutputLayer) getOutputLayer(); - if (getLayerWiseConfigurations().getInputPreProcess(layers.length - 1) != null) { - inputToOutputLayer = getLayerWiseConfigurations().getInputPreProcess(layers.length - 1) - .preProcess(inputToOutputLayer, (int) data.getFeatures().size(0), mgr); - } - ol.setInput(inputToOutputLayer, mgr); //Feedforward doesn't include output layer for efficiency - ol.setLabels(data.getLabels()); - double score; - try(MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { - score = ol.computeScore(calcRegularizationScore(true), training, mgr); + if (traceLog) { + log.trace("Completed forward pass: {} - {}", i, layers[i].getClass().getSimpleName()); } - if (hasMaskArray) - clearLayerMaskArrays(); - clearLayersStates(); - - return score; - } - - /** - * As per {@link #scoreExamples(DataSet, boolean)} - the outputs (example scores) for all DataSets in the iterator are concatenated - */ - public INDArray scoreExamples(DataSetIterator iter, boolean addRegularizationTerms) { - List out = new ArrayList<>(); - - while (iter.hasNext()) { - out.add(scoreExamples(iter.next(), addRegularizationTerms)); + // Edge case: for first layer with dropout, inputs can't be in previous workspace (as it + // hasn't been opened yet) + // Hence: put inputs in working memory -> set back to default for next use of workspace mgr + if (i == 0 && wsm != WorkspaceMode.NONE) { + mgr.setWorkspace( + ArrayType.INPUT, + WS_LAYER_ACT_2, + WS_LAYER_ACT_X_CONFIG); // Inputs should always be in the previous WS } - 
return Nd4j.toFlattened('f', out); - } - - /**Calculate the score for each example in a DataSet individually. Unlike {@link #score(DataSet)} and {@link #score(DataSet, boolean)} - * this method does not average/sum over examples. This method allows for examples to be scored individually (at test time only), which - * may be useful for example for autoencoder architectures and the like.
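A minimal sketch of per-example scoring as described above, assuming an autoencoder-style network; the threshold-based check and all names are illustrative only.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;

public class PerExampleScoreSketch {
    // Flags examples whose individual loss exceeds a threshold, e.g. to spot likely
    // anomalies when the network is trained as an autoencoder.
    static void flagHighLossExamples(MultiLayerNetwork net, DataSet data, double threshold) {
        INDArray perExample = net.scoreExamples(data, true); // column vector, one score per row
        for (int i = 0; i < perExample.length(); i++) {
            if (perExample.getDouble(i) > threshold) {
                System.out.println("Example " + i + " has loss " + perExample.getDouble(i));
            }
        }
    }
}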
- * Each row of the output (assuming addRegularizationTerms == true) is equivalent to calling score(DataSet) with a single example. - * @param data The data to score - * @param addRegularizationTerms If true: add l1/l2 regularization terms (if any) to the score. If false: don't add regularization terms - * @return An INDArray (column vector) of size input.numRows(); the ith entry is the score (loss value) of the ith example - */ - public INDArray scoreExamples(DataSet data, boolean addRegularizationTerms) { - try{ - return scoreExamplesHelper(data, addRegularizationTerms); - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - private INDArray scoreExamplesHelper(DataSet data, boolean addRegularizationTerms){ - INDArray inputLast = outputOfLayerDetached(false, FwdPassType.STANDARD,layers.length-2, data.getFeatures(), - data.getFeaturesMaskArray(), data.getLabelsMaskArray(), null); - setLabels(data.getLabels()); - setLayerMaskArrays(data.getFeaturesMaskArray(), data.getLabelsMaskArray()); - - //TODO we might want workspaces here? - LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(); - - INDArray out; - if (getOutputLayer() instanceof IOutputLayer) { - IOutputLayer ol = (IOutputLayer) getOutputLayer(); - if(layerWiseConfigurations.getInputPreProcess(layers.length-1) != null){ - - if (data.getFeatures().size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - inputLast = layerWiseConfigurations.getInputPreProcess(layers.length-1).preProcess(inputLast, - (int) data.getFeatures().size(0), mgr); - } - ol.setLabels(data.getLabels()); - ol.setInput(inputLast, mgr); - double r = (addRegularizationTerms ? calcRegularizationScore(true) : 0); - out = ol.computeScoreForExamples(r, mgr); - } else { - throw new UnsupportedOperationException( - "Cannot calculate score with respect to labels without an OutputLayer"); - } - - clearLayersStates(); - clearLayerMaskArrays(); - return out; - } - - - @Override - public void fit() { - fit(input, labels); - } - - @Override - public void update(INDArray gradient, String paramType) { - throw new UnsupportedOperationException("Not implemented"); - } - - - /** - * Score of the model (relative to the objective function) - previously calculated on the last minibatch - * - * @return the score of the model (relative to the objective function) - */ - @Override - public double score() { - return score; - } - - /** - * Intended for developer/internal use - */ - public void setScore(double score) { - this.score = score; - } - - @Override - public void computeGradientAndScore(LayerWorkspaceMgr layerWorkspaceMgr){ - computeGradientAndScore(); - } - - public void computeGradientAndScore() { - - if (!(getOutputLayer() instanceof IOutputLayer)) { - throw new DL4JException( - "Cannot calculate gradient and score with respect to labels: final layer is not an IOutputLayer. " + - "Final layer class: " + getOutputLayer().getClass() + ". To calculate gradients and fit a network " + - "using backpropagation, the final layer must be an output layer"); - } - - //Note: Workspace manager is only ose here for score calculation... 
other workspace managers are used in the - // various FF/backprop methds - LayerWorkspaceMgr mgr; - if(layerWiseConfigurations.getTrainingWorkspaceMode() == WorkspaceMode.NONE){ - mgr = LayerWorkspaceMgr.noWorkspaces(); - } else { - mgr = LayerWorkspaceMgr.builder() - .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) - .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_FF_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .with(ArrayType.RNN_BP_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM, WS_RNN_LOOP_WORKING_MEM_CONFIG) - .build(); - - if(layerWiseConfigurations.getCacheMode() != null){ - //For now: store cache mode activations in activations workspace - mgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG); - } - } - - boolean tbptt = layerWiseConfigurations.getBackpropType() == BackpropType.TruncatedBPTT; - FwdPassType fwdType = (tbptt ? FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE : FwdPassType.STANDARD); - synchronizeIterEpochCounts(); - - //Calculate activations (which are stored in each layer, and used in backprop) - try(MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS)) { - //First: do a feed-forward through the network - //Note that we don't actually need to do the full forward pass through the output layer right now; but we do - // need the input to the output layer to be set (such that backprop can be done) - List activations = ffToLayerActivationsInWs(layers.length - 2, fwdType, tbptt, input, mask, null); - if (!trainingListeners.isEmpty()) { - //TODO: We possibly do want output layer activations in some cases here... - for (TrainingListener tl : trainingListeners) { - tl.onForwardPass(this, activations); - } - } - INDArray inputToOutputLayer = activations.get(activations.size() - 1); - if (layerWiseConfigurations.getInputPreProcess(layers.length - 1) != null) { - inputToOutputLayer = layerWiseConfigurations.getInputPreProcess(layers.length - 1) - .preProcess(inputToOutputLayer, getInputMiniBatchSize(), mgr); - //Validate activations location - } - getOutputLayer().setInput(inputToOutputLayer, mgr); - //Then: compute gradients - Pair pair = calcBackpropGradients(null, true, false, false); - this.gradient = (pair == null ? null : pair.getFirst()); - - //Calculate score - try(MemoryWorkspace wsFF = mgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { - double r = calcRegularizationScore(true); - score = ((IOutputLayer) getOutputLayer()).computeScore(r, true, mgr); - } - - //Listeners - if (!trainingListeners.isEmpty()) { - try (MemoryWorkspace workspace = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - for (TrainingListener tl : trainingListeners) { - tl.onBackwardPass(this); - } - } - } - } - - //Clear the post noise/dropconnect parameters on the output layer - getOutputLayer().clearNoiseWeightParams(); - } - - /** - * Clear the inputs. Clears optimizer state. 
- */ - public void clear() { - for (Layer layer : layers) - layer.clear(); - - input = null; - labels = null; - solver = null; - } - - @Override - public void applyConstraints(int iteration, int epoch) { - for(Layer l : layers){ - l.applyConstraints(iteration, epoch); - } - } - - - /** - * Set the input array for the network - * - * @param input Input array to set - */ - public void setInput(INDArray input) { - this.input = input; - if (this.layers == null) { - init(); - } - if (input != null) { - if (input.length() == 0) - throw new IllegalArgumentException( - "Invalid input: length 0 (shape: " + Arrays.toString(input.shape()) + ")"); - - if (input.size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - setInputMiniBatchSize((int) input.size(0)); - } - } - - @Override - public void setInput(INDArray input, LayerWorkspaceMgr mgr){ - throw new UnsupportedOperationException("Not supported"); - } - - /** - * Get the output layer - i.e., the last layer in the netwok - * - * @return - */ - public Layer getOutputLayer() { - Layer ret = getLayers()[getLayers().length - 1]; - if (ret instanceof FrozenLayerWithBackprop) { - ret = ((FrozenLayerWithBackprop) ret).getInsideLayer(); - } - return ret; - } - - - /** - * See {@link #setParams(INDArray)} - */ - public void setParameters(INDArray params) { - setParams(params); - } - - /** - * Intended for internal/developer use - */ - public NeuralNetConfiguration getDefaultConfiguration() { - return defaultConfiguration; - } - - public INDArray getLabels() { - return labels; - } - - public INDArray getInput() { - return input; - } - - - /** - * @param labels Labels to set - */ - public void setLabels(INDArray labels) { - this.labels = labels; - } - - /** - * Get the number of layers in the network - * - * @return the number of layers in the network - */ - public int getnLayers() { - return layerWiseConfigurations.getConfs().size(); - } - - /** - * @return The layers in the network - */ - public synchronized Layer[] getLayers() { - return layers; - } - - public Layer getLayer(int i) { - Preconditions.checkArgument(i >= 0 && i < layers.length, "Invalid layer index: layer index must be 0" + - " to %s (inclusive), got index %s", layers.length-1, i); - return layers[i]; - } - - public Layer getLayer(String name) { - return layerMap.get(name); - } - - public List getLayerNames() { - return new ArrayList<>(layerMap.keySet()); - } - - public void setLayers(Layer[] layers) { - this.layers = layers; - } - - public INDArray getMask() { - return mask; - } - - public void setMask(INDArray mask) { - this.mask = mask; - } - - public INDArray getMaskArray() { - return mask; - } - - @Override - public boolean isPretrainLayer() { - return false; - } - - @Override - public void clearNoiseWeightParams() { - for(Layer l : layers){ - l.clearNoiseWeightParams(); - } - } - - @Override - public void allowInputModification(boolean allow) { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public Pair feedForwardMaskArray(INDArray maskArray, MaskState currentMaskState, - int minibatchSize) { - if (maskArray == null) { - for (int i = 0; i < layers.length; i++) { - layers[i].feedForwardMaskArray(null, null, minibatchSize); - } - } else { - //Do a forward pass through each preprocessor and layer - for (int i = 0; i < layers.length; i++) { - InputPreProcessor preProcessor = getLayerWiseConfigurations().getInputPreProcess(i); - - if (preProcessor != null) { - Pair p = - preProcessor.feedForwardMaskArray(maskArray, currentMaskState, 
minibatchSize); - if (p != null) { - maskArray = p.getFirst(); - currentMaskState = p.getSecond(); - } else { - maskArray = null; - currentMaskState = null; - } - } - - Pair p = - layers[i].feedForwardMaskArray(maskArray, currentMaskState, minibatchSize); - if (p != null) { - maskArray = p.getFirst(); - currentMaskState = p.getSecond(); - } else { - maskArray = null; - currentMaskState = null; - } - } - } - - return new Pair<>(maskArray, currentMaskState); - } - - @Override - public LayerHelper getHelper() { - throw new UnsupportedOperationException("Not supported"); - } - - //========== - //Layer methods - - @Override - public Type type() { - return Type.MULTILAYER; - } - - - /** - * Equivalent to {@link #output(INDArray)} using the input set via {@link #setInput(INDArray)} - */ - public INDArray activate(TrainingMode training) { - return output(input, training == TrainingMode.TRAIN); - } - - /** - * Equivalent to {@link #output(INDArray, TrainingMode)} - */ - public INDArray activate(INDArray input, TrainingMode training) { - return output(input, training == TrainingMode.TRAIN); - } - - @Override - public Pair backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { - if (getOutputLayer() instanceof IOutputLayer) - throw new UnsupportedOperationException("Cannot calculate gradients based on epsilon with OutputLayer"); - - return calcBackpropGradients(epsilon, false, false, true); - } - - @Override - public void setIndex(int index) { - layerIndex = index; - } - - @Override - public int getIndex() { - return layerIndex; - } - - @Override - public int getIterationCount() { - return getLayerWiseConfigurations().getIterationCount(); - } - - @Override - public int getEpochCount() { - return getLayerWiseConfigurations().getEpochCount(); - } - - @Override - public void setIterationCount(int iterationCount) { - getLayerWiseConfigurations().setIterationCount(iterationCount); - } - - @Override - public void setEpochCount(int epochCount) { - getLayerWiseConfigurations().setEpochCount(epochCount); - } - - @Override - public double calcRegularizationScore(boolean backpropParamsOnly){ - double scoreSum = 0.0; - for (int i = 0; i < layers.length; i++) { - scoreSum += layers[i].calcRegularizationScore(backpropParamsOnly); - } - return scoreSum; - } - - @Override - public void update(Gradient gradient) { - if (gradient.gradient().length() != numParams(true)) - throw new IllegalArgumentException("Invalid input: expect gradients array of length " + numParams(true)); - for (Map.Entry entry : gradient.gradientForVariable().entrySet()) { - String key = entry.getKey(); - INDArray val = entry.getValue(); - int idx = key.indexOf('_'); - if (idx == -1) - throw new IllegalStateException("Invalid param key: not have layer separator: \"" + key + "\""); - Integer layerId = Integer.parseInt(key.substring(0, idx)); - String paramType = key.substring(idx + 1); - // Update MLN gradient - this.gradient.gradientForVariable().put(key, val); - // Update layer params - layers[layerId].update(val, paramType); - } - // Update layerwise gradient view - setBackpropGradientsViewArray(gradient.gradient()); - - } - - @Override - public INDArray activate(boolean training, LayerWorkspaceMgr mgr) { - throw new UnsupportedOperationException(); - } - - @Override - public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr mgr) { - throw new UnsupportedOperationException(); - } - - @Override - public void setInputMiniBatchSize(int size) { - if (layers != null) - for (Layer l : layers) - 
l.setInputMiniBatchSize(size); - } - - @Override - public int getInputMiniBatchSize() { - if(!conf().isMiniBatch()) - return 1; - - if (input.size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - return (int) input.size(0); - } - - @Override - public void setMaskArray(INDArray maskArray) { - throw new UnsupportedOperationException(); - } - - /** - * - * If this MultiLayerNetwork contains one or more RNN layers: conduct forward pass (prediction) - * but using previous stored state for any RNN layers. The activations for the final step are - * also stored in the RNN layers for use next time rnnTimeStep() is called.
- * This method can be used to generate output one or more steps at a time instead of always having to do - * forward pass from t=0. Example uses are for streaming data, and for generating samples from network output - * one step at a time (where samples are then fed back into the network as input)
- * If no previous state is present in RNN layers (i.e., initially or after calling rnnClearPreviousState()), - * the default initialization (usually 0) is used.
- * Supports mini-batch (i.e., multiple predictions/forward pass in parallel) as well as for single examples.
- * @param input Input to network. May be for one or multiple time steps. For single time step: - * input has shape [miniBatchSize,inputSize] or [miniBatchSize,inputSize,1]. miniBatchSize=1 for single example.
- * For multiple time steps: [miniBatchSize,inputSize,inputTimeSeriesLength] - * @return Output activations. If output is RNN layer (such as RnnOutputLayer): if input has shape [miniBatchSize,inputSize] - * i.e., is 2d, output has shape [miniBatchSize,outputSize] (i.e., also 2d).
- * Otherwise output is 3d [miniBatchSize,outputSize,inputTimeSeriesLength] when using RnnOutputLayer. - * @see #rnnTimeStep(INDArray, MemoryWorkspace) For outputting the activations in the specified workspace - */ - public INDArray rnnTimeStep(INDArray input) { - return rnnTimeStep(input, null); - } - - /** - * See {@link #rnnTimeStep(INDArray)} for details
- * If no memory workspace is provided, the output will be detached (not in any workspace).
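A minimal sketch of the step-by-step generation loop this Javadoc describes, assuming nIn equals nOut so the previous output can be fed back as the next input; names are illustrative only.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class RnnTimeStepSketch {
    // Generates 'steps' outputs one time step at a time, feeding each output back in
    // as the next input. Assumes nIn == nOut so the feedback is shape-compatible.
    static void generate(MultiLayerNetwork net, INDArray seed, int steps) {
        net.rnnClearPreviousState();                 // start from the default (zero) state
        INDArray current = seed;                     // [miniBatchSize, nIn] or [miniBatchSize, nIn, 1]
        for (int i = 0; i < steps; i++) {
            INDArray out = net.rnnTimeStep(current); // detached output; RNN state is stored in the layers
            System.out.println("step " + i + " -> " + out);
            current = out;
        }
    }
}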
- * If a memory workspace is provided, the output activation array (i.e., the INDArray returned by this method) - * will be placed in the specified workspace. This workspace must be opened by the user before calling this method - - * and the user is responsible for (a) closing this workspace, and (b) ensuring the output array is not used out - * of scope (i.e., not used after closing the workspace to which it belongs - as this is likely to cause either - * an exception when used, or a crash). - * - * @param input Input activations - * @param outputWorkspace Output workspace. May be null - * @return The output/activations from the network (either detached or in the specified workspace if provided) - */ - public INDArray rnnTimeStep(INDArray input, MemoryWorkspace outputWorkspace ) { + } + } catch (Throwable t2) { + t = t2; + } finally { + if (wsActCloseNext != null) { try { - boolean inputIs2d = input.rank() == 2; - INDArray out = outputOfLayerDetached(false, FwdPassType.RNN_TIMESTEP, layers.length - 1, input, null, null, outputWorkspace); - if (inputIs2d && out.rank() == 3 && layers[layers.length - 1].type() == Type.RECURRENT) { - //Return 2d output with shape [miniBatchSize,nOut] - // instead of 3d output with shape [miniBatchSize,nOut,1] - return out.tensorAlongDimension(0, 1, 0); + wsActCloseNext.close(); + } catch (Throwable t2) { + if (t != null) { + log.error( + "Encountered second exception while trying to close workspace after initial exception"); + log.error("Original exception:", t); + throw t2; + } + } + } + if (temp != null) { + // Should only be non-null on exception + while (temp.isScopeActive()) { + // For safety, should never occur in theory: a single close() call may not be sufficient, + // if + // workspace scope was borrowed and not properly closed when exception occurred + try { + temp.close(); + } catch (Throwable t2) { + if (t != null) { + log.error( + "Encountered second exception while trying to close workspace after initial exception"); + log.error("Original exception:", t); + throw t2; } - return out; - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; + } } - } + } - /**Get the state of the RNN layer, as used in rnnTimeStep(). - * @param layer Number/index of the layer. - * @return Hidden state, or null if layer is not an RNN layer - */ - public Map rnnGetPreviousState(int layer) { - if (layer < 0 || layer >= layers.length) - throw new IllegalArgumentException("Invalid layer number"); - Layer l = layers[layer]; - if(l instanceof org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer){ - l = ((org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer)l).getUnderlying(); + Nd4j.getMemoryManager().setCurrentWorkspace(initialWorkspace); + + if (t != null) { + if (t instanceof RuntimeException) { + throw ((RuntimeException) t); } - if (!(l instanceof RecurrentLayer)) - throw new IllegalArgumentException("Layer is not an RNN layer"); - return ((RecurrentLayer) l).rnnGetPreviousState(); + throw new RuntimeException("Error during neural network forward pass", t); + } + + if (outputWorkspace == null || outputWorkspace instanceof DummyWorkspace) { + WorkspaceUtils.assertNoWorkspacesOpen( + "Expected no workspace active at the end of outputOfLayerDetached", true); + } else { + Preconditions.checkState( + outputWorkspace.isScopeActive(), + "Expected output workspace to still be open" + + "at end of outputOfLayerDetached, but it is closed. 
This suggests an implementation or layer workspace problem"); + } } - /**Set the state of the RNN layer. - * @param layer The number/index of the layer. - * @param state The state to set the specified layer to - */ - public void rnnSetPreviousState(int layer, Map state) { - if (layer < 0 || layer >= layers.length) - throw new IllegalArgumentException("Invalid layer number"); - Layer l = layers[layer]; - if(l instanceof org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer){ - l = ((org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer)l).getUnderlying(); + return input; + } + + private INDArray reshapeTimeStepInput(INDArray input) { + if (input.rank() == 2) { // dynamically reshape to 3D input with one time-step. + long[] inShape = input.shape(); + input = input.reshape(inShape[0], inShape[1], 1); + } + return input; + } + + /** + * Compute activations of all layers from input (inclusive) to output of the final/output layer. + * Equivalent to calling {@link #feedForward(boolean)} with train=false + * + * @return the list of activations for each layer, including the input + */ + public List feedForward() { + return feedForward(false); + } + + /** + * Compute activations of all layers from input (inclusive) to output of the final/output layer. + * Equivalent to calling {@link #feedForward(INDArray, boolean)} with train = false + * + * @return the list of activations for each layer, including the input + */ + public List feedForward(INDArray input) { + if (input == null) { + throw new IllegalStateException("Unable to perform feed forward; no input found"); + } + setInput(input); + return feedForward(); + } + + /** + * Compute the activations from the input to the output layer, given mask arrays (that may be + * null) The masking arrays are used in situations such an one-to-many and many-to-one rucerrent + * neural network (RNN) designs, as well as for supporting time series of varying lengths within + * the same minibatch for RNNs. Other than mask arrays, this is equivalent to calling {@link + * #feedForward(INDArray, boolean)} with train = false + */ + public List feedForward(INDArray input, INDArray featuresMask, INDArray labelsMask) { + setLayerMaskArrays(featuresMask, labelsMask); + List list = feedForward(input); + clearLayerMaskArrays(); + return list; + } + + @Override + public Gradient gradient() { + return gradient; + } + + @Override + public Pair gradientAndScore() { + return new Pair<>(gradient(), getScore()); + } + + /** + * Clone the MultiLayerNetwork + * + * @return A cloned MultiLayerNetwork with a copy of the configuration, parameters and updater + * identical to the current network. 
+ */ + @Override + public MultiLayerNetwork clone() { + if (!initCalled) { + init(); + } + NeuralNetConfiguration conf = this.getNetConfiguration().clone(); + MultiLayerNetwork ret = new MultiLayerNetwork(conf); + ret.init(this.getModelParams().dup(), false); + + if (solver != null) { + // If solver is null: updater hasn't been initialized -> getUpdater call will force + // initialization, however + Updater u = this.getUpdater(); + INDArray updaterState = u.getStateViewArray(); + if (updaterState != null) { + ret.getUpdater().setStateViewArray(ret, updaterState.dup(), false); + } + } + + if (hasAFrozenLayer()) { + // correct layers to frozen layers + Layer[] clonedLayers = ret.getLayers(); + for (int i = 0; i < layers.length; i++) { + if (layers[i] instanceof FrozenLayer) { + clonedLayers[i] = new FrozenLayer(ret.getLayer(i)); } - if (!(l instanceof RecurrentLayer)) - throw new IllegalArgumentException("Layer is not an RNN layer"); - RecurrentLayer r = (RecurrentLayer) l; - r.rnnSetPreviousState(state); + } + ret.setLayers(clonedLayers); + } + return ret; + } + + protected boolean hasAFrozenLayer() { + for (int i = 0; i < layers.length - 1; i++) { + if (layers[i] instanceof FrozenLayer) { + return true; + } + } + return false; + } + + /** + * @deprecated To be removed. Use {@link #getModelParams()} instead + */ + @Deprecated + public INDArray params(boolean backwardOnly) { + return getModelParams(); + } + + /** + * Returns a 1 x m vector where the vector is composed of a flattened vector of all of the + * parameters in the network.
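A minimal sketch of clone() as documented above, assuming getModelParams() as the parameter accessor introduced by this refactoring; the equality check is only a sanity probe.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;

public class CloneSketch {
    // clone() copies configuration, parameters and updater state, so the copy can be
    // trained or mutated independently of the original network.
    static MultiLayerNetwork snapshot(MultiLayerNetwork net) {
        MultiLayerNetwork copy = net.clone();
        boolean sameValues = net.getModelParams().equals(copy.getModelParams());
        System.out.println("clone has identical parameter values: " + sameValues);
        return copy;
    }
}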
+ * See {@link #getParam(String)} and {@link #getParamTable()} for a more useful/interpretable + * representation of the parameters.
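A minimal sketch of copying parameters between two identically configured networks via the flattened parameter view described here; dup() avoids sharing the underlying buffer.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class ParamViewSketch {
    // Copies parameters from one network to an identically configured one.
    // getModelParams() returns a view, so dup() is used to avoid sharing memory.
    static void copyParameters(MultiLayerNetwork source, MultiLayerNetwork target) {
        INDArray params = source.getModelParams();
        target.setParams(params.dup());
    }
}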
+ * Note that the parameter vector is not a copy, and changes to the returned INDArray will impact + * the network parameters. + * + * @return the parameters for this neural net + */ + @Override + public INDArray getModelParams() { + return flattenedParams; + } + + @Override + public void setParamsViewArray(INDArray params) { + throw new UnsupportedOperationException("Not yet implemented"); + } + + @Override + public INDArray getGradientsViewArray() { + return flattenedGradients; + } + + @Override + public void setBackpropGradientsViewArray(INDArray gradients) { + int paramsSoFar = 0; + for (Layer layer : layers) { + if (layer.numParams() == 0) { + continue; + } + layer.setBackpropGradientsViewArray( + gradients.get( + NDArrayIndex.interval(0, 0, true), + NDArrayIndex.interval(paramsSoFar, paramsSoFar + layer.numParams()))); + paramsSoFar += layer.numParams(); + } + } + + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + throw new UnsupportedOperationException("Not supported"); + } + + /** + * Returns the number of parameters in the network + * + * @return The number of parameters + */ + @Override + public long numParams() { + if (!isInitCalled()) { + init(); + } + return flattenedParams == null ? 0 : flattenedParams.length(); // May be null for a network with 0 parameters + } + + /** + * @return 1d parameter vector + */ + @Override + public INDArray getParams() { + throw new RuntimeException("Calling getParams() on a MultiLayerNetwork is not supported - use getModelParams() instead"); + } + + /** + * Set the parameters for this model. This expects a linear ndarray which will then be unpacked + * internally relative to the expected ordering of the model.
+ * See also: {@link #setParamTable(Map)} and {@link #setParam(String, INDArray)} + * + * @param params the parameters for the model + */ + @Override + public void setParams(INDArray params) { + if (flattenedParams == params) { + return; // No op } - /** Clear the previous state of the RNN layers (if any). - */ - public void rnnClearPreviousState() { - if (layers == null) - return; - for (int i = 0; i < layers.length; i++) { - if (layers[i] instanceof RecurrentLayer) - ((RecurrentLayer) layers[i]).rnnClearPreviousState(); - else if (layers[i] instanceof MultiLayerNetwork) { - ((MultiLayerNetwork) layers[i]).rnnClearPreviousState(); - } else if(layers[i] instanceof BaseWrapperLayer && ((BaseWrapperLayer)layers[i]).getUnderlying() instanceof RecurrentLayer){ - ((RecurrentLayer) ((BaseWrapperLayer)layers[i]).getUnderlying()).rnnClearPreviousState(); - } + if (flattenedParams != null && params.length() == flattenedParams.length()) { + if (params != flattenedParams) { + flattenedParams.assign(params); + } + } else { + if (flattenedParams == null) { + flattenedParams = params.dup(); + } + int idx = 0; + for (int i = 0; i < getLayers().length; i++) { + Layer layer = getLayer(i); + long range = layer.numParams(); + if (range <= 0) { + continue; // Some layers: no parameters (subsampling, etc) } + INDArray get = + params.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.interval(idx, range + idx)); + layer.setParams(get); + idx += range; + } + } + } + + /** + * Returns the number of parameters in the network + * + * @param backwards If true: exclude any parameters uned only in unsupervised layerwise training + * (such as the decoder parameters in an autoencoder) + * @return The number of parameters + */ + @Override + public long numParams(boolean backwards) { + int length = 0; + for (int i = 0; i < layers.length; i++) { + length += layers[i].numParams(backwards); } - /** Similar to rnnTimeStep and feedForward() methods. Difference here is that this method:
- * (a) like rnnTimeStep does forward pass using stored state for RNN layers, and
- * (b) unlike rnnTimeStep does not modify the RNN layer state
- * Therefore multiple calls to this method with the same input should have the same output.
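A minimal sketch of the behaviour described above, assuming rnnActivateUsingStoredState keeps this signature after the refactoring; names are illustrative only.

import java.util.List;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;

public class StoredStateActivationSketch {
    // Runs a forward pass that reads the stored RNN state but does not modify it,
    // so calling it twice with the same input yields the same activations.
    static void peekActivations(MultiLayerNetwork net, INDArray input3d) {
        List<INDArray> acts1 = net.rnnActivateUsingStoredState(input3d, false, false);
        List<INDArray> acts2 = net.rnnActivateUsingStoredState(input3d, false, false);
        System.out.println("same output both times: "
                + acts1.get(acts1.size() - 1).equals(acts2.get(acts2.size() - 1)));
    }
}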
- * Typically used during training only. Use rnnTimeStep for prediction/forward pass at test time. - * @param input Input to network - * @param training Whether training or not - * @param storeLastForTBPTT set to true if used as part of truncated BPTT training - * @return Activations for each layer (including input, as per feedforward() etc) - */ - public List rnnActivateUsingStoredState(INDArray input, boolean training, boolean storeLastForTBPTT) { - return ffToLayerActivationsDetached(training, FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE, storeLastForTBPTT, layers.length-1, input, mask, null, false); + return length; + } + + /** + * Sets the input and labels and returns the F1 score for the prediction with respect to the true + * labels + * + * @param data the data to score + * @return the score for the given input,label pairs + */ + @Override + public double f1Score(org.nd4j.linalg.dataset.api.DataSet data) { + return f1Score(data.getFeatures(), data.getLabels()); + } + + /** + * Perform minibatch training on all minibatches in the DataSetIterator, for the specified number + * of epochs. Equivalent to calling {@link #fit(DataSetIterator)} numEpochs times in a loop + * + * @param iterator Training data (DataSetIterator). Iterator must support resetting + * @param numEpochs Number of training epochs, >= 1 + */ + public void fit(@NonNull DataSetIterator iterator, int numEpochs) { + Preconditions.checkArgument( + numEpochs > 0, "Number of epochs must be > 0. Got numEpochs = %s", numEpochs); + Preconditions.checkArgument( + numEpochs == 1 || iterator.resetSupported(), + "Cannot perform multiple epochs training using an " + + "iterator that does not support resetting (iterator.resetSupported() returned false)"); + + for (int i = 0; i < numEpochs; i++) { + fit(iterator); + } + } + + /** + * Perform minibatch training on all minibatches in the DataSetIterator for 1 epoch.
+ * Note that this method does not do layerwise pretraining.
+ * For layerwise pretraining, use {@link #pretrain(DataSetIterator)}
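A minimal usage sketch of the multi-epoch fit overload, assuming ScoreIterationListener as a standard training listener and addTrainingListeners as shown later in this class.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

public class FitSketch {
    // Trains for a fixed number of epochs; the iterator must support reset() when numEpochs > 1.
    static void train(MultiLayerNetwork net, DataSetIterator trainData, int numEpochs) {
        net.addTrainingListeners(new ScoreIterationListener(100)); // log the score every 100 iterations
        net.fit(trainData, numEpochs);
    }
}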
+ * + * @param iterator Training data (DataSetIterator) + */ + @Override + public void fit(DataSetIterator iterator) { + try { + fitHelper(iterator); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + private synchronized void fitHelper(DataSetIterator iterator) { + // we're wrapping all iterators into AsyncDataSetIterator to provide background prefetch - where + // appropriate + DataSetIterator iter; + boolean destructable = false; + if (iterator.asyncSupported()) { + iter = + new AsyncDataSetIterator( + iterator, Math.min(Nd4j.getAffinityManager().getNumberOfDevices() * 2, 2), true); + destructable = true; + } else { + iter = iterator; } - /** Get the updater for this MultiLayerNetwork - * @return Updater for MultiLayerNetwork - */ - public Updater getUpdater() { - return getUpdater(true); + for (TrainingListener tl : trainingListeners) { + tl.onEpochStart(this); } - public Updater getUpdater(boolean initializeIfReq) { - if (solver == null && initializeIfReq) { - synchronized(this){ - if(solver == null) { //May have been created while waiting for lock - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); - solver.getOptimizer().setUpdater(UpdaterCreator.getUpdater(this)); - } - } + LayerWorkspaceMgr workspaceMgr; + if (getNetConfiguration().getTrainingWorkspaceMode() == WorkspaceMode.NONE) { + workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + workspaceMgr = + LayerWorkspaceMgr.builder() + .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_BP_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + // Note for updater working memory, we have the option to re-use WS_ALL_LAYERS_ACT or + // FF/BP_WORKING_MEM + // as these should be closed by the time updaters are executed + // Generally, WS_ALL_LAYERS_ACT will be the larger of the two, so we'll use this + .with(ArrayType.UPDATER_WORKING_MEM, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .build(); + } + workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); + + update(TaskUtils.buildTask(iter)); + if (!iter.hasNext() && iter.resetSupported()) { + iter.reset(); + } + long time1 = System.currentTimeMillis(); + while (iter.hasNext()) { + + DataSet next = iter.next(); + long time2 = System.currentTimeMillis(); + + lastEtlTime.set((time2 - time1)); + + if (next.getFeatures() == null || next.getLabels() == null) { + break; + } + + // TODO: basically we want to wrap internals of this loop into workspace + + boolean hasMaskArrays = next.hasMaskArrays(); + + if (getNetConfiguration().getBackpropType() == BackpropType.TruncatedBPTT) { + doTruncatedBPTT( + next.getFeatures(), + next.getLabels(), + next.getFeaturesMaskArray(), + next.getLabelsMaskArray(), + workspaceMgr); + } else { + if (hasMaskArrays) { + setLayerMaskArrays(next.getFeaturesMaskArray(), next.getLabelsMaskArray()); } - if(solver != null) { - return solver.getOptimizer().getUpdater(initializeIfReq); - } - return null; - } - /** Set the updater for the MultiLayerNetwork */ - public void setUpdater(Updater updater) { + setInput(next.getFeatures()); + 
setLabels(next.getLabels()); + if (solver == null) { - solver = new Solver.Builder().configure(conf()).listeners(getListeners()).model(this).build(); + try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) + .build(); + } } - solver.getOptimizer().setUpdater(updater); + + // TODO CACHE + solver.optimize(workspaceMgr); + } + + if (hasMaskArrays) { + clearLayerMaskArrays(); + } + + time1 = System.currentTimeMillis(); + synchronizeIterEpochCounts(); } - /**Set the mask arrays for features and labels. Mask arrays are typically used in situations such as one-to-many - * and many-to-one learning with recurrent neural networks, as well as for supporting time series of varying lengths - * within the same minibatch.
- * For example, with RNN data sets with input of shape [miniBatchSize,nIn,timeSeriesLength] and outputs of shape - * [miniBatchSize,nOut,timeSeriesLength], the features and mask arrays will have shape [miniBatchSize,timeSeriesLength] - * and contain values 0 or 1 at each element (to specify whether a given input/example is present - or merely padding - - * at a given time step).
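A minimal sketch of the mask layout described above for two sequences padded to length 5, assuming the output overload that takes mask arrays so masking is handled internally.

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class MaskSketch {
    // Two sequences padded to length 5: the first is 5 steps long, the second only 3.
    // The mask is [miniBatchSize, timeSeriesLength] with 1 = real step, 0 = padding.
    static INDArray outputWithMask(MultiLayerNetwork net, INDArray features /* [2, nIn, 5] */) {
        INDArray featuresMask = Nd4j.create(new double[][] {
                {1, 1, 1, 1, 1},
                {1, 1, 1, 0, 0}});
        // output(...) sets and clears the mask arrays internally, as noted in the Javadoc above
        return net.output(features, false, featuresMask, null);
    }
}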
- * NOTE: This method is not usually used directly. Instead, methods such as {@link #feedForward(INDArray, INDArray, INDArray)} - * and {@link #output(INDArray, boolean, INDArray, INDArray)} handle setting of masking internally. - * @param featuresMaskArray Mask array for features (input) - * @param labelsMaskArray Mask array for labels (output) - * @see #clearLayerMaskArrays() - */ - public void setLayerMaskArrays(INDArray featuresMaskArray, INDArray labelsMaskArray) { - if (featuresMaskArray != null) { - - if (featuresMaskArray.size(0) > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - //New approach: use feedForwardMaskArray method - feedForwardMaskArray(featuresMaskArray, MaskState.Active, (int) featuresMaskArray.size(0)); - - - /* - //feedforward layers below a RNN layer: need the input (features) mask array - //Reason: even if the time series input is zero padded, the output from the dense layers are - // non-zero (i.e., activationFunction(0*weights + bias) != 0 in general) - //This assumes that the time series input is masked - i.e., values are 0 at the padded time steps, - // so we don't need to do anything for the recurrent layer - - //Now, if mask array is 2d -> need to reshape to 1d (column vector) in the exact same order - // as is done for 3d -> 2d time series reshaping - INDArray reshapedFeaturesMask = TimeSeriesUtils.reshapeTimeSeriesMaskToVector(featuresMaskArray); - - for( int i=0; i - * See {@link #setLayerMaskArrays(INDArray, INDArray)} for details on mask arrays. - */ - public void clearLayerMaskArrays() { - for (Layer layer : layers) { - layer.setMaskArray(null); - } + clearLayersStates(); + + if (destructable) { + ((AsyncDataSetIterator) iter).shutdown(); } - /** - * Evaluate the network (classification performance) + incrementEpochCount(); + } + + /** + * Calculate parameter gradients and input activation gradients given the input and labels, and + * optionally mask arrays + * + * @param features Features for gradient calculation + * @param label Labels for gradient + * @param fMask Features mask array (may be null) + * @param labelMask Label mask array (may be null) + * @return A pair of gradient arrays: parameter gradients (in Gradient object) and input + * activation gradients + */ + public Pair calculateGradients( + @NonNull INDArray features, @NonNull INDArray label, INDArray fMask, INDArray labelMask) { + try { + return calculateGradientsHelper(features, label, fMask, labelMask); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + private Pair calculateGradientsHelper( + INDArray features, INDArray label, INDArray fMask, INDArray labelMask) { + setInput(features); + setLabels(label); + setLayerMaskArrays(fMask, labelMask); + + LayerWorkspaceMgr mgr; + if (getNetConfiguration().getTrainingWorkspaceMode() == WorkspaceMode.NONE) { + mgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + mgr = + LayerWorkspaceMgr.builder() + .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_BP_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + if (getNetConfiguration().getCacheMode() != 
null) { + // For now: store cache mode activations in activations workspace + mgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG); + } + } + mgr.setHelperWorkspacePointers(helperWorkspaces); + + // Calculate activations (which are stored in each layer, and used in backprop) + try (MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS)) { + // First: do a feed-forward through the network + // Note that we don't actually need to do the full forward pass through the output layer right + // now; but we do + // need the input to the output layer to be set (such that backprop can be done) + List activations = + ffToLayerActivationsInWs( + layers.length - 2, FwdPassType.STANDARD, false, input, mask, fMask); + if (!trainingListeners.isEmpty()) { + // TODO: We possibly do want output layer activations in some cases here... + for (TrainingListener tl : trainingListeners) { + tl.onForwardPass(this, activations); + } + } + INDArray inputToOutputLayer = activations.get(activations.size() - 1); + if (getNetConfiguration().getInputPreProcess(layers.length - 1) != null) { + inputToOutputLayer = + getNetConfiguration() + .getInputPreProcess(layers.length - 1) + .preProcess(inputToOutputLayer, getInputMiniBatchSize(), mgr); + // Validate activations location + } + getOutputLayer().setInput(inputToOutputLayer, mgr); + + Pair p = calcBackpropGradients(null, true, false, true); + if (p.getSecond() != null) { + p.setSecond(p.getSecond().detach()); + } + return p; + } + } + + /** + * Calculate gradients and errors. Used in two places: (a) backprop (for standard multi layer + * network learning) (b) backpropGradient (layer method, for when MultiLayerNetwork is used as a + * layer) + * + * @param epsilon Errors (technically errors .* activations). Not used if withOutputLayer = true + * @param withOutputLayer if true: assume last layer is output layer, and calculate errors based + * on labels. In this case, the epsilon input is not used (may/should be null). If false: + * calculate backprop gradients + * @param returnInputActGrad If true: terun the input activation gradients (detached). False: + * don't return + * @return Gradients and the error (epsilon) at the input + */ + protected Pair calcBackpropGradients( + INDArray epsilon, boolean withOutputLayer, boolean tbptt, boolean returnInputActGrad) { + if (flattenedGradients == null) { + initGradientsView(); + } + String multiGradientKey; + Gradient gradient = new DefaultGradient(flattenedGradients); + + LayerWorkspaceMgr mgrEven; + LayerWorkspaceMgr mgrOdd; + + if (getNetConfiguration().getTrainingWorkspaceMode() == WorkspaceMode.NONE) { + mgrEven = LayerWorkspaceMgr.noWorkspaces(); + mgrOdd = mgrEven; + WorkspaceUtils.assertNoWorkspacesOpen( + "Expected no workspace active in calcBackpropGradients when " + + "training workspace is set to none"); + } else { + /* + Workspaces for backprop in MLN share some features with outputOfLayerDetached, in terms of the + "two alternating workspaces" idea (but for activation gradients here, instead of activations there). + + Workspace design for backprop: + First: we calculate all activations, and ensure they are in WS_ALL_LAYERS_ACT. We assume this is done + EXTERNALLY to this method + Then: we iterate backwards over layers. + + Activations gradient workspaces: opened/closed every second layer. 
+ mgrEven (WS_LAYER_ACT_1) activation grad WS opens at start of 8, 4, 2, 0; closed at end of 7, 5, 3, 1 etc + mgrOdd (WS_LAYER_ACT_2) activation grad WS opens at start of 7, 3, 5, 1; closed at end of 6, 4, 2, 0 etc + + */ + + mgrEven = + LayerWorkspaceMgr.builder() + // Activations in context of backprop (preOut methods etc) are not used outside of the + // layer itself + .with(ArrayType.ACTIVATIONS, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.INPUT, + WS_ALL_LAYERS_ACT, + WS_ALL_LAYERS_ACT_CONFIG) // Usually not required here. Exception: OutputLayer + // dropout + .with(ArrayType.ACTIVATION_GRAD, WS_LAYER_ACT_1, WS_LAYER_ACT_X_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_BP_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + mgrOdd = + LayerWorkspaceMgr.builder() + // Activations in context of backprop (preOut methods etc) are not used outside of the + // layer itself + .with(ArrayType.ACTIVATIONS, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.INPUT, + WS_ALL_LAYERS_ACT, + WS_ALL_LAYERS_ACT_CONFIG) // Usually not required here. Exception: OutputLayer + // dropout + .with(ArrayType.ACTIVATION_GRAD, WS_LAYER_ACT_2, WS_LAYER_ACT_X_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_BP_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + if (epsilon == null) { + // If epsilon is non-null: external errors use case -> inputs are already detached + WorkspaceUtils.assertOpenActiveAndCurrent( + WS_ALL_LAYERS_ACT, + "calcBackpropGradients method requires workspace WS_ALL_LAYERS_ACT" + + " to be open when workspaces are used"); + } + } + mgrEven.setHelperWorkspacePointers(helperWorkspaces); + mgrOdd.setHelperWorkspacePointers(helperWorkspaces); + + // calculate and apply the backward gradient for every layer + /* + * Skip the output layer for the indexing and just loop backwards updating the coefficients for each layer. + * (when withOutputLayer == true) * - * @param iterator Iterator to evaluate on - * @return Evaluation object; results of evaluation on all examples in the data set - */ - public T evaluate(@NonNull DataSetIterator iterator) { - return (T)evaluate(iterator, null); - } - - /** - * Evaluate the network (classification performance). - * Can only be used with MultiDataSetIterator instances with a single input/output array + * Activate applies the activation function for each layer and sets that as the input for the following layer. 
* - * @param iterator Iterator to evaluate on - * @return Evaluation object; results of evaluation on all examples in the data set + * Typical literature contains most trivial case for the error calculation: wT * weights + * This interpretation transpose a few things to get mini batch because ND4J is rows vs columns organization for params */ - public Evaluation evaluate(@NonNull MultiDataSetIterator iterator) { - return evaluate(new MultiDataSetWrapperIterator(iterator)); - } + int numLayers = getnLayers(); + // Store gradients is a list; used to ensure iteration order in DefaultGradient linked hash map. + // i.e., layer 0 first instead of output layer + LinkedList> gradientList = new LinkedList<>(); - /** - * Evaluate the network for regression performance - * @param iterator Data to evaluate on - * @return Regression evaluation - */ - public T evaluateRegression(DataSetIterator iterator) { - return (T)doEvaluation(iterator, new RegressionEvaluation(iterator.totalOutcomes()))[0]; - } + Pair currPair = null; + MemoryWorkspace wsActGradCloseNext = null; + MemoryWorkspace wsActGradTemp = null; + MemoryWorkspace initialWorkspace = Nd4j.getMemoryManager().getCurrentWorkspace(); - /** - * Evaluate the network for regression performance - * Can only be used with MultiDataSetIterator instances with a single input/output array - * @param iterator Data to evaluate on - */ - public org.nd4j.evaluation.regression.RegressionEvaluation evaluateRegression(MultiDataSetIterator iterator) { - return evaluateRegression(new MultiDataSetWrapperIterator(iterator)); - } + boolean traceLog = log.isTraceEnabled(); - /** - * @deprecated To be removed - use {@link #evaluateROC(DataSetIterator, int)} to enforce selection of appropriate ROC/threshold configuration - */ - @Deprecated - public T evaluateROC(DataSetIterator iterator){ - return evaluateROC(iterator, 0); - } - - /** - * Evaluate the network (must be a binary classifier) on the specified data, using the {@link ROC} class - * - * @param iterator Data to evaluate on - * @param rocThresholdSteps Number of threshold steps to use with {@link ROC} - see that class for details. 
- * @return ROC evaluation on the given dataset - */ - public T evaluateROC(DataSetIterator iterator, int rocThresholdSteps) { - Layer outputLayer = getOutputLayer(); - if(getLayerWiseConfigurations().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), ROC.class); - } - return (T)doEvaluation(iterator, new org.deeplearning4j.eval.ROC(rocThresholdSteps))[0]; - } - - /** - * @deprecated To be removed - use {@link #evaluateROCMultiClass(DataSetIterator, int)} to enforce selection of appropriate ROC/threshold configuration - */ - @Deprecated - public T evaluateROCMultiClass(DataSetIterator iterator) { - return evaluateROCMultiClass(iterator, 0); - } - - /** - * Evaluate the network on the specified data, using the {@link ROCMultiClass} class - * - * @param iterator Data to evaluate on - * @param rocThresholdSteps Number of threshold steps to use with {@link ROCMultiClass} - * @return Multi-class ROC evaluation on the given dataset - */ - public T evaluateROCMultiClass(DataSetIterator iterator, int rocThresholdSteps) { - Layer outputLayer = getOutputLayer(); - if(getLayerWiseConfigurations().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), ROCMultiClass.class); - } - return (T)doEvaluation(iterator, new org.deeplearning4j.eval.ROCMultiClass(rocThresholdSteps))[0]; - } - - /** - * Perform evaluation using an arbitrary IEvaluation instance. - * - * @param iterator data to evaluate on - */ - public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { - try{ - return doEvaluationHelper(iterator, evaluations); - } catch (OutOfMemoryError e){ - CrashReportingUtil.writeMemoryCrashDump(this, e); - throw e; - } - } - - public T[] doEvaluationHelper(DataSetIterator iterator, T... evaluations) { - if (!iterator.hasNext() && iterator.resetSupported()) { - iterator.reset(); + Throwable t = null; + try { + for (int i = layers.length - 1; i >= 0; i--) { + if (layers[i] instanceof FrozenLayer) { + break; } - DataSetIterator iter = iterator.asyncSupported() ? new AsyncDataSetIterator(iterator, 2, true) : iterator; - - WorkspaceMode cMode = layerWiseConfigurations.getTrainingWorkspaceMode(); - layerWiseConfigurations.setTrainingWorkspaceMode(layerWiseConfigurations.getInferenceWorkspaceMode()); - - //First: let's determine if we should do 'split feed forward' for long time series - //The idea: RNN 20k time steps. Train using TBPTT length 100 -> 200 segments of length 100. If we naively - // just use .output(INDArray) here, then our memory requirements are 200x larger than if we did the same - // evaluation in segments... - //Only do this if TBPTT is enabled - if not, it means we can train without TBPTT and hence should be able - // to test without splitting also - boolean useRnnSegments = (layerWiseConfigurations.getBackpropType() == BackpropType.TruncatedBPTT); - - MemoryWorkspace outputWs; - if(getLayerWiseConfigurations().getInferenceWorkspaceMode() == WorkspaceMode.ENABLED){ - outputWs = Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(WS_ALL_LAYERS_ACT_CONFIG, WS_OUTPUT_MEM); - } else { - outputWs = new DummyWorkspace(); + if (traceLog) { + log.trace("About to backprop: {} - {}", i, layers[i].getClass().getSimpleName()); } - while (iter.hasNext()) { - DataSet next = iter.next(); + LayerWorkspaceMgr workspaceMgr = (i % 2 == 0 ? 
mgrEven : mgrOdd); - if (next.getFeatures() == null || next.getLabels() == null) - continue; + if (withOutputLayer && i == layers.length - 1) { + if (!(getOutputLayer() instanceof IOutputLayer)) { + log.warn( + "Warning: final layer isn't output layer. You cannot use backprop without an output layer."); + return null; + } + IOutputLayer outputLayer = (IOutputLayer) getOutputLayer(); + if (labels == null && outputLayer.needsLabels()) { + throw new IllegalStateException("No labels found"); + } + outputLayer.setLabels(labels); + } - INDArray features = next.getFeatures(); - INDArray labels = next.getLabels(); - INDArray fMask = next.getFeaturesMaskArray(); - INDArray lMask = next.getLabelsMaskArray(); - List meta = next.getExampleMetaData(); + // Open activation gradients WS *then* BP working memory, so BP working memory is opened + // last for use in layers + wsActGradTemp = workspaceMgr.notifyScopeEntered(ArrayType.ACTIVATION_GRAD); + try (MemoryWorkspace wsBPWorking = + workspaceMgr.notifyScopeEntered(ArrayType.BP_WORKING_MEM)) { + // Note that because we're opening activation workspaces not in a simple nested order, + // we'll manually + // override the previous workspace setting. Otherwise, when we close these workspaces, the + // "current" + // workspace may be set to the incorrect one + wsActGradTemp.setPreviousWorkspace(initialWorkspace); + wsBPWorking.setPreviousWorkspace(initialWorkspace); - if (!useRnnSegments) { - //Standard/non-RNN case: - try (MemoryWorkspace ws = outputWs.notifyScopeEntered()) { - INDArray out = outputOfLayerDetached(false, FwdPassType.STANDARD, layers.length - 1, features, fMask, lMask, ws); + INDArray eps = + (i == layers.length - 1 + ? epsilon + : currPair.getRight()); // eps is null for OutputLayer - try (MemoryWorkspace wsO = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { - for (T evaluation : evaluations) - evaluation.eval(labels, out, lMask, meta); - } - } + if (!tbptt) { + // Standard case + currPair = layers[i].backpropGradient(eps, workspaceMgr); + } else { + // TBPTT gradient + if (layers[i] instanceof RecurrentLayer) { + currPair = + ((RecurrentLayer) layers[i]) + .tbpttBackpropGradient( + currPair.getSecond(), + getNetConfiguration().getTbpttBackLength(), + workspaceMgr); } else { - rnnClearPreviousState(); - - - //Get subset of features and labels: - val fwdLen = layerWiseConfigurations.getTbpttFwdLength(); - val tsLength = features.size(2); - long nSubsets = tsLength / fwdLen; - if (tsLength % fwdLen != 0) - nSubsets++; //Example: 100 fwdLen with timeSeriesLength=120 -> want 2 subsets (1 of size 100, 1 of size 20) - for (int i = 0; i < nSubsets; i++) { - val startTimeIdx = i * fwdLen; - val endTimeIdx = Math.min(startTimeIdx + fwdLen, tsLength); - - if (endTimeIdx > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - INDArray[] subsets = getSubsetsForTbptt(startTimeIdx, (int) endTimeIdx, features, labels, fMask, lMask); - - setLayerMaskArrays(subsets[2], subsets[3]); - - try (MemoryWorkspace ws = outputWs.notifyScopeEntered()) { - INDArray outSub = rnnTimeStep(subsets[0], ws); - try (MemoryWorkspace wsO = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { - for (T evaluation : evaluations) - evaluation.eval(subsets[1], outSub, subsets[3]); - } - } - } + currPair = layers[i].backpropGradient(currPair.getSecond(), workspaceMgr); } + } - //Clear inputs, masks etc. 
Important to avoid leaking invalidated/out of scope arrays between iterations - clearLayersStates(); - } + if (currPair.getSecond() != null) { + // Edge case: may be null for Embedding layer, for example + validateArrayWorkspaces( + workspaceMgr, + currPair.getSecond(), + ArrayType.ACTIVATION_GRAD, + i, + false, + "Backprop"); + } - if (iterator.asyncSupported()) - ((AsyncDataSetIterator) iter).shutdown(); - - layerWiseConfigurations.setTrainingWorkspaceMode(cMode); - - return evaluations; - } - - /** - * Evaluate the network on the provided data set. Used for evaluating the performance of classifiers - * - * @param iterator Data to undertake evaluation on - * @return Evaluation object, summarizing the results of the evaluation on the provided DataSetIterator - */ - public Evaluation evaluate(DataSetIterator iterator, List labelsList) { - return evaluate(iterator, labelsList, 1); - } - - @Override - public INDArray updaterState() { - return getUpdater() != null ? getUpdater().getStateViewArray() : null; - } - - @Override - public void fit(MultiDataSet dataSet) { - if (dataSet.getFeatures().length == 1 && dataSet.getLabels().length == 1) { - INDArray features = dataSet.getFeatures(0); - INDArray labels = dataSet.getLabels(0); - INDArray fMask = null; - INDArray lMask = null; - - if (dataSet.getFeaturesMaskArrays() != null) - fMask = dataSet.getFeaturesMaskArrays()[0]; - - if (dataSet.getFeaturesMaskArrays() != null) - lMask = dataSet.getLabelsMaskArrays()[0]; - - DataSet ds = new DataSet(features, labels, fMask, lMask); - fit(ds); - } else { - throw new DL4JInvalidInputException( - "MultiLayerNetwork can't handle MultiDataSet with more than 1 features or labels array." + - "Please consider use of ComputationGraph"); - } - } - - /** - * Perform minibatch training on all minibatches in the MultiDataSetIterator, for the specified number of epochs. - * Equvalent to calling {@link #fit(MultiDataSetIterator)} numEpochs times in a loop - * - * @param iterator Training data (DataSetIterator). Iterator must support resetting - * @param numEpochs Number of training epochs, >= 1 - */ - public void fit(@NonNull MultiDataSetIterator iterator, int numEpochs){ - Preconditions.checkArgument(numEpochs > 0, "Number of epochs much be > 0. Got numEpochs = %s", numEpochs); - Preconditions.checkArgument(numEpochs == 1 || iterator.resetSupported(), "Cannot perform multiple epochs training using" + - "iterator has does not support resetting (iterator.resetSupported() returned false)"); - - for(int i = 0; i < numEpochs; i++) { - fit(iterator); - } - } - - /** - * Perform minibatch training on all minibatches in the MultiDataSetIterator.
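A minimal sketch of the single-input/single-output restriction described here, assuming a guard that mirrors the check performed inside fit(MultiDataSet).

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.dataset.api.MultiDataSet;

public class MultiDataSetFitSketch {
    // MultiLayerNetwork accepts a MultiDataSet only if it carries exactly one
    // features array and one labels array; otherwise a ComputationGraph is needed.
    static void fitIfSingleInput(MultiLayerNetwork net, MultiDataSet mds) {
        if (mds.getFeatures().length == 1 && mds.getLabels().length == 1) {
            net.fit(mds);
        } else {
            throw new IllegalArgumentException(
                    "Use ComputationGraph for multiple input/output arrays");
        }
    }
}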
- * Note: The MultiDataSets in the MultiDataSetIterator must have exactly 1 input and output array (as - * MultiLayerNetwork only supports 1 input and 1 output) - * - * @param iterator Training data (DataSetIterator). Iterator must support resetting - */ - @Override - public void fit(MultiDataSetIterator iterator) { - fit(new MultiDataSetWrapperIterator(iterator)); - } - - @Override - public T[] doEvaluation(MultiDataSetIterator iterator, T[] evaluations) { - return doEvaluation(new MultiDataSetWrapperIterator(iterator), evaluations); - } - - /** - * Evaluate the network (for classification) on the provided data set, with top N accuracy in addition to standard accuracy. - * For 'standard' accuracy evaluation only, use topN = 1 - * - * @param iterator Iterator (data) to evaluate on - * @param labelsList List of labels. May be null. - * @param topN N value for top N accuracy evaluation - * @return Evaluation object, summarizing the results of the evaluation on the provided DataSetIterator - */ - public Evaluation evaluate(DataSetIterator iterator, List labelsList, int topN) { - if (layers == null || !(getOutputLayer() instanceof IOutputLayer)) { - throw new IllegalStateException("Cannot evaluate network with no output layer"); - } - if (labelsList == null) { - try { - labelsList = iterator.getLabels(); - } catch (Throwable t){ } //Ignore, maybe UnsupportedOperationException etc - } - - Layer outputLayer = getOutputLayer(); - if(getLayerWiseConfigurations().isValidateOutputLayerConfig()){ - OutputLayerUtil.validateOutputLayerForClassifierEvaluation(outputLayer.conf().getLayer(), Evaluation.class); - } - - Evaluation e = new org.deeplearning4j.eval.Evaluation(labelsList, topN); - doEvaluation(iterator, e); - - return e; - } - - protected void update(Task task) { - if (!initDone) { - initDone = true; - Heartbeat heartbeat = Heartbeat.getInstance(); - task = ModelSerializer.taskByModel(this); - Environment env = EnvironmentUtils.buildEnvironment(); - heartbeat.reportEvent(Event.STANDALONE, env, task); - } - } - - /** - * String detailing the architecture of the multilayernetwork. - * Columns are LayerIndex with layer type, nIn, nOut, Total number of parameters and the Shapes of the parameters - * Will also give information about frozen layers, if any. - * @return Summary as a string - * @see #memoryInfo(int, InputType) - */ - public String summary() { - return summary(null); - } - - /** - * String detailing the architecture of the multilayernetwork. - * Will also display activation size when given an input type. - * Columns are LayerIndex with layer type, nIn, nOut, Total number of parameters, Shapes of the parameters, Input activation shape, Output activation shape - * Will also give information about frozen layers, if any. - * @return Summary as a string - * @see #memoryInfo(int, InputType) - */ - public String summary(InputType inputType) { - StringBuilder ret = new StringBuilder(); - ret.append("\n"); - - List lines = new ArrayList<>(); - if(inputType == null){ - lines.add(new String[]{"LayerName (LayerType)", "nIn,nOut", "TotalParams", "ParamsShape"}); - } else { - lines.add(new String[]{"LayerName (LayerType)", "nIn,nOut", "TotalParams", "ParamsShape", "InputShape", "OutputShape"}); - } - int[] maxLength = new int[inputType == null ? 
4 : 6]; - String[] header = lines.get(0); - for( int i=0; i 0) { - paramShape = ""; - if (currentLayer instanceof BidirectionalLayer) { // Bidirectional layer is not an FFL - BidirectionalLayer bi = (BidirectionalLayer) currentLayer; - in = String.valueOf(((Bidirectional)bi.conf().getLayer()).getNIn()); - out = String.valueOf(((Bidirectional)bi.conf().getLayer()).getNOut()); - } else { - try { - in = String.valueOf(((FeedForwardLayer) currentLayer.conf().getLayer()).getNIn()); - out = String.valueOf(((FeedForwardLayer) currentLayer.conf().getLayer()).getNOut()); - } - catch (Exception e) { // Some layers, like PReLU, are just BaseLayers (but have parameters) - } - } - Set paraNames = currentLayer.paramTable().keySet(); - for (String aP : paraNames) { - String paramS = ArrayUtils.toString(currentLayer.paramTable().get(aP).shape()); - paramShape += aP + ":" + paramS + ", "; - } - paramShape = paramShape.subSequence(0, paramShape.lastIndexOf(",")).toString(); - } - if (currentLayer instanceof FrozenLayer) { - frozenParams += currentLayer.numParams(); - classNameArr = ((FrozenLayer) currentLayer).getInsideLayer().getClass().getName().split("\\."); - className = "Frozen " + classNameArr[classNameArr.length - 1]; + for (Map.Entry entry : + currPair.getFirst().gradientForVariable().entrySet()) { + String origName = entry.getKey(); + multiGradientKey = i + "_" + origName; + gradientList.addLast( + new Triple<>( + multiGradientKey, + entry.getValue(), + currPair.getFirst().flatteningOrderForVariable(origName))); + } + if (getNetConfiguration().getInputPreProcess(i) != null) { + currPair = + new Pair<>( + currPair.getFirst(), + this.getNetConfiguration() + .getInputPreProcess(i) + .backprop(currPair.getSecond(), getInputMiniBatchSize(), workspaceMgr)); + if (i > 0 && currPair.getSecond() != null) { + validateArrayWorkspaces( + workspaceMgr, + currPair.getSecond(), + ArrayType.ACTIVATION_GRAD, + i, + true, + "Backprop"); } + } - String[] line; - if (inputType == null) { - line = new String[]{name + " (" + className + ")", in + "," + out, paramCount, paramShape}; + if (i == 0) { + if (returnInputActGrad && currPair.getSecond() != null) { + currPair.setSecond(currPair.getSecond().detach()); } else { - line = new String[]{name + " (" + className + ")", in + "," + out, paramCount,paramShape,inShape,outShape}; + currPair.setSecond(null); } - for( int i=0; i triple : gradientList) { + gradient.setGradientFor(triple.getFirst(), triple.getSecond(), triple.getThird()); + } + + return new Pair<>(gradient, currPair.getSecond()); + } + + protected void doTruncatedBPTT( + INDArray input, + INDArray labels, + INDArray featuresMaskArray, + INDArray labelsMaskArray, + LayerWorkspaceMgr workspaceMgr) { + if (input.rank() != 3 || labels.rank() != 3) { + log.warn( + "Cannot do truncated BPTT with non-3d inputs or labels. 
Expect input with shape [miniBatchSize,nIn,timeSeriesLength], got " + + Arrays.toString(input.shape()) + + "\tand labels with shape " + + Arrays.toString(labels.shape())); + return; + } + if (input.size(2) != labels.size(2)) { + log.warn( + "Input and label time series have different lengths: {} input length, {} label length", + input.size(2), + labels.size(2)); + return; + } + + int fwdLen = getNetConfiguration().getTbpttFwdLength(); + update(TaskUtils.buildTask(input, labels)); + val timeSeriesLength = input.size(2); + long nSubsets = timeSeriesLength / fwdLen; + if (timeSeriesLength % fwdLen != 0) { + nSubsets++; // Example: 100 fwdLen with timeSeriesLength=120 -> want 2 subsets (1 of size 100, + // 1 of size 20) + } + + rnnClearPreviousState(); + + for (int i = 0; i < nSubsets; i++) { + long startTimeIdx = (long) i * fwdLen; + long endTimeIdx = startTimeIdx + fwdLen; + if (endTimeIdx > timeSeriesLength) { + endTimeIdx = timeSeriesLength; + } + + if (startTimeIdx > Integer.MAX_VALUE || endTimeIdx > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + INDArray[] subsets = + getSubsetsForTbptt( + (int) startTimeIdx, + (int) endTimeIdx, + input, + labels, + featuresMaskArray, + labelsMaskArray); + + setInput(subsets[0]); + setLabels(subsets[1]); + setLayerMaskArrays(subsets[2], subsets[3]); + + if (solver == null) { + try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) + .build(); + } + } + solver.optimize(workspaceMgr); + + // Finally, update the state of the RNN layers: + updateRnnStateWithTBPTTState(); + } + + rnnClearPreviousState(); + clearLayerMaskArrays(); + } + + private INDArray[] getSubsetsForTbptt( + int startTimeIdx, + int endTimeIdx, + INDArray input, + INDArray labels, + INDArray fMask, + INDArray lMask) { + INDArray[] out = new INDArray[4]; + out[0] = + input.get( + NDArrayIndex.all(), + NDArrayIndex.all(), + NDArrayIndex.interval(startTimeIdx, endTimeIdx)); + out[1] = + labels.get( + NDArrayIndex.all(), + NDArrayIndex.all(), + NDArrayIndex.interval(startTimeIdx, endTimeIdx)); + + if (fMask != null) { + out[2] = fMask.get(NDArrayIndex.all(), NDArrayIndex.interval(startTimeIdx, endTimeIdx)); + } + if (lMask != null) { + out[3] = lMask.get(NDArrayIndex.all(), NDArrayIndex.interval(startTimeIdx, endTimeIdx)); + } + + return out; + } + + /** Intended for internal/developer use */ + public void updateRnnStateWithTBPTTState() { + for (int i = 0; i < layers.length; i++) { + if (layers[i] instanceof RecurrentLayer) { + RecurrentLayer l = ((RecurrentLayer) layers[i]); + l.rnnSetPreviousState(l.rnnGetTBPTTState()); + } else if (layers[i] instanceof MultiLayerNetwork) { + ((MultiLayerNetwork) layers[i]).updateRnnStateWithTBPTTState(); + } + } + } + + /** + * Get the {@link TrainingListener}s set for this network, if any + * + * @return listeners set for this network + */ + public Collection getTrainingListeners() { + return trainingListeners; + } + + /** + * @param listeners + */ + @Override + public void addTrainingListeners(Collection listeners) { + this.addTrainingListeners(listeners.toArray(new TrainingListener[] {})); + } + + /** + * This method ADDS additional TrainingListener to existing listeners + * + * @param listeners + */ + @Override + public void addTrainingListeners(TrainingListener... 
listeners) { + Collections.addAll(trainingListeners, listeners); + + // fixme this is wrong, since it removes existing listeners from the solver + if (solver != null) { + solver.setListeners(this.trainingListeners); + } + } + + /** + * Usable only for classification networks in conjunction with OutputLayer. Cannot be used with + * RnnOutputLayer, CnnLossLayer, or networks used for regression.
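+ * <p>A minimal usage sketch (illustrative only; assumes {@code net} is an initialized, trained
+ * network and {@code features} is a rank-2 feature matrix of shape [miniBatchSize, nIn]):
+ * <pre>{@code
+ * int[] predictedClasses = net.predict(features);
+ * // predictedClasses[i] is the argmax class index for example i
+ * }</pre>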
+ * To get the raw output activations of the output layer, use {@link #output(INDArray)} or + * similar.
+ *
+ * Equivalent to argmax(this.output(input)): Returns the predicted class indices corresponding to + * the predictions for each example in the features array. + * + * @param d The input features to perform inference on + * @return The predicted class index for each example + */ + @Override + public int[] predict(INDArray d) { + INDArray output = output(d, Layer.TrainingMode.TEST); + + if (d.size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + + Preconditions.checkState( + output.rank() == 2, + "predict(INDArray) method can only be used on rank 2 output - got array with rank %s", + output.rank()); + return output.argMax(1).toIntVector(); + } + + /** + * As per {@link #predict(INDArray)} but the returned values are looked up from the list of label + * names in the provided DataSet + */ + @Override + public List predict(org.nd4j.linalg.dataset.api.DataSet dataSet) { + Preconditions.checkState( + dataSet.getLabelNamesList() != null, + "This method can only be used when the DataSet contains a label name list"); + int[] intRet = predict(dataSet.getFeatures()); + List ret = new ArrayList<>(); + for (int i = 0; i < intRet.length; i++) { + ret.add(i, dataSet.getLabelName(intRet[i])); + } + return ret; + } + + /** + * Fit the model for one iteration on the provided data + * + * @param data the examples to classify (one example in each row) + * @param labels the example labels(a binary outcome matrix) + */ + @Override + public void fit(INDArray data, INDArray labels) { + if (!initCalled) init(); + fit(data, labels, null, null); + } + + /** + * Fit the model for one iteration on the provided data + * + * @param features the examples to classify (one example in each row) + * @param labels the example labels(a binary outcome matrix) + * @param featuresMask The mask array for the features (used for variable length time series, + * etc). May be null. + * @param labelsMask The mask array for the labels (used for variable length time series, etc). + * May be null. 
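+ * <p>Illustrative sketch (assumes {@code net} is initialized, {@code features}/{@code labels} are
+ * time series of shape [miniBatchSize, nIn/nOut, timeSeriesLength], and the masks, which may be
+ * null for fixed-length data, have shape [miniBatchSize, timeSeriesLength]):
+ * <pre>{@code
+ * net.fit(features, labels, featuresMask, labelsMask);   // one fit iteration with masking
+ * net.fit(features, labels, null, null);                 // without masks (fixed-length data)
+ * }</pre>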
+ */ + public synchronized void fit( + INDArray features, INDArray labels, INDArray featuresMask, INDArray labelsMask) { + if (!initCalled) init(); + try { + fitHelper(features, labels, featuresMask, labelsMask); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + private void fitHelper( + INDArray features, INDArray labels, INDArray featuresMask, INDArray labelsMask) { + if (!initCalled) init(); + if (numParams() == 0) { + // No op: can't fit a network with 0 parameters + return; + } + + setInput(features); + setLabels(labels); + this.setLayerMaskArrays(featuresMask, labelsMask); + update(TaskUtils.buildTask(features, labels)); + + LayerWorkspaceMgr workspaceMgr; + if (getNetConfiguration().getTrainingWorkspaceMode() == null) { + workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + workspaceMgr = + LayerWorkspaceMgr.builder() + .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + // Note for updater working memory, we have the option to re-use WS_ALL_LAYERS_ACT or + // FF/BP_WORKING_MEM + // these should be closed by the time updaters are executed + // Generally, WS_ALL_LAYERS_ACT will be the larger of the two, so we'll use this + .with(ArrayType.UPDATER_WORKING_MEM, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .build(); + } + workspaceMgr.setHelperWorkspacePointers(helperWorkspaces); + + if (getNetConfiguration().getBackpropType() == BackpropType.TruncatedBPTT) { + doTruncatedBPTT(features, labels, featuresMask, labelsMask, workspaceMgr); + } else { + if (solver == null) { + try (MemoryWorkspace wsO = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) + .build(); + } + } + // TODO CACHE WORKSPACE, IF USED??? + solver.optimize(workspaceMgr); + } + + clearLayerMaskArrays(); + clearLayersStates(); + synchronizeIterEpochCounts(); + } + + @Override + public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) { + throw new UnsupportedOperationException("Not supported: use pretrainLayer"); + } + + /** + * Fit the model for one iteration on the provided data + * + * @param data the data to train on + */ + @Override + public void fit(org.nd4j.linalg.dataset.api.DataSet data) { + if (!initCalled) init(); + fit( + data.getFeatures(), + data.getLabels(), + data.getFeaturesMaskArray(), + data.getLabelsMaskArray()); + } + + /** + * Fit the model for one iteration on the provided data + * + * @param examples the examples to classify (one example in each row) + * @param labels the labels for each example (the number of labels must match + */ + @Override + public void fit(INDArray examples, int[] labels) { + if (!initCalled) init(); + org.deeplearning4j.nn.conf.layers.OutputLayer layerConf = + (org.deeplearning4j.nn.conf.layers.OutputLayer) getOutputLayer().getLayerConfiguration(); + + if (layerConf.getNOut() > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + fit(examples, FeatureUtil.toOutcomeMatrix(labels, (int) layerConf.getNOut())); + } + + /** + * Perform inference on the provided input/features - i.e., perform forward pass using the + * provided input/features and return the output of the final layer. + * + * @param input Input to the network + * @param train whether the output is test or train. 
This mainly affects hyperparameters such as
+ * dropout and batch normalization, which have different behaviour for test vs. train
+ * @return The network predictions - i.e., the activations of the final layer
+ */
+ public INDArray output(INDArray input, TrainingMode train) {
+ return output(input, train == TrainingMode.TRAIN);
+ }
+
+ /**
+ * Perform inference on the provided input/features - i.e., perform forward pass using the
+ * provided input/features and return the output of the final layer.
+ *
+ * @param input Input to the network
+ * @param train whether the output is test or train. This mainly affects hyperparameters such as
+ * dropout and batch normalization, which have different behaviour for test vs. train
+ * @return The network predictions - i.e., the activations of the final layer
+ */
+ public INDArray output(INDArray input, boolean train) {
+ return output(input, train, null, null);
+ }
+
+ /**
+ * Calculate the output of the network, with masking arrays. The masking arrays are used in
+ * situations such as one-to-many and many-to-one recurrent neural network (RNN) designs, as well
+ * as for supporting time series of varying lengths within the same minibatch.
+ */
+ public INDArray output(
+ INDArray input, boolean train, INDArray featuresMask, INDArray labelsMask) {
+ return output(input, train, featuresMask, labelsMask, null);
+ }
+
+ /**
+ * Get the network output, which is optionally placed in the specified memory workspace.<br>
+ * If no memory workspace is provided, the output will be detached (not in any workspace).
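+ * <p>A sketch of workspace-managed inference (illustrative only; assumes {@code net} and
+ * {@code features} already exist, and the workspace id {@code "EXAMPLE_OUTPUT_WS"} is arbitrary):
+ * <pre>{@code
+ * WorkspaceConfiguration wsConf = WorkspaceConfiguration.builder().build();
+ * try (MemoryWorkspace ws =
+ *     Nd4j.getWorkspaceManager().getAndActivateWorkspace(wsConf, "EXAMPLE_OUTPUT_WS")) {
+ *   INDArray out = net.output(features, false, ws);
+ *   // use 'out' here, or call out.detach() if it must outlive the workspace
+ * }
+ * }</pre>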
+ * If a memory workspace is provided, the output activation array (i.e., the INDArray returned by + * this method) will be placed in the specified workspace. This workspace must be opened by the + * user before calling this method - and the user is responsible for (a) closing this workspace, + * and (b) ensuring the output array is not used out of scope (i.e., not used after closing the + * workspace to which it belongs - as this is likely to cause either an exception when used, or a + * crash). + * + * @param input Input to the network + * @param train True for train, false otherwise + * @param outputWorkspace May be null. If not null: the workspace MUST be opened before calling + * this method. + * @return The output/activations from the network (either detached or in the specified workspace + * if provided) + */ + public INDArray output(INDArray input, boolean train, MemoryWorkspace outputWorkspace) { + return output(input, train, null, null, outputWorkspace); + } + + /** + * Get the network output, which is optionally placed in the specified memory workspace.
+ * If no memory workspace is provided, the output will be detached (not in any workspace).
+ * If a memory workspace is provided, the output activation array (i.e., the INDArray returned by + * this method) will be placed in the specified workspace. This workspace must be opened by the + * user before calling this method - and the user is responsible for (a) closing this workspace, + * and (b) ensuring the output array is not used out of scope (i.e., not used after closing the + * workspace to which it belongs - as this is likely to cause either an exception when used, or a + * crash). + * + * @param input Input to the network + * @param train True for train, false otherwise + * @param outputWorkspace May be null. If not null: the workspace MUST be opened before calling + * this method. + * @return The output/activations from the network (either detached or in the specified workspace + * if provided) + */ + public synchronized INDArray output( + INDArray input, + boolean train, + INDArray featuresMask, + INDArray labelsMask, + MemoryWorkspace outputWorkspace) { + try { + return outputOfLayerDetached( + train, + FwdPassType.STANDARD, + layers.length - 1, + input, + featuresMask, + labelsMask, + outputWorkspace); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + /** + * This method uses provided OutputAdapter to return custom object built from INDArray + * + *

PLEASE NOTE: This method uses dedicated Workspace for output generation to avoid redundant
+ * allocations
+ *
+ * @param inputs Input arrays to the network
+ * @param inputMasks Optional input mask arrays (may be null)
+ * @param labelMasks Optional label mask arrays (may be null)
+ * @param outputAdapter OutputAdapter instance
+ * @param T extends Object
+ * @return T instance produced by OutputAdapter
+ */
+ public synchronized T output(
+ @NonNull INDArray inputs,
+ INDArray inputMasks,
+ INDArray labelMasks,
+ @NonNull OutputAdapter outputAdapter) {
+ try (val ws =
+ Nd4j.getWorkspaceManager()
+ .getAndActivateWorkspace(WS_ALL_LAYERS_ACT_CONFIG, WS_OUTPUT_MEM)) {
+ if (outputAdapter instanceof ModelAdapter) {
+ return ((ModelAdapter) outputAdapter)
+ .apply(
+ this,
+ new INDArray[] {inputs},
+ new INDArray[] {inputMasks},
+ new INDArray[] {labelMasks});
+ } else {
+ return outputAdapter.apply(output(inputs, false, inputMasks, labelMasks, ws));
+ }
+ }
+ }
+
+ /**
+ * Perform inference on the provided input/features - i.e., perform forward pass using the
+ * provided input/features and return the output of the final layer. Equivalent to {@link
+ * #output(INDArray, boolean)} with train=false - i.e., this method is used for inference.
+ *
+ * @param input Input to the network
+ * @return The network predictions - i.e., the activations of the final layer
+ */
+ public INDArray output(INDArray input) {
+ return output(input, TrainingMode.TEST);
+ }
+
+ /**
+ * Generate the output for all examples/batches in the input iterator, and concatenate them into a
+ * single array. See {@link #output(INDArray)}<br>
+ * NOTE 1: The output array can require a considerable amount of memory for iterators with a large + * number of examples
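+ * <p>Illustrative sketch (assumes {@code net} is trained and {@code testIter} is a
+ * DataSetIterator over fixed-shape examples):
+ * <pre>{@code
+ * INDArray allOutputs = net.output(testIter, false);   // all minibatches, concatenated along dim 0
+ * }</pre>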
+ * NOTE 2: This method cannot be used for variable length time series outputs, as this would + * require padding arrays for some outputs, or returning a mask array (which cannot be done with + * this method). For variable length time series applications, use one of the other output + * methods. This method also cannot be used with fully convolutional networks with different + * output sizes (for example, segmentation on different input image sizes). + * + * @param iterator Data to pass through the network + * @return output for all examples in the iterator, concatenated into a + */ + public INDArray output(DataSetIterator iterator, boolean train) { + List outList = new ArrayList<>(); + long[] firstOutputShape = null; + while (iterator.hasNext()) { + DataSet next = iterator.next(); + INDArray features = next.getFeatures(); + + if (features == null) { + continue; + } + + INDArray fMask = next.getFeaturesMaskArray(); + INDArray lMask = next.getLabelsMaskArray(); + INDArray output = this.output(features, train, fMask, lMask); + outList.add(output); + if (firstOutputShape == null) { + firstOutputShape = output.shape(); + } else { + // Validate that shapes are the same (may not be, for some RNN variable length time series + // applications) + long[] currShape = output.shape(); + Preconditions.checkState( + firstOutputShape.length == currShape.length, + "Error during forward pass:" + + "different minibatches have different output array ranks - first minibatch shape %s, last minibatch shape %s", + firstOutputShape, + currShape); + for (int i = 1; + i < currShape.length; + i++) { // Skip checking minibatch dimension, fine if this varies + Preconditions.checkState( + firstOutputShape[i] == currShape[i], + "Current output shape does not match first" + + " output array shape at position %s: all dimensions must match other than the first dimension.\n" + + " For variable length output size/length use cases such as for RNNs with multiple sequence lengths," + + " use one of the other (non iterator) output methods. First batch output shape: %s, current batch output shape: %s", + i, + firstOutputShape, + currShape); + } + } + } + return Nd4j.concat(0, outList.toArray(new INDArray[outList.size()])); + } + + /** Equivalent to {@link #output(DataSetIterator, boolean)} with train=false */ + public INDArray output(DataSetIterator iterator) { + return output(iterator, false); + } + + /** + * Perform inference and then calculate the F1 score of the output(input) vs. the labels. + * + * @param input the input to perform inference with + * @param labels the true labels + * @return the score for the given input,label pairs + */ + @Override + public double f1Score(INDArray input, INDArray labels) { + feedForward(input); + setLabels(labels); + Evaluation eval = new Evaluation(); + eval.eval(labels, output(input)); + return eval.f1(); + } + + /** + * @deprecated Will be removed in a future release + */ + @Deprecated + @Override + public int numLabels() { + return (int) labels.size(1); + } + + /** + * Sets the input and labels and calculates the score (value of the output layer loss function + * plus l1/l2 if applicable) for the prediction with respect to the true labels
+ * This is equivalent to {@link #score(DataSet, boolean)} with training==false. + * + * @param data the data to score + * @return the score for the given input,label pairs + * @see #score(DataSet, boolean) + */ + public double score(DataSet data) { + return score(data, false); + } + + /** + * Sets the input and labels and calculates the score (value of the output layer loss function + * plus l1/l2 if applicable) for the prediction with respect to the true labels
+ * + * @param data data to calculate score for + * @param training If true: score during training. If false: score at test time. This can affect + * the application of certain features, such as dropout and dropconnect (which are applied at + * training time only) + * @return the score (value of the loss function) + */ + public double score(DataSet data, boolean training) { + try { + return scoreHelper(data, training); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + private double scoreHelper(DataSet data, boolean training) { + boolean hasMaskArray = data.hasMaskArrays(); + if (hasMaskArray) { + setLayerMaskArrays(data.getFeaturesMaskArray(), data.getLabelsMaskArray()); + } + + if (!(getOutputLayer() instanceof IOutputLayer)) { + throw new IllegalStateException( + "Cannot calculate score if final layer is not an instance of IOutputLayer. " + + "Final layer is of type: " + + getOutputLayer().getClass()); + } + + WorkspaceMode wsm = + (training + ? getNetConfiguration().getTrainingWorkspaceMode() + : getNetConfiguration().getInferenceWorkspaceMode()); + LayerWorkspaceMgr mgr; + if (wsm == WorkspaceMode.NONE) { + mgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + mgr = + LayerWorkspaceMgr.builder() + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + // TODO we can probably optimize this + .noWorkspaceFor(ArrayType.ACTIVATIONS) + .noWorkspaceFor(ArrayType.INPUT) + .build(); + } + mgr.setHelperWorkspacePointers(helperWorkspaces); + + INDArray inputToOutputLayer = + outputOfLayerDetached( + training, + FwdPassType.STANDARD, + layers.length - 2, + data.getFeatures(), + data.getFeaturesMaskArray(), + data.getLabelsMaskArray(), + null); + + if (data.getFeatures().size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + IOutputLayer ol = (IOutputLayer) getOutputLayer(); + if (getNetConfiguration().getInputPreProcess(layers.length - 1) != null) { + inputToOutputLayer = + getNetConfiguration() + .getInputPreProcess(layers.length - 1) + .preProcess(inputToOutputLayer, (int) data.getFeatures().size(0), mgr); + } + ol.setInput(inputToOutputLayer, mgr); // Feedforward doesn't include output layer for efficiency + ol.setLabels(data.getLabels()); + double score; + try (MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { + score = ol.computeScore(calcRegularizationScore(true), training, mgr); + } + + if (hasMaskArray) { + clearLayerMaskArrays(); + } + clearLayersStates(); + + return score; + } + + /** + * As per {@link #scoreExamples(DataSet, boolean)} - the outputs (example scores) for all DataSets + * in the iterator are concatenated + */ + public INDArray scoreExamples(DataSetIterator iter, boolean addRegularizationTerms) { + List out = new ArrayList<>(); + + while (iter.hasNext()) { + out.add(scoreExamples(iter.next(), addRegularizationTerms)); + } + return Nd4j.toFlattened('f', out); + } + + /** + * Calculate the score for each example in a DataSet individually. Unlike {@link #score(DataSet)} + * and {@link #score(DataSet, boolean)} this method does not average/sum over examples. This + * method allows for examples to be scored individually (at test time only), which may be useful + * for example for autoencoder architectures and the like.
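+ * <p>Illustrative sketch (assumes {@code net} is trained and {@code ds} is a DataSet whose
+ * examples should be scored individually):
+ * <pre>{@code
+ * INDArray perExampleScores = net.scoreExamples(ds, true);   // column vector, one score per example
+ * double worst = perExampleScores.maxNumber().doubleValue(); // e.g., locate the hardest example
+ * }</pre>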
+ * Each row of the output (assuming addRegularizationTerms == true) is equivalent to calling + * score(DataSet) with a single example. + * + * @param data The data to score + * @param addRegularizationTerms If true: add l1/l2 regularization terms (if any) to the score. If + * false: don't add regularization terms + * @return An INDArray (column vector) of size input.numRows(); the ith entry is the score (loss + * value) of the ith example + */ + public INDArray scoreExamples(DataSet data, boolean addRegularizationTerms) { + try { + return scoreExamplesHelper(data, addRegularizationTerms); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + private INDArray scoreExamplesHelper(DataSet data, boolean addRegularizationTerms) { + INDArray inputLast = + outputOfLayerDetached( + false, + FwdPassType.STANDARD, + layers.length - 2, + data.getFeatures(), + data.getFeaturesMaskArray(), + data.getLabelsMaskArray(), + null); + setLabels(data.getLabels()); + setLayerMaskArrays(data.getFeaturesMaskArray(), data.getLabelsMaskArray()); + + // TODO we might want workspaces here? + LayerWorkspaceMgr mgr = LayerWorkspaceMgr.noWorkspaces(); + + INDArray out; + if (getOutputLayer() instanceof IOutputLayer) { + IOutputLayer ol = (IOutputLayer) getOutputLayer(); + if (getNetConfiguration().getInputPreProcess(layers.length - 1) != null) { + + if (data.getFeatures().size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + inputLast = + getNetConfiguration() + .getInputPreProcess(layers.length - 1) + .preProcess(inputLast, (int) data.getFeatures().size(0), mgr); + } + ol.setLabels(data.getLabels()); + ol.setInput(inputLast, mgr); + double r = (addRegularizationTerms ? calcRegularizationScore(true) : 0); + out = ol.computeScoreForExamples(r, mgr); + } else { + throw new UnsupportedOperationException( + "Cannot calculate score with respect to labels without an OutputLayer"); + } + + clearLayersStates(); + clearLayerMaskArrays(); + return out; + } + + @Override + public void fit() { + fit(input, labels); + } + + @Override + public void update(INDArray gradient, String paramType) { + throw new UnsupportedOperationException("Not implemented"); + } + + /** + * Score of the model (relative to the objective function) - previously calculated on the last + * minibatch + * + * @return the score of the model (relative to the objective function) + */ + @Override + public double getScore() { + return score; + } + + /** Intended for developer/internal use */ + public void setScore(double score) { + this.score = score; + } + + @Override + public void computeGradientAndScore(LayerWorkspaceMgr layerWorkspaceMgr) { + computeGradientAndScore(); + } + + public void computeGradientAndScore() { + + if (!(getOutputLayer() instanceof IOutputLayer)) { + throw new DL4JException( + "Cannot calculate gradient and score with respect to labels: final layer is not an IOutputLayer. " + + "Final layer class: " + + getOutputLayer().getClass() + + ". To calculate gradients and fit a network " + + "using backpropagation, the final layer must be an output layer"); + } + + // Note: Workspace manager is only ose here for score calculation... 
other workspace managers + // are used in the + // various FF/backprop methds + LayerWorkspaceMgr mgr; + if (getNetConfiguration().getTrainingWorkspaceMode() == WorkspaceMode.NONE) { + mgr = LayerWorkspaceMgr.noWorkspaces(); + } else { + mgr = + LayerWorkspaceMgr.builder() + .with(ArrayType.INPUT, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.ACTIVATIONS, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG) + .with(ArrayType.FF_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with(ArrayType.BP_WORKING_MEM, WS_LAYER_WORKING_MEM, WS_LAYER_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_FF_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .with( + ArrayType.RNN_BP_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM, + WS_RNN_LOOP_WORKING_MEM_CONFIG) + .build(); + + if (getNetConfiguration().getCacheMode() != null) { + // For now: store cache mode activations in activations workspace + mgr.setWorkspace(ArrayType.FF_CACHE, WS_ALL_LAYERS_ACT, WS_ALL_LAYERS_ACT_CONFIG); + } + } + + boolean tbptt = getNetConfiguration().getBackpropType() == BackpropType.TruncatedBPTT; + FwdPassType fwdType = + (tbptt ? FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE : FwdPassType.STANDARD); + synchronizeIterEpochCounts(); + + // Calculate activations (which are stored in each layer, and used in backprop) + try (MemoryWorkspace ws = mgr.notifyScopeEntered(ArrayType.ACTIVATIONS)) { + // First: do a feed-forward through the network + // Note that we don't actually need to do the full forward pass through the output layer right + // now; but we do + // need the input to the output layer to be set (such that backprop can be done) + List activations = + ffToLayerActivationsInWs(layers.length - 2, fwdType, tbptt, input, mask, null); + if (!trainingListeners.isEmpty()) { + // TODO: We possibly do want output layer activations in some cases here... + for (TrainingListener tl : trainingListeners) { + tl.onForwardPass(this, activations); + } + } + INDArray inputToOutputLayer = activations.get(activations.size() - 1); + if (getNetConfiguration().getInputPreProcess(layers.length - 1) != null) { + inputToOutputLayer = + getNetConfiguration() + .getInputPreProcess(layers.length - 1) + .preProcess(inputToOutputLayer, getInputMiniBatchSize(), mgr); + // Validate activations location + } + getOutputLayer().setInput(inputToOutputLayer, mgr); + // Then: compute gradients + Pair pair = calcBackpropGradients(null, true, false, false); + this.gradient = (pair == null ? null : pair.getFirst()); + + // Calculate score + try (MemoryWorkspace wsFF = mgr.notifyScopeEntered(ArrayType.FF_WORKING_MEM)) { + double r = calcRegularizationScore(true); + score = ((IOutputLayer) getOutputLayer()).computeScore(r, true, mgr); + } + + // Listeners + if (!trainingListeners.isEmpty()) { + try (MemoryWorkspace workspace = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + for (TrainingListener tl : trainingListeners) { + tl.onBackwardPass(this); + } + } + } + } + + // Clear the post noise/dropconnect parameters on the output layer + getOutputLayer().clearNoiseWeightParams(); + } + + /** Clear the inputs. Clears optimizer state. 
*/ + public void clear() { + for (Layer layer : layers) { + layer.clear(); + } + + input = null; + labels = null; + solver = null; + } + + @Override + public void applyConstraints(int iteration, int epoch) { + for (Layer l : layers) { + l.applyConstraints(iteration, epoch); + } + } + + @Override + public void setInput(INDArray input, LayerWorkspaceMgr mgr) { + throw new UnsupportedOperationException("Not supported"); + } + + /** + * Get the output layer - i.e., the last layer in the netwok + * + * @return + */ + public Layer getOutputLayer() { + Layer ret = getLayers()[getLayers().length - 1]; + if (ret instanceof FrozenLayerWithBackprop) { + ret = ((FrozenLayerWithBackprop) ret).getInsideLayer(); + } + return ret; + } + + /** See {@link #setParams(INDArray)} */ + public void setParameters(INDArray params) { + setParams(params); + } + + public INDArray getLabels() { + return labels; + } + + /** + * @param labels Labels to set + */ + public void setLabels(INDArray labels) { + this.labels = labels; + } + + public INDArray getInput() { + return input; + } + + /** + * Set the input array for the network + * + * @param input Input array to set + */ + public void setInput(INDArray input) { + this.input = input; + if (this.layers == null) { + init(); + } + if (input != null) { + if (input.length() == 0) { + throw new IllegalArgumentException( + "Invalid input: length 0 (shape: " + Arrays.toString(input.shape()) + ")"); + } + + if (input.size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + setInputMiniBatchSize((int) input.size(0)); + } + } + + /** + * Get the number of layers in the network + * + * @return the number of layers in the network + */ + public int getnLayers() { + return getNetConfiguration().getFlattenedLayerConfigurations().size(); + } + + /** + * @return The layers in the network + */ + public synchronized Layer[] getLayers() { + return layers; + } + + public void setLayers(Layer[] layers) { + this.layers = layers; + } + + public Layer getLayer(int i) { + Preconditions.checkArgument( + i >= 0 && i < layers.length, + "Invalid layer index: layer index must be 0" + " to %s (inclusive), got index %s", + layers.length - 1, + i); + return layers[i]; + } + + public Layer getLayer(@NotNull String name) { + return Arrays.stream(layers) + .filter(l -> !l.getLayerConfiguration().getLayerName().equals(name)) + .findFirst() + .get(); + } + + public List getLayerNames() { + return Arrays.stream(layers) + .map(l -> l.getLayerConfiguration().getLayerName()) + .collect(Collectors.toList()); + } + + public INDArray getMask() { + return mask; + } + + public void setMask(INDArray mask) { + this.mask = mask; + } + + public INDArray getMaskArray() { + return mask; + } + + @Override + public void setMaskArray(INDArray maskArray) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isPretrainLayer() { + return false; + } + + @Override + public void clearNoiseWeightParams() { + for (Layer l : layers) { + l.clearNoiseWeightParams(); + } + } + + @Override + public void allowInputModification(boolean allow) { + throw new UnsupportedOperationException("Not supported"); + } + + // ========== + // LayerConfiguration methods + + @Override + public Pair feedForwardMaskArray( + INDArray maskArray, MaskState currentMaskState, int minibatchSize) { + if (maskArray == null) { + for (int i = 0; i < layers.length; i++) { + layers[i].feedForwardMaskArray(null, null, minibatchSize); + } + } else { + // Do a forward pass through each preprocessor and layer + for (int i = 0; 
i < layers.length; i++) { + InputPreProcessor preProcessor = getNetConfiguration().getInputPreProcess(i); + + if (preProcessor != null) { + Pair p = + preProcessor.feedForwardMaskArray(maskArray, currentMaskState, minibatchSize); + if (p != null) { + maskArray = p.getFirst(); + currentMaskState = p.getSecond(); + } else { + maskArray = null; + currentMaskState = null; + } + } + + Pair p = + layers[i].feedForwardMaskArray(maskArray, currentMaskState, minibatchSize); + if (p != null) { + maskArray = p.getFirst(); + currentMaskState = p.getSecond(); + } else { + maskArray = null; + currentMaskState = null; + } + } + } + + return new Pair<>(maskArray, currentMaskState); + } + + @Override + public LayerHelper getHelper() { + throw new UnsupportedOperationException("Not supported"); + } + + @Override + public Type type() { + return Type.MULTILAYER; + } + + /** Equivalent to {@link #output(INDArray)} using the input set via {@link #setInput(INDArray)} */ + public INDArray activate(TrainingMode training) { + return output(input, training == TrainingMode.TRAIN); + } + + /** Equivalent to {@link #output(INDArray, TrainingMode)} */ + public INDArray activate(INDArray input, TrainingMode training) { + return output(input, training == TrainingMode.TRAIN); + } + + @Override + public Pair backpropGradient( + INDArray epsilon, LayerWorkspaceMgr workspaceMgr) { + if (getOutputLayer() instanceof IOutputLayer) { + throw new UnsupportedOperationException( + "Cannot calculate gradients based on epsilon with OutputLayer"); + } + + return calcBackpropGradients(epsilon, false, false, true); + } + + @Override + public int getIndex() { + return layerIndex; + } + + @Override + public void setIndex(int index) { + layerIndex = index; + } + + @Override + public int getIterationCount() { + return getNetConfiguration().getIterationCount(); + } + + @Override + public void setIterationCount(int iterationCount) { + getNetConfiguration().setIterationCount(iterationCount); + } + + @Override + public int getEpochCount() { + return getNetConfiguration().getEpochCount(); + } + + @Override + public void setEpochCount(int epochCount) { + getNetConfiguration().setEpochCount(epochCount); + } + + @Override + public double calcRegularizationScore(boolean backpropParamsOnly) { + double scoreSum = 0.0; + for (int i = 0; i < layers.length; i++) { + scoreSum += layers[i].calcRegularizationScore(backpropParamsOnly); + } + return scoreSum; + } + + @Override + public void update(Gradient gradient) { + if (gradient.gradient().length() != numParams(true)) { + throw new IllegalArgumentException( + "Invalid input: expect gradients array of length " + numParams(true)); + } + for (Map.Entry entry : gradient.gradientForVariable().entrySet()) { + String key = entry.getKey(); + INDArray val = entry.getValue(); + int idx = key.indexOf('_'); + if (idx == -1) { + throw new IllegalStateException( + "Invalid param key: not have layer separator: \"" + key + "\""); + } + Integer layerId = Integer.parseInt(key.substring(0, idx)); + String paramType = key.substring(idx + 1); + // Update MLN gradient + this.gradient.gradientForVariable().put(key, val); + // Update layer params + layers[layerId].update(val, paramType); + } + // Update layerwise gradient view + setBackpropGradientsViewArray(gradient.gradient()); + } + + @Override + public INDArray activate(boolean training, LayerWorkspaceMgr mgr) { + throw new UnsupportedOperationException(); + } + + @Override + public INDArray activate(INDArray input, boolean training, LayerWorkspaceMgr mgr) { + throw new 
UnsupportedOperationException(); + } + + @Override + public int getInputMiniBatchSize() { + if (!getNetConfiguration().isMiniBatch()) { + return 1; + } + + if (input.size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + return (int) input.size(0); + } + + @Override + public void setInputMiniBatchSize(int size) { + if (layers != null) { + for (Layer l : layers) { + l.setInputMiniBatchSize(size); + } + } + } + + /** + * If this MultiLayerNetwork contains one or more RNN layers: conduct forward pass (prediction) + * but using previous stored state for any RNN layers. The activations for the final step are also + * stored in the RNN layers for use next time rnnTimeStep() is called.
+ * This method can be used to generate output one or more steps at a time instead of always having + * to do forward pass from t=0. Example uses are for streaming data, and for generating samples + * from network output one step at a time (where samples are then fed back into the network as + * input)
+ * If no previous state is present in RNN layers (i.e., initially or after calling + * rnnClearPreviousState()), the default initialization (usually 0) is used.
+ * Supports mini-batch (i.e., multiple predictions/forward pass in parallel) as well as for single + * examples.
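+ * <p>Illustrative streaming sketch (assumes {@code net} contains RNN layers and each
+ * {@code step} has shape [miniBatchSize, nIn] or [miniBatchSize, nIn, 1]):
+ * <pre>{@code
+ * net.rnnClearPreviousState();               // start from a clean state
+ * for (INDArray step : incomingSteps) {      // incomingSteps: any source of single time steps
+ *   INDArray out = net.rnnTimeStep(step);    // uses and updates the stored RNN state
+ * }
+ * }</pre>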
+ * + * @param input Input to network. May be for one or multiple time steps. For single time step: + * input has shape [miniBatchSize,inputSize] or [miniBatchSize,inputSize,1]. miniBatchSize=1 + * for single example.
+ * For multiple time steps: [miniBatchSize,inputSize,inputTimeSeriesLength] + * @return Output activations. If output is RNN layer (such as RnnOutputLayer): if input has shape + * [miniBatchSize,inputSize] i.e., is 2d, output has shape [miniBatchSize,outputSize] (i.e., + * also 2d).
+ * Otherwise output is 3d [miniBatchSize,outputSize,inputTimeSeriesLength] when using + * RnnOutputLayer. + * @see #rnnTimeStep(INDArray, MemoryWorkspace) For outputting the activations in the specified + * workspace + */ + public INDArray rnnTimeStep(INDArray input) { + return rnnTimeStep(input, null); + } + + /** + * See {@link #rnnTimeStep(INDArray)} for details
+ * If no memory workspace is provided, the output will be detached (not in any workspace).
+ * If a memory workspace is provided, the output activation array (i.e., the INDArray returned by + * this method) will be placed in the specified workspace. This workspace must be opened by the + * user before calling this method - and the user is responsible for (a) closing this workspace, + * and (b) ensuring the output array is not used out of scope (i.e., not used after closing the + * workspace to which it belongs - as this is likely to cause either an exception when used, or a + * crash). + * + * @param input Input activations + * @param outputWorkspace Output workspace. May be null + * @return The output/activations from the network (either detached or in the specified workspace + * if provided) + */ + public INDArray rnnTimeStep(INDArray input, MemoryWorkspace outputWorkspace) { + try { + boolean inputIs2d = input.rank() == 2; + INDArray out = + outputOfLayerDetached( + false, + FwdPassType.RNN_TIMESTEP, + layers.length - 1, + input, + null, + null, + outputWorkspace); + if (inputIs2d && out.rank() == 3 && layers[layers.length - 1].type() == Type.RECURRENT) { + // Return 2d output with shape [miniBatchSize,nOut] + // instead of 3d output with shape [miniBatchSize,nOut,1] + return out.tensorAlongDimension(0, 1, 0); + } + return out; + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + /** + * Get the state of the RNN layer, as used in rnnTimeStep(). + * + * @param layer Number/index of the layer. + * @return Hidden state, or null if layer is not an RNN layer + */ + public Map rnnGetPreviousState(int layer) { + if (layer < 0 || layer >= layers.length) { + throw new IllegalArgumentException("Invalid layer number"); + } + Layer l = layers[layer]; + if (l instanceof org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer) { + l = ((org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer) l).getUnderlying(); + } + if (!(l instanceof RecurrentLayer)) { + throw new IllegalArgumentException("LayerConfiguration is not an RNN layer"); + } + return ((RecurrentLayer) l).rnnGetPreviousState(); + } + + /** + * Set the state of the RNN layer. + * + * @param layer The number/index of the layer. + * @param state The state to set the specified layer to + */ + public void rnnSetPreviousState(int layer, Map state) { + if (layer < 0 || layer >= layers.length) { + throw new IllegalArgumentException("Invalid layer number"); + } + Layer l = layers[layer]; + if (l instanceof org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer) { + l = ((org.deeplearning4j.nn.layers.wrapper.BaseWrapperLayer) l).getUnderlying(); + } + if (!(l instanceof RecurrentLayer)) { + throw new IllegalArgumentException("LayerConfiguration is not an RNN layer"); + } + RecurrentLayer r = (RecurrentLayer) l; + r.rnnSetPreviousState(state); + } + + /** Clear the previous state of the RNN layers (if any). */ + public void rnnClearPreviousState() { + if (layers == null) { + return; + } + for (int i = 0; i < layers.length; i++) { + if (layers[i] instanceof RecurrentLayer) { + ((RecurrentLayer) layers[i]).rnnClearPreviousState(); + } else if (layers[i] instanceof MultiLayerNetwork) { + ((MultiLayerNetwork) layers[i]).rnnClearPreviousState(); + } else if (layers[i] instanceof BaseWrapperLayer + && ((BaseWrapperLayer) layers[i]).getUnderlying() instanceof RecurrentLayer) { + ((RecurrentLayer) ((BaseWrapperLayer) layers[i]).getUnderlying()).rnnClearPreviousState(); + } + } + } + + /** + * Similar to rnnTimeStep and feedForward() methods. Difference here is that this method:
+ * (a) like rnnTimeStep does forward pass using stored state for RNN layers, and
+ * (b) unlike rnnTimeStep does not modify the RNN layer state
+ * Therefore multiple calls to this method with the same input should have the same output.
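+ * <p>Illustrative sketch (assumes {@code net} contains RNN layers and {@code input} is a 3d
+ * [miniBatchSize, nIn, timeSeriesLength] array):
+ * <pre>{@code
+ * List<INDArray> acts = net.rnnActivateUsingStoredState(input, false, false);
+ * // acts.get(0) is the input; the stored RNN state itself is left unchanged
+ * }</pre>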
+ * Typically used during training only. Use rnnTimeStep for prediction/forward pass at test time. + * + * @param input Input to network + * @param training Whether training or not + * @param storeLastForTBPTT set to true if used as part of truncated BPTT training + * @return Activations for each layer (including input, as per feedforward() etc) + */ + public List rnnActivateUsingStoredState( + INDArray input, boolean training, boolean storeLastForTBPTT) { + return ffToLayerActivationsDetached( + training, + FwdPassType.RNN_ACTIVATE_WITH_STORED_STATE, + storeLastForTBPTT, + layers.length - 1, + input, + mask, + null, + false); + } + + /** + * Get the updater for this MultiLayerNetwork + * + * @return Updater for MultiLayerNetwork + */ + public Updater getUpdater() { + return getUpdater(true); + } + + /** Set the updater for the MultiLayerNetwork */ + public void setUpdater(Updater updater) { + if (solver == null) { + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) + .build(); + } + solver.getOptimizer().setUpdater(updater); + } + + public Updater getUpdater(boolean initializeIfReq) { + if (solver == null && initializeIfReq) { + synchronized (this) { + if (solver == null) { // May have been created while waiting for lock + solver = + new Solver.Builder() + .configure(getNetConfiguration()) + .listeners(this.getTrainingListeners()) + .model(this) + .build(); + solver.getOptimizer().setUpdater(UpdaterCreator.getUpdater(this)); + } + } + } + if (solver != null) { + return solver.getOptimizer().getUpdater(initializeIfReq); + } + return null; + } + + /** + * Set the mask arrays for features and labels. Mask arrays are typically used in situations such + * as one-to-many and many-to-one learning with recurrent neural networks, as well as for + * supporting time series of varying lengths within the same minibatch.
+ * For example, with RNN data sets with input of shape [miniBatchSize,nIn,timeSeriesLength] and + * outputs of shape [miniBatchSize,nOut,timeSeriesLength], the features and mask arrays will have + * shape [miniBatchSize,timeSeriesLength] and contain values 0 or 1 at each element (to specify + * whether a given input/example is present - or merely padding - at a given time step).
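+ * <p>Illustrative sketch (assumes variable-length RNN data with the shapes described above):
+ * <pre>{@code
+ * net.setLayerMaskArrays(featuresMask, labelsMask);   // masks: [miniBatchSize, timeSeriesLength]
+ * // ... perform forward pass / scoring here ...
+ * net.clearLayerMaskArrays();                         // clear the masks afterwards
+ * }</pre>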
+ * NOTE: This method is not usually used directly. Instead, methods such as {@link + * #feedForward(INDArray, INDArray, INDArray)} and {@link #output(INDArray, boolean, INDArray, + * INDArray)} handle setting of masking internally. + * + * @param featuresMaskArray Mask array for features (input) + * @param labelsMaskArray Mask array for labels (output) + * @see #clearLayerMaskArrays() + */ + public void setLayerMaskArrays(INDArray featuresMaskArray, INDArray labelsMaskArray) { + if (featuresMaskArray != null) { + + if (featuresMaskArray.size(0) > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + // New approach: use feedForwardMaskArray method + feedForwardMaskArray(featuresMaskArray, MaskState.Active, (int) featuresMaskArray.size(0)); + + /* + //feedforward layers below a RNN layer: need the input (features) mask array + //Reason: even if the time series input is zero padded, the output from the dense layers are + // non-zero (i.e., activationFunction(0*weights + bias) != 0 in general) + //This assumes that the time series input is masked - i.e., values are 0 at the padded time steps, + // so we don't need to do anything for the recurrent layer + + //Now, if mask array is 2d -> need to reshape to 1d (column vector) in the exact same order + // as is done for 3d -> 2d time series reshaping + INDArray reshapedFeaturesMask = TimeSeriesUtils.reshapeTimeSeriesMaskToVector(featuresMaskArray); + + for( int i=0; i + * See {@link #setLayerMaskArrays(INDArray, INDArray)} for details on mask arrays. + */ + public void clearLayerMaskArrays() { + for (Layer layer : layers) { + layer.setMaskArray(null); + } + } + + /** + * Evaluate the network (classification performance) + * + * @param iterator Iterator to evaluate on + * @return Evaluation object; results of evaluation on all examples in the data set + */ + public T evaluate(@NonNull DataSetIterator iterator) { + return (T) evaluate(iterator, null); + } + + /** + * Evaluate the network (classification performance). 
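+ * <p>Illustrative sketch (assumes {@code net} is trained and {@code multiIter} is a
+ * MultiDataSetIterator with exactly one input and one output array):
+ * <pre>{@code
+ * Evaluation eval = net.evaluate(multiIter);
+ * System.out.println(eval.stats());   // accuracy, precision, recall, F1 and confusion matrix
+ * }</pre>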
Can only be used with MultiDataSetIterator + * instances with a single input/output array + * + * @param iterator Iterator to evaluate on + * @return Evaluation object; results of evaluation on all examples in the data set + */ + public Evaluation evaluate(@NonNull MultiDataSetIterator iterator) { + return evaluate(new MultiDataSetWrapperIterator(iterator)); + } + + /** + * Evaluate the network for regression performance + * + * @param iterator Data to evaluate on + * @return Regression evaluation + */ + public T evaluateRegression(DataSetIterator iterator) { + return (T) doEvaluation(iterator, new RegressionEvaluation(iterator.totalOutcomes()))[0]; + } + + /** + * Evaluate the network for regression performance Can only be used with MultiDataSetIterator + * instances with a single input/output array + * + * @param iterator Data to evaluate on + */ + public org.nd4j.evaluation.regression.RegressionEvaluation evaluateRegression( + MultiDataSetIterator iterator) { + return evaluateRegression(new MultiDataSetWrapperIterator(iterator)); + } + + /** + * @deprecated To be removed - use {@link #evaluateROC(DataSetIterator, int)} to enforce selection + * of appropriate ROC/threshold configuration + */ + @Deprecated + public T evaluateROC(DataSetIterator iterator) { + return evaluateROC(iterator, 0); + } + + /** + * Evaluate the network (must be a binary classifier) on the specified data, using the {@link ROC} + * class + * + * @param iterator Data to evaluate on + * @param rocThresholdSteps Number of threshold steps to use with {@link ROC} - see that class for + * details. + * @return ROC evaluation on the given dataset + */ + public T evaluateROC(DataSetIterator iterator, int rocThresholdSteps) { + Layer outputLayer = getOutputLayer(); + if (getNetConfiguration().isValidateOutputLayerConfig()) { + OutputLayerUtil.validateOutputLayerForClassifierEvaluation( + outputLayer.getLayerConfiguration(), ROC.class); + } + return (T) doEvaluation(iterator, new org.deeplearning4j.eval.ROC(rocThresholdSteps))[0]; + } + + /** + * @deprecated To be removed - use {@link #evaluateROCMultiClass(DataSetIterator, int)} to enforce + * selection of appropriate ROC/threshold configuration + */ + @Deprecated + public T evaluateROCMultiClass(DataSetIterator iterator) { + return evaluateROCMultiClass(iterator, 0); + } + + /** + * Evaluate the network on the specified data, using the {@link ROCMultiClass} class + * + * @param iterator Data to evaluate on + * @param rocThresholdSteps Number of threshold steps to use with {@link ROCMultiClass} + * @return Multi-class ROC evaluation on the given dataset + */ + public T evaluateROCMultiClass( + DataSetIterator iterator, int rocThresholdSteps) { + Layer outputLayer = getOutputLayer(); + if (getNetConfiguration().isValidateOutputLayerConfig()) { + OutputLayerUtil.validateOutputLayerForClassifierEvaluation( + outputLayer.getLayerConfiguration(), ROCMultiClass.class); + } + return (T) + doEvaluation(iterator, new org.deeplearning4j.eval.ROCMultiClass(rocThresholdSteps))[0]; + } + + /** + * Perform evaluation using an arbitrary IEvaluation instance. + * + * @param iterator data to evaluate on + */ + public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { + try { + return doEvaluationHelper(iterator, evaluations); + } catch (OutOfMemoryError e) { + CrashReportingUtil.writeMemoryCrashDump(this, e); + throw e; + } + } + + public T[] doEvaluationHelper( + DataSetIterator iterator, T... 
evaluations) { + if (!iterator.hasNext() && iterator.resetSupported()) { + iterator.reset(); + } + + DataSetIterator iter = + iterator.asyncSupported() ? new AsyncDataSetIterator(iterator, 2, true) : iterator; + + WorkspaceMode cMode = getNetConfiguration().getTrainingWorkspaceMode(); + getNetConfiguration() + .setTrainingWorkspaceMode(getNetConfiguration().getInferenceWorkspaceMode()); + + // First: let's determine if we should do 'split feed forward' for long time series + // The idea: RNN 20k time steps. Train using TBPTT length 100 -> 200 segments of length 100. If + // we naively + // just use .output(INDArray) here, then our memory requirements are 200x larger than if we did + // the same + // evaluation in segments... + // Only do this if TBPTT is enabled - if not, it means we can train without TBPTT and hence + // should be able + // to test without splitting also + boolean useRnnSegments = + (getNetConfiguration().getBackpropType() == BackpropType.TruncatedBPTT); + + MemoryWorkspace outputWs; + if (getNetConfiguration().getInferenceWorkspaceMode() == WorkspaceMode.ENABLED) { + outputWs = + Nd4j.getWorkspaceManager() + .getWorkspaceForCurrentThread(WS_ALL_LAYERS_ACT_CONFIG, WS_OUTPUT_MEM); + } else { + outputWs = new DummyWorkspace(); + } + + while (iter.hasNext()) { + DataSet next = iter.next(); + + if (next.getFeatures() == null || next.getLabels() == null) { + continue; + } + + INDArray features = next.getFeatures(); + INDArray labels = next.getLabels(); + INDArray fMask = next.getFeaturesMaskArray(); + INDArray lMask = next.getLabelsMaskArray(); + List meta = next.getExampleMetaData(); + + if (!useRnnSegments) { + // Standard/non-RNN case: + try (MemoryWorkspace ws = outputWs.notifyScopeEntered()) { + INDArray out = + outputOfLayerDetached( + false, FwdPassType.STANDARD, layers.length - 1, features, fMask, lMask, ws); + + try (MemoryWorkspace wsO = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { + for (T evaluation : evaluations) { + evaluation.eval(labels, out, lMask, meta); } - lines.add(line); + } } + } else { + rnnClearPreviousState(); - StringBuilder sbFormat = new StringBuilder(); - int totalLength = 0; - int pos = 0; - for(int length : maxLength){ - int currLength; - if(pos++ == maxLength.length-1){ - currLength = length; - } else { - currLength = length+3; - } - sbFormat.append("%-").append(currLength).append("s"); - totalLength += currLength; + // Get subset of features and labels: + val fwdLen = getNetConfiguration().getTbpttFwdLength(); + val tsLength = features.size(2); + long nSubsets = tsLength / fwdLen; + if (tsLength % fwdLen != 0) { + nSubsets++; // Example: 100 fwdLen with timeSeriesLength=120 -> want 2 subsets (1 of size + // 100, 1 of size 20) } - sbFormat.append("\n"); - String format = sbFormat.toString(); + for (int i = 0; i < nSubsets; i++) { + val startTimeIdx = i * fwdLen; + val endTimeIdx = Math.min(startTimeIdx + fwdLen, tsLength); - - - ret.append(StringUtils.repeat("=", totalLength)) - .append("\n"); - - boolean first = true; - for(String[] line : lines){ - String formatted = String.format(format, (Object[])line); - ret.append(formatted); - if(first){ - ret.append(StringUtils.repeat("=", totalLength)).append("\n"); - first = false; - } - } - - ret.append(StringUtils.repeat("-", totalLength)); - ret.append(String.format("\n%30s %,d", "Total Parameters: ", params().length())); - ret.append(String.format("\n%30s %,d", "Trainable Parameters: ", params().length() - frozenParams)); - ret.append(String.format("\n%30s %,d", "Frozen Parameters: ", 
frozenParams)); - ret.append("\n"); - ret.append(StringUtils.repeat("=", totalLength)); - ret.append("\n"); - return ret.toString(); - } - - /** - * Generate information regarding memory use for the network, for the given input type and minibatch size. - * Note that when using workspaces or CuDNN, the network should be trained for some iterations so that the memory - * workspaces have time to initialize. Without this, the memory requirements during training may be underestimated. - * - * Note also that this is the same information that is generated during an OOM crash when training or performing - * inference. - * - * @param minibatch Minibatch size to estimate memory for - * @param inputType Input type to the network - * @return A String with information about network memory use information - */ - public String memoryInfo(int minibatch, InputType inputType){ - return CrashReportingUtil.generateMemoryStatus(this, minibatch, inputType); - } - - /** - * This method just makes sure there's no state preserved within layers - */ - public void clearLayersStates() { - for (Layer layer : layers) { - layer.clear(); - layer.clearNoiseWeightParams(); - } - } - - /** - * Increment the epoch count (in the underlying {@link MultiLayerConfiguration} by 1). - * Note that this is done automatically when using iterator-based fitting methods, such as - * {@link #fit(DataSetIterator)}. However, when using non-iterator fit methods (DataSet, INDArray/INDArray etc), - * the network has no way to know when one epoch ends and another starts. In such situations, this method - * can be used to increment the epoch counter.
- * Note that the epoch counter is used for situations such as some learning rate schedules, and the like. - * - * The current epoch count can be obtained using {@code MultiLayerConfiguration.getLayerwiseConfiguration().getEpochCount()} - */ - public void incrementEpochCount(){ - layerWiseConfigurations.setEpochCount(layerWiseConfigurations.getEpochCount() + 1); - synchronizeIterEpochCounts(); - } - - - protected void synchronizeIterEpochCounts() { - //TODO: this is necessary for some schedules - but the redundant values are a little ugly... - int currIter = getIterationCount(); - int currEpoch = getEpochCount(); - for(Layer l : layers) { - l.setIterationCount(currIter); - l.setEpochCount(currEpoch); - } - } - - /** - * Save the MultiLayerNetwork to a file. Restore using {@link #load(File, boolean)}. - * Note that this saves the updater (i.e., the state array for momentum/Adam/rmsprop etc), which is desirable - * if further training will be undertaken. - * - * @param f File to save the network to - * @see ModelSerializer ModelSerializer for more details (and saving/loading via streams) - * @see #save(File, boolean) - */ - public void save( File f ) throws IOException { - save(f, true); - } - - /** - * Save the MultiLayerNetwork to a file. Restore using {@link #load(File, boolean)}. - * - * @param f File to save the network to - * @param saveUpdater If true: save the updater (i.e., the state array for momentum/Adam/rmsprop etc), which should - * usually be saved if further training is required - * @see ModelSerializer ModelSerializer for more details (and saving/loading via streams) - * @see #save(File, boolean) - */ - public void save(File f, boolean saveUpdater) throws IOException{ - ModelSerializer.writeModel(this, f, saveUpdater); - } - - /** - * Restore a MultiLayerNetwork to a file, saved using {@link #save(File)} or {@link ModelSerializer} - * @param f File to load the network from - * @param loadUpdater If true: load the updater if it is available (i.e., the state array for momentum/Adam/rmsprop - * etc) - use false if no further training is required, or true if further training - * will be undertaken - * @see ModelSerializer ModelSerializer for more details (and saving/loading via streams) - */ - public static MultiLayerNetwork load(File f, boolean loadUpdater) throws IOException { - return ModelSerializer.restoreMultiLayerNetwork(f, loadUpdater); - } - - /** - * Convert this MultiLayerNetwork to a ComputationGraph - * - * @return ComputationGraph equivalent to this network (including parameters and updater state) - */ - public ComputationGraph toComputationGraph(){ - return NetworkUtils.toComputationGraph(this); - } - - /** - * Return a copy of the network with the parameters and activations set to use the specified (floating point) data type. - * If the existing datatype is the same as the requested dataype, the original network will be returned unchanged. - * Only floating point datatypes (DOUBLE, FLOAT, HALF) may be used. - * - * @param dataType Datatype to convert the network to - * @return The network, set to use the specified datatype for the parameters and activations - */ - public MultiLayerNetwork convertDataType(@NonNull DataType dataType){ - Preconditions.checkState(dataType.isFPType(), "Invalid DataType: %s. 
Can only convert network to a floating point type", dataType); - if(dataType == params().dataType()){ - return this; - } - - try(MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { - INDArray newParams = params().castTo(dataType); - String jsonConfig = getLayerWiseConfigurations().toJson(); - MultiLayerConfiguration newConf = MultiLayerConfiguration.fromJson(jsonConfig); - newConf.setDataType(dataType); - MultiLayerNetwork newNet = new MultiLayerNetwork(newConf); - newNet.init(newParams, false); - - Updater u = getUpdater(false); - if(u != null && u.getStateViewArray() != null){ - INDArray oldUpdaterState = u.getStateViewArray(); - newNet.getUpdater(true).getStateViewArray().assign(oldUpdaterState); - } - return newNet; - } - } - - /** - * Set the learning rate for all layers in the network to the specified value. Note that if any learning rate - * schedules are currently present, these will be removed in favor of the new (fixed) learning rate.
- *
- * Note: This method not free from a performance point of view: a proper learning rate schedule - * should be used in preference to calling this method at every iteration. - * - * @param newLr New learning rate for all layers - * @see #setLearningRate(ISchedule) - * @see #setLearningRate(int, double) - */ - public void setLearningRate(double newLr){ - NetworkUtils.setLearningRate(this, newLr); - } - - /** - * Set the learning rate schedule for all layers in the network to the specified schedule. - * This schedule will replace any/all existing schedules, and also any fixed learning rate values.
- * Note that the iteration/epoch counts will not be reset. Use {@link MultiLayerConfiguration#setIterationCount(int)} - * and {@link MultiLayerConfiguration#setEpochCount(int)} if this is required - * - * @param newLr New learning rate schedule for all layers - * @see #setLearningRate(ISchedule) - * @see #setLearningRate(int, double) - */ - public void setLearningRate(ISchedule newLr){ - NetworkUtils.setLearningRate(this, newLr); - } - - /** - * Set the learning rate for a single layer in the network to the specified value. Note that if any learning rate - * schedules are currently present, these will be removed in favor of the new (fixed) learning rate.
- *
- * Note: This method not free from a performance point of view: a proper learning rate schedule - * should be used in preference to calling this method at every iteration. Note also that - * {@link #setLearningRate(double)} should also be used in preference, when all layers need to be set to a new LR - * - * @param layerNumber Number of the layer to set the LR for - * @param newLr New learning rate for a single layer - * @see #setLearningRate(ISchedule) - * @see #setLearningRate(int, double) - */ - public void setLearningRate(int layerNumber, double newLr){ - NetworkUtils.setLearningRate(this, layerNumber, newLr); - } - - /** - * Set the learning rate schedule for a single layer in the network to the specified value.
- * Note also that {@link #setLearningRate(ISchedule)} should also be used in preference, when all layers need - * to be set to a new LR schedule.
- * This schedule will replace any/all existing schedules, and also any fixed learning rate values.
- * Note also that the iteration/epoch counts will not be reset. Use {@link MultiLayerConfiguration#setIterationCount(int)} - * and {@link MultiLayerConfiguration#setEpochCount(int)} if this is required - * - * @param layerNumber Number of the layer to set the LR schedule for - * @param newLr New learning rate for a single layer - * @see #setLearningRate(ISchedule) - * @see #setLearningRate(int, double) - */ - public void setLearningRate(int layerNumber, ISchedule newLr){ - NetworkUtils.setLearningRate(this, layerNumber, newLr); - } - - /** - * Get the current learning rate, for the specified layer, from the network. - * Note: If the layer has no learning rate (no parameters, or an updater without a learning rate) then null is returned - * @param layerNumber Layer number to get the learning rate for - * @return Learning rate for the specified layer, or null - */ - public Double getLearningRate(int layerNumber){ - return NetworkUtils.getLearningRate(this, layerNumber); - } - - /** - * Return the layer size (number of units) for the specified layer.
- * Note that the meaning of the "layer size" can depend on the type of layer. For example:
- * - DenseLayer, OutputLayer, recurrent layers: number of units (nOut configuration option)
- * - ConvolutionLayer: the channels (number of channels)
- * - Subsampling layers, global pooling layers, etc: size of 0 is always returned
- * - * @param layer Index of the layer to get the size of. Must be in range 0 to nLayers-1 inclusive - * @return Size of the layer - */ - public int layerSize(int layer) { - if (layer < 0 || layer > layers.length) { - throw new IllegalArgumentException("Invalid layer index: " + layer + ". Layer index must be between 0 and " - + (layers.length - 1) + " inclusive"); - } - org.deeplearning4j.nn.conf.layers.Layer conf = layers[layer].conf().getLayer(); - if (conf == null || !(conf instanceof FeedForwardLayer)) { - return 0; - } - FeedForwardLayer ffl = (FeedForwardLayer) conf; - - if (ffl.getNOut() > Integer.MAX_VALUE) + if (endTimeIdx > Integer.MAX_VALUE) { throw new ND4JArraySizeException(); - return (int) ffl.getNOut(); - } + } + INDArray[] subsets = + getSubsetsForTbptt(startTimeIdx, (int) endTimeIdx, features, labels, fMask, lMask); - /** - * Return the input size (number of inputs) for the specified layer.
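// Illustrative sketch (not from the patch): the nSubsets value computed above for TBPTT-segmented
// evaluation is a plain ceiling division of the time-series length by the TBPTT forward length,
// using the same numbers as the comment in the hunk above.
long tsLength = 120;                // total time steps in the minibatch
long fwdLen = 100;                  // configured TBPTT forward length
long nSubsets = tsLength / fwdLen;  // 1
if (tsLength % fwdLen != 0) {
  nSubsets++;                       // 2 segments: one of 100 steps, one of 20
}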
- * Note that the meaning of the "input size" can depend on the type of layer. For example:
- * - DenseLayer, OutputLayer, etc: the feature vector size (nIn configuration option)
- * - Recurrent layers: the feature vector size per time step (nIn configuration option)
- * - ConvolutionLayer: the channels (number of channels)
- * - Subsampling layers, global pooling layers, etc: size of 0 is always returned
- * - * @param layer Index of the layer to get the size of. Must be in range 0 to nLayers-1 inclusive - * @return Size of the layer - */ - public int layerInputSize(int layer) { - if (layer < 0 || layer > layers.length) { - throw new IllegalArgumentException("Invalid layer index: " + layer + ". Layer index must be between 0 and " - + (layers.length - 1) + " inclusive"); + setLayerMaskArrays(subsets[2], subsets[3]); + + try (MemoryWorkspace ws = outputWs.notifyScopeEntered()) { + INDArray outSub = rnnTimeStep(subsets[0], ws); + try (MemoryWorkspace wsO = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) { + for (T evaluation : evaluations) { + evaluation.eval(subsets[1], outSub, subsets[3]); + } + } + } } - org.deeplearning4j.nn.conf.layers.Layer conf = layers[layer].conf().getLayer(); - if (conf == null || !(conf instanceof FeedForwardLayer)) { - return 0; + } + + // Clear inputs, masks etc. Important to avoid leaking invalidated/out of scope arrays between + // iterations + clearLayersStates(); + } + + if (iterator.asyncSupported()) { + ((AsyncDataSetIterator) iter).shutdown(); + } + + getNetConfiguration().setTrainingWorkspaceMode(cMode); + + return evaluations; + } + + /** + * Evaluate the network on the provided data set. Used for evaluating the performance of + * classifiers. + * + * @param iterator Data to undertake evaluation on + * @return Evaluation object, summarizing the results of the evaluation on the provided + * DataSetIterator + */ + public Evaluation evaluate(DataSetIterator iterator, List labelsList) { + return evaluate(iterator, labelsList, 1); + } + + @Override + public INDArray updaterState() { + return getUpdater() != null ? getUpdater().getStateViewArray() : null; + } + + @Override + public void fit(MultiDataSet dataSet) { + if (dataSet.getFeatures().length == 1 && dataSet.getLabels().length == 1) { + INDArray features = dataSet.getFeatures(0); + INDArray labels = dataSet.getLabels(0); + INDArray fMask = null; + INDArray lMask = null; + + if (dataSet.getFeaturesMaskArrays() != null) { + fMask = dataSet.getFeaturesMaskArrays()[0]; + } + + if (dataSet.getLabelsMaskArrays() != null) { + lMask = dataSet.getLabelsMaskArrays()[0]; + } + + DataSet ds = new DataSet(features, labels, fMask, lMask); + fit(ds); + } else { + throw new DL4JInvalidInputException( + "MultiLayerNetwork can't handle MultiDataSet with more than 1 features or labels array. " + + "Please consider using ComputationGraph instead."); + } + } + + /** + * Perform minibatch training on all minibatches in the MultiDataSetIterator, for the specified + * number of epochs. Equivalent to calling {@link #fit(MultiDataSetIterator)} numEpochs times in a + * loop. + * + * @param iterator Training data (MultiDataSetIterator). Iterator must support resetting + * @param numEpochs Number of training epochs, >= 1 + */ + public void fit(@NonNull MultiDataSetIterator iterator, int numEpochs) { + Preconditions.checkArgument( + numEpochs > 0, "Number of epochs must be > 0. Got numEpochs = %s", numEpochs); + Preconditions.checkArgument( + numEpochs == 1 || iterator.resetSupported(), + "Cannot perform multiple epochs of training using an " + + "iterator that does not support resetting (iterator.resetSupported() returned false)"); + + for (int i = 0; i < numEpochs; i++) { + fit(iterator); + } + } + + /** + * Perform minibatch training on all minibatches in the MultiDataSetIterator.<br>
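// Illustrative usage sketch (not from the patch) for the multi-epoch fit and evaluate overloads
// introduced above. The names net, trainData and testData are hypothetical; imports of
// MultiLayerNetwork, MultiDataSetIterator, DataSetIterator and Evaluation are assumed.
Evaluation trainAndEvaluate(MultiLayerNetwork net, MultiDataSetIterator trainData, DataSetIterator testData) {
  net.fit(trainData, 5);                // 5 epochs; trainData must support resetting
  return net.evaluate(testData, null);  // null labels list: taken from the iterator when available
}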
+ * Note: The MultiDataSets in the MultiDataSetIterator must have exactly 1 input and output array + * (as MultiLayerNetwork only supports 1 input and 1 output) + * + * @param iterator Training data (DataSetIterator). Iterator must support resetting + */ + @Override + public void fit(MultiDataSetIterator iterator) { + fit(new MultiDataSetWrapperIterator(iterator)); + } + + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, T[] evaluations) { + return doEvaluation(new MultiDataSetWrapperIterator(iterator), evaluations); + } + + /** + * Evaluate the network (for classification) on the provided data set, with top N accuracy in + * addition to standard accuracy. For 'standard' accuracy evaluation only, use topN = 1 + * + * @param iterator Iterator (data) to evaluate on + * @param labelsList List of labels. May be null. + * @param topN N value for top N accuracy evaluation + * @return Evaluation object, summarizing the results of the evaluation on the provided + * DataSetIterator + */ + public Evaluation evaluate(DataSetIterator iterator, List labelsList, int topN) { + if (layers == null || !(getOutputLayer() instanceof IOutputLayer)) { + throw new IllegalStateException("Cannot evaluate network with no output layer"); + } + if (labelsList == null) { + try { + labelsList = iterator.getLabels(); + } catch (Throwable t) { + } // Ignore, maybe UnsupportedOperationException etc + } + + Layer outputLayer = getOutputLayer(); + if (getNetConfiguration().isValidateOutputLayerConfig()) { + OutputLayerUtil.validateOutputLayerForClassifierEvaluation( + outputLayer.getLayerConfiguration(), Evaluation.class); + } + + Evaluation e = new org.deeplearning4j.eval.Evaluation(labelsList, topN); + doEvaluation(iterator, e); + + return e; + } + + protected void update(Task task) { + if (!initDone) { + initDone = true; + Heartbeat heartbeat = Heartbeat.getInstance(); + task = ModelSerializer.taskByModel(this); + Environment env = EnvironmentUtils.buildEnvironment(); + heartbeat.reportEvent(Event.STANDALONE, env, task); + } + } + + /** + * String detailing the architecture of the multilayernetwork. Columns are LayerIndex with layer + * type, nIn, nOut, Total number of parameters and the Shapes of the parameters Will also give + * information about frozen layers, if any. + * + * @return Summary as a string + * @see #memoryInfo(int, InputType) + */ + public String summary() { + return summary(null); + } + + /** + * String detailing the architecture of the multilayernetwork. Will also display activation size + * when given an input type. Columns are LayerIndex with layer type, nIn, nOut, Total number of + * parameters, Shapes of the parameters, Input activation shape, Output activation shape Will also + * give information about frozen layers, if any. + * + * @return Summary as a string + * @see #memoryInfo(int, InputType) + */ + public String summary(InputType inputType) { + StringBuilder ret = new StringBuilder(); + ret.append("\n"); + + List lines = new ArrayList<>(); + if (inputType == null) { + lines.add(new String[] {"LayerName (LayerType)", "nIn,nOut", "TotalParams", "ParamsShape"}); + } else { + lines.add( + new String[] { + "LayerName (LayerType)", + "nIn,nOut", + "TotalParams", + "ParamsShape", + "InputShape", + "OutputShape" + }); + } + int[] maxLength = new int[inputType == null ? 
4 : 6]; + String[] header = lines.get(0); + for (int i = 0; i < header.length; i++) { + maxLength[i] = header[i].length(); + } + + int frozenParams = 0; + for (org.deeplearning4j.nn.api.Layer currentLayer : getLayers()) { + String name = currentLayer.getLayerConfiguration().getLayerName(); + if (name == null) { + name = String.valueOf(currentLayer.getIndex()); + } + String paramShape = "-"; + String in = "-"; + String out = "-"; + String[] classNameArr = currentLayer.getClass().getName().split("\\."); + String className = classNameArr[classNameArr.length - 1]; + String paramCount = String.format("%,d", currentLayer.numParams()); + String inShape = ""; + String outShape = ""; + InputPreProcessor preProcessor; + InputType outType; + if (inputType != null) { + preProcessor = getNetConfiguration().getInputPreProcess(currentLayer.getIndex()); + inShape = inputType.toString(); + if (preProcessor != null) { + inputType = preProcessor.getOutputType(inputType); + inShape += "--> " + inputType.toString(); } - FeedForwardLayer ffl = (FeedForwardLayer) conf; - - if (ffl.getNIn() > Integer.MAX_VALUE) - throw new ND4JArraySizeException(); - return (int) ffl.getNIn(); - } - - /** - * Indicates whether some other object is "equal to" this one. - *

- * The {@code equals} method implements an equivalence relation - * on non-null object references: - *

    - *
  • It is reflexive: for any non-null reference value - * {@code x}, {@code x.equals(x)} should return - * {@code true}. - *
  • It is symmetric: for any non-null reference values - * {@code x} and {@code y}, {@code x.equals(y)} - * should return {@code true} if and only if - * {@code y.equals(x)} returns {@code true}. - *
  • It is transitive: for any non-null reference values - * {@code x}, {@code y}, and {@code z}, if - * {@code x.equals(y)} returns {@code true} and - * {@code y.equals(z)} returns {@code true}, then - * {@code x.equals(z)} should return {@code true}. - *
  • It is consistent: for any non-null reference values - * {@code x} and {@code y}, multiple invocations of - * {@code x.equals(y)} consistently return {@code true} - * or consistently return {@code false}, provided no - * information used in {@code equals} comparisons on the - * objects is modified. - *
  • For any non-null reference value {@code x}, - * {@code x.equals(null)} should return {@code false}. - *
- *

- * The {@code equals} method for class {@code Object} implements - * the most discriminating possible equivalence relation on objects; - * that is, for any non-null reference values {@code x} and - * {@code y}, this method returns {@code true} if and only - * if {@code x} and {@code y} refer to the same object - * ({@code x == y} has the value {@code true}). - *

- * Note that it is generally necessary to override the {@code hashCode} - * method whenever this method is overridden, so as to maintain the - * general contract for the {@code hashCode} method, which states - * that equal objects must have equal hash codes. - * - * @param obj the reference object with which to compare. - * @return {@code true} if this object is the same as the obj - * argument; {@code false} otherwise. - * @see #hashCode() - * @see HashMap - */ - @Override - public boolean equals(Object obj) { - if (obj == null) - return false; - if (obj instanceof MultiLayerNetwork) { - MultiLayerNetwork network = (MultiLayerNetwork) obj; - boolean paramsEquals = network.params().equals(params()); - boolean confEquals = getLayerWiseConfigurations().equals(network.getLayerWiseConfigurations()); - boolean updaterEquals = getUpdater().equals(network.getUpdater()); - return paramsEquals && confEquals && updaterEquals; + outType = + currentLayer.getLayerConfiguration().getOutputType(currentLayer.getIndex(), inputType); + outShape = outType.toString(); + inputType = outType; + } + if (currentLayer.numParams() > 0) { + paramShape = ""; + if (currentLayer instanceof BidirectionalLayer) { // Bidirectional layer is not an FFL + BidirectionalLayer bi = (BidirectionalLayer) currentLayer; + in = String.valueOf(((Bidirectional) bi.getLayerConfiguration()).getNIn()); + out = String.valueOf(((Bidirectional) bi.getLayerConfiguration()).getNOut()); + } else { + try { + in = String.valueOf(((FeedForwardLayer) currentLayer.getLayerConfiguration()).getNIn()); + out = + String.valueOf(((FeedForwardLayer) currentLayer.getLayerConfiguration()).getNOut()); + } catch ( + Exception e) { // Some layers, like PReLU, are just BaseLayers (but have parameters) + } } - return false; - } - - private void writeObject(ObjectOutputStream oos) throws IOException { - ModelSerializer.writeModel(this, oos, true); - } - - private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException { - val mln = ModelSerializer.restoreMultiLayerNetwork(ois, true); - - this.defaultConfiguration = mln.defaultConfiguration.clone(); - this.layerWiseConfigurations = mln.layerWiseConfigurations.clone(); - this.init(); - this.flattenedParams.assign(mln.flattenedParams); - - int numWorkingMem = 2 * (layerWiseConfigurations.getConfs().size() + layerWiseConfigurations.getInputPreProcessors().size()); - WS_LAYER_WORKING_MEM_CONFIG = getLayerWorkingMemWSConfig(numWorkingMem); - WS_LAYER_ACT_X_CONFIG = getLayerActivationWSConfig(layerWiseConfigurations.getConfs().size()); - - if (mln.getUpdater() != null && mln.getUpdater(false).getStateViewArray() != null) - this.getUpdater(true).getStateViewArray().assign(mln.getUpdater(false).getStateViewArray()); - } - - /** - * Close the network and deallocate all native memory, including: parameters, gradients, updater memory and workspaces - * Note that the network should not be used again for any purpose after it has been closed - */ - @Override - public void close(){ - //Close the INDArray and dealloc - if(flattenedParams.closeable()) - flattenedParams.close(); - - if(flattenedGradients != null && flattenedGradients.closeable()) - flattenedGradients.close(); - - Updater u = getUpdater(false); - if(u != null && u.getStateViewArray() != null) { - INDArray state = u.getStateViewArray(); - if(state.closeable()) - state.close(); + Set paraNames = currentLayer.getParamTable().keySet(); + for (String aP : paraNames) { + String paramS = 
ArrayUtils.toString(currentLayer.getParamTable().get(aP).shape()); + paramShape += aP + ":" + paramS + ", "; } + paramShape = paramShape.subSequence(0, paramShape.lastIndexOf(",")).toString(); + } + if (currentLayer instanceof FrozenLayer) { + frozenParams += currentLayer.numParams(); + classNameArr = + ((FrozenLayer) currentLayer).getInsideLayer().getClass().getName().split("\\."); + className = "Frozen " + classNameArr[classNameArr.length - 1]; + } - Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); - System.gc(); + String[] line; + if (inputType == null) { + line = new String[] {name + " (" + className + ")", in + "," + out, paramCount, paramShape}; + } else { + line = + new String[] { + name + " (" + className + ")", + in + "," + out, + paramCount, + paramShape, + inShape, + outShape + }; + } + for (int i = 0; i < line.length; i++) { + maxLength[i] = Math.max(maxLength[i], line[i] == null ? 0 : line[i].length()); + } + lines.add(line); } + + StringBuilder sbFormat = new StringBuilder(); + int totalLength = 0; + int pos = 0; + for (int length : maxLength) { + int currLength; + if (pos++ == maxLength.length - 1) { + currLength = length; + } else { + currLength = length + 3; + } + sbFormat.append("%-").append(currLength).append("s"); + totalLength += currLength; + } + sbFormat.append("\n"); + String format = sbFormat.toString(); + + ret.append(StringUtils.repeat("=", totalLength)).append("\n"); + + boolean first = true; + for (String[] line : lines) { + String formatted = String.format(format, (Object[]) line); + ret.append(formatted); + if (first) { + ret.append(StringUtils.repeat("=", totalLength)).append("\n"); + first = false; + } + } + + ret.append(StringUtils.repeat("-", totalLength)); + ret.append(String.format("\n%30s %,d", "Total Parameters: ", getModelParams().length())); + ret.append( + String.format( + "\n%30s %,d", + "ITrainableLayer Parameters: ", getModelParams().length() - frozenParams)); + ret.append(String.format("\n%30s %,d", "Frozen Parameters: ", frozenParams)); + ret.append("\n"); + ret.append(StringUtils.repeat("=", totalLength)); + ret.append("\n"); + return ret.toString(); + } + + /** + * Generate information regarding memory use for the network, for the given input type and + * minibatch size. Note that when using workspaces or CuDNN, the network should be trained for + * some iterations so that the memory workspaces have time to initialize. Without this, the memory + * requirements during training may be underestimated. + * + *
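// Illustrative usage sketch (not from the patch) for the summary() methods built above.
// "net" is a hypothetical, initialized MultiLayerNetwork; the input shape is arbitrary.
String architecture = net.summary();                                  // no activation shapes
String withShapes = net.summary(InputType.convolutional(28, 28, 1));  // adds in/out activation shapes
System.out.println(withShapes);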

Note also that this is the same information that is generated during an OOM crash when + * training or performing inference. + * + * @param minibatch Minibatch size to estimate memory for + * @param inputType Input type to the network + * @return A String with information about network memory use information + */ + public String memoryInfo(int minibatch, InputType inputType) { + return CrashReportingUtil.generateMemoryStatus(this, minibatch, inputType); + } + + /** This method just makes sure there's no state preserved within layers */ + public void clearLayersStates() { + for (Layer layer : layers) { + layer.clear(); + layer.clearNoiseWeightParams(); + } + } + + /** + * Increment the epoch count (in the underlying {@link NeuralNetConfiguration} by 1). Note that + * this is done automatically when using iterator-based fitting methods, such as {@link + * #fit(DataSetIterator)}. However, when using non-iterator fit methods (DataSet, + * INDArray/INDArray etc), the network has no way to know when one epoch ends and another starts. + * In such situations, this method can be used to increment the epoch counter.
+ * Note that the epoch counter is used for situations such as some learning rate schedules, and + * the like. + * + *

The current epoch count can be obtained using {@code + * NeuralNetConfiguration.getLayerwiseConfiguration().getEpochCount()} + */ + public void incrementEpochCount() { + getNetConfiguration().setEpochCount(getNetConfiguration().getEpochCount() + 1); + synchronizeIterEpochCounts(); + } + + protected void synchronizeIterEpochCounts() { + // TODO: this is necessary for some schedules - but the redundant values are a little ugly... + int currIter = getIterationCount(); + int currEpoch = getEpochCount(); + for (Layer l : layers) { + l.setIterationCount(currIter); + l.setEpochCount(currEpoch); + } + } + + /** + * Save the MultiLayerNetwork to a file. Restore using {@link #load(File, boolean)}. Note that + * this saves the updater (i.e., the state array for momentum/Adam/rmsprop etc), which is + * desirable if further training will be undertaken. + * + * @param f File to save the network to + * @see ModelSerializer ModelSerializer for more details (and saving/loading via streams) + * @see #save(File, boolean) + */ + public void save(File f) throws IOException { + save(f, true); + } + + /** + * Save the MultiLayerNetwork to a file. Restore using {@link #load(File, boolean)}. + * + * @param f File to save the network to + * @param saveUpdater If true: save the updater (i.e., the state array for momentum/Adam/rmsprop + * etc), which should usually be saved if further training is required + * @see ModelSerializer ModelSerializer for more details (and saving/loading via streams) + * @see #save(File, boolean) + */ + public void save(File f, boolean saveUpdater) throws IOException { + ModelSerializer.writeModel(this, f, saveUpdater); + } + + /** + * Convert this MultiLayerNetwork to a ComputationGraph + * + * @return ComputationGraph equivalent to this network (including parameters and updater state) + */ + public ComputationGraph toComputationGraph() { + return NetworkUtils.toComputationGraph(this); + } + + /** + * Return a copy of the network with the parameters and activations set to use the specified + * (floating point) data type. If the existing datatype is the same as the requested dataype, the + * original network will be returned unchanged. Only floating point datatypes (DOUBLE, FLOAT, + * HALF) may be used. + * + * @param dataType Datatype to convert the network to + * @return The network, set to use the specified datatype for the parameters and activations + */ + public MultiLayerNetwork convertDataType(@NonNull DataType dataType) { + Preconditions.checkState( + dataType.isFPType(), + "Invalid DataType: %s. Can only convert network to a floating point type", + dataType); + if (dataType == getModelParams().dataType()) { + return this; + } + + try (MemoryWorkspace ws = Nd4j.getMemoryManager().scopeOutOfWorkspaces()) { + INDArray newParams = getModelParams().castTo(dataType); + String jsonConfig = getNetConfiguration().toJson(); + NeuralNetConfiguration newConf = NeuralNetConfiguration.fromJson(jsonConfig); + newConf.setDataType(dataType); + MultiLayerNetwork newNet = new MultiLayerNetwork(newConf); + newNet.init(newParams, false); + + Updater u = getUpdater(false); + if (u != null && u.getStateViewArray() != null) { + INDArray oldUpdaterState = u.getStateViewArray(); + newNet.getUpdater(true).getStateViewArray().assign(oldUpdaterState); + } + return newNet; + } + } + + /** + * Set the learning rate for all layers in the network to the specified value. Note that if any + * learning rate schedules are currently present, these will be removed in favor of the new + * (fixed) learning rate.
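// Illustrative usage sketch (not from the patch) for save(File, boolean) and
// convertDataType(DataType) shown above. The file name is arbitrary; "net" is a hypothetical,
// trained MultiLayerNetwork. Restoring goes through the load(File, boolean) method referenced
// in the save() javadoc.
File saved = new File("my-network.zip");
net.save(saved, true);                                            // true: keep updater state for further training
MultiLayerNetwork asFloat = net.convertDataType(DataType.FLOAT);  // returns a copy unless already FLOAT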
+ *
+ * Note: This method is not free from a performance point of view: a proper learning + * rate schedule should be used in preference to calling this method at every iteration. + * + * @param newLr New learning rate for all layers + * @see #setLearningRate(ISchedule) + * @see #setLearningRate(int, double) + */ + public void setLearningRate(double newLr) { + NetworkUtils.setLearningRate(this, newLr); + } + + /** + * Set the learning rate schedule for all layers in the network to the specified schedule. This + * schedule will replace any/all existing schedules, and also any fixed learning rate values.<br>
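// Illustrative usage sketch (not from the patch) for the learning rate setters above: either a
// fixed rate for every layer, or an ISchedule that replaces any existing schedules and fixed
// rates. The schedule values are arbitrary.
net.setLearningRate(1e-3);                                                     // fixed LR, all layers
net.setLearningRate(new ExponentialSchedule(ScheduleType.EPOCH, 1e-3, 0.95));  // decays each epoch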
+ * Note that the iteration/epoch counts will not be reset. Use {@link + * NeuralNetConfiguration#setIterationCount(int)} and {@link + * NeuralNetConfiguration#setEpochCount(int)} if this is required + * + * @param newLr New learning rate schedule for all layers + * @see #setLearningRate(ISchedule) + * @see #setLearningRate(int, double) + */ + public void setLearningRate(ISchedule newLr) { + NetworkUtils.setLearningRate(this, newLr); + } + + /** + * Set the learning rate for a single layer in the network to the specified value. Note that if + * any learning rate schedules are currently present, these will be removed in favor of the new + * (fixed) learning rate.
+ *
+ * Note: This method is not free from a performance point of view: a proper learning + * rate schedule should be used in preference to calling this method at every iteration. Note also + * that {@link #setLearningRate(double)} should be used in preference when all layers need + * to be set to a new LR. + * + * @param layerNumber Number of the layer to set the LR for + * @param newLr New learning rate for a single layer + * @see #setLearningRate(ISchedule) + * @see #setLearningRate(int, double) + */ + public void setLearningRate(int layerNumber, double newLr) { + NetworkUtils.setLearningRate(this, layerNumber, newLr); + } + + /** + * Set the learning rate schedule for a single layer in the network to the specified value.<br>
+ * Note also that {@link #setLearningRate(ISchedule)} should be used in preference when all + * layers need to be set to a new LR schedule.<br>
+ * This schedule will replace any/all existing schedules, and also any fixed learning rate values. + *
+ * Note also that the iteration/epoch counts will not be reset. Use {@link + * NeuralNetConfiguration#setIterationCount(int)} and {@link + * NeuralNetConfiguration#setEpochCount(int)} if this is required + * + * @param layerNumber Number of the layer to set the LR schedule for + * @param newLr New learning rate for a single layer + * @see #setLearningRate(ISchedule) + * @see #setLearningRate(int, double) + */ + public void setLearningRate(int layerNumber, ISchedule newLr) { + NetworkUtils.setLearningRate(this, layerNumber, newLr); + } + + /** + * Get the current learning rate, for the specified layer, from the network. Note: If the layer + * has no learning rate (no parameters, or an updater without a learning rate) then null is + * returned + * + * @param layerNumber LayerConfiguration number to get the learning rate for + * @return Learning rate for the specified layer, or null + */ + public Double getLearningRate(int layerNumber) { + return NetworkUtils.getLearningRate(this, layerNumber); + } + + /** + * Return the layer size (number of units) for the specified layer.
+ * Note that the meaning of the "layer size" can depend on the type of layer. For example:
+ * - DenseLayer, OutputLayer, recurrent layers: number of units (nOut configuration option)
+ * - ConvolutionLayer: the channels (number of channels)
+ * - Subsampling layers, global pooling layers, etc: size of 0 is always returned
+ * + * @param layer Index of the layer to get the size of. Must be in range 0 to nLayers-1 inclusive + * @return Size of the layer + */ + public int layerSize(int layer) { + if (layer < 0 || layer > layers.length) { + throw new IllegalArgumentException( + "Invalid layer index: " + + layer + + ". LayerConfiguration index must be between 0 and " + + (layers.length - 1) + + " inclusive"); + } + LayerConfiguration conf = layers[layer].getLayerConfiguration(); + if (conf == null || !(conf instanceof FeedForwardLayer)) { + return 0; + } + FeedForwardLayer ffl = (FeedForwardLayer) conf; + + if (ffl.getNOut() > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + return (int) ffl.getNOut(); + } + + /** + * Return the input size (number of inputs) for the specified layer.
+ * Note that the meaning of the "input size" can depend on the type of layer. For example:
+ * - DenseLayer, OutputLayer, etc: the feature vector size (nIn configuration option)
+ * - Recurrent layers: the feature vector size per time step (nIn configuration option)
+ * - ConvolutionLayer: the channels (number of channels)
+ * - Subsampling layers, global pooling layers, etc: size of 0 is always returned
+ * + * @param layer Index of the layer to get the size of. Must be in range 0 to nLayers-1 inclusive + * @return Size of the layer + */ + public int layerInputSize(int layer) { + if (layer < 0 || layer > layers.length) { + throw new IllegalArgumentException( + "Invalid layer index: " + + layer + + ". LayerConfiguration index must be between 0 and " + + (layers.length - 1) + + " inclusive"); + } + LayerConfiguration conf = layers[layer].getLayerConfiguration(); + if (conf == null || !(conf instanceof FeedForwardLayer)) { + return 0; + } + FeedForwardLayer ffl = (FeedForwardLayer) conf; + + if (ffl.getNIn() > Integer.MAX_VALUE) { + throw new ND4JArraySizeException(); + } + return (int) ffl.getNIn(); + } + + /** + * Indicates whether some other object is "equal to" this one. + * + *
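// Illustrative sketch (not from the patch) of the size accessors defined above, for a
// hypothetical network whose first layer is a DenseLayer with nIn=784 and nOut=100.
int units = net.layerSize(0);        // 100 (nOut of layer 0)
int inputs = net.layerInputSize(0);  // 784 (nIn of layer 0)
// Layers without a fixed size (subsampling, global pooling, ...) return 0 from both methods.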

The {@code equals} method implements an equivalence relation on non-null object references: + * + *

    + *
  • It is reflexive: for any non-null reference value {@code x}, {@code x.equals(x)} + * should return {@code true}. + *
  • It is symmetric: for any non-null reference values {@code x} and {@code y}, {@code + * x.equals(y)} should return {@code true} if and only if {@code y.equals(x)} returns {@code + * true}. + *
  • It is transitive: for any non-null reference values {@code x}, {@code y}, and + * {@code z}, if {@code x.equals(y)} returns {@code true} and {@code y.equals(z)} returns + * {@code true}, then {@code x.equals(z)} should return {@code true}. + *
  • It is consistent: for any non-null reference values {@code x} and {@code y}, + * multiple invocations of {@code x.equals(y)} consistently return {@code true} or + * consistently return {@code false}, provided no information used in {@code equals} + * comparisons on the objects is modified. + *
  • For any non-null reference value {@code x}, {@code x.equals(null)} should return {@code + * false}. + *
+ * + *

The {@code equals} method for class {@code Object} implements the most discriminating + * possible equivalence relation on objects; that is, for any non-null reference values {@code x} + * and {@code y}, this method returns {@code true} if and only if {@code x} and {@code y} refer to + * the same object ({@code x == y} has the value {@code true}). + * + *

Note that it is generally necessary to override the {@code hashCode} method whenever this + * method is overridden, so as to maintain the general contract for the {@code hashCode} method, + * which states that equal objects must have equal hash codes. + * + * @param obj the reference object with which to compare. + * @return {@code true} if this object is the same as the obj argument; {@code false} otherwise. + * @see #hashCode() + * @see HashMap + */ + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj instanceof MultiLayerNetwork) { + MultiLayerNetwork network = (MultiLayerNetwork) obj; + boolean paramsEquals = network.getModelParams().equals(getModelParams()); + boolean confEquals = getNetConfiguration().equals(network.getNetConfiguration()); + boolean updaterEquals = getUpdater().equals(network.getUpdater()); + return paramsEquals && confEquals && updaterEquals; + } + return false; + } + + private void writeObject(ObjectOutputStream oos) throws IOException { + ModelSerializer.writeModel(this, oos, true); + } + + private void readObject(ObjectInputStream ois) throws ClassNotFoundException, IOException { + val mln = ModelSerializer.restoreMultiLayerNetwork(ois, true); + + this.setNetConfiguration(mln.getNetConfiguration().clone()); + this.init(); + this.flattenedParams.assign(mln.flattenedParams); + + int numWorkingMem = + 2 + * (getNetConfiguration().getFlattenedLayerConfigurations().size() + + getNetConfiguration().getInputPreProcessors().size()); + WS_LAYER_WORKING_MEM_CONFIG = getLayerWorkingMemWSConfig(numWorkingMem); + WS_LAYER_ACT_X_CONFIG = + getLayerActivationWSConfig(getNetConfiguration().getFlattenedLayerConfigurations().size()); + + if (mln.getUpdater() != null && mln.getUpdater(false).getStateViewArray() != null) { + this.getUpdater(true).getStateViewArray().assign(mln.getUpdater(false).getStateViewArray()); + } + } + + /** + * Close the network and deallocate all native memory, including: parameters, gradients, updater + * memory and workspaces Note that the network should not be used again for any purpose after it + * has been closed + */ + @Override + public void close() { + // Close the INDArray and dealloc + if (flattenedParams.closeable()) { + flattenedParams.close(); + } + + if (flattenedGradients != null && flattenedGradients.closeable()) { + flattenedGradients.close(); + } + + Updater u = getUpdater(false); + if (u != null && u.getStateViewArray() != null) { + INDArray state = u.getStateViewArray(); + if (state.closeable()) { + state.close(); + } + } + + Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); + System.gc(); + } + + /** + * Returns a string representation of the underlying configuration. + * + * @return a string representation of the configuration. 
+ */ + @Override + public String toString() { + return getNetConfiguration().toString(); + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BatchNormalizationParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BatchNormalizationParamInitializer.java index 5215e2276..c68403835 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BatchNormalizationParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BatchNormalizationParamInitializer.java @@ -21,16 +21,15 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.conf.layers.BatchNormalization; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; -public class BatchNormalizationParamInitializer implements ParamInitializer { +public class BatchNormalizationParamInitializer extends AbstractParamInitializer { private static final BatchNormalizationParamInitializer INSTANCE = new BatchNormalizationParamInitializer(); @@ -45,12 +44,7 @@ public class BatchNormalizationParamInitializer implements ParamInitializer { public static final String GLOBAL_LOG_STD = "log10stdev"; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { BatchNormalization layer = (BatchNormalization) l; //Parameters in batch norm: //gamma, beta, global mean estimate, global variance estimate @@ -66,7 +60,7 @@ public class BatchNormalizationParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { if(((BatchNormalization)layer).isUseLogStd()){ return Arrays.asList(GAMMA, BETA, GLOBAL_MEAN, GLOBAL_LOG_STD); } else { @@ -75,30 +69,30 @@ public class BatchNormalizationParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return false; } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return false; } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramView, boolean initializeParams) { Map params = Collections.synchronizedMap(new LinkedHashMap()); // TODO setup for RNN - BatchNormalization layer = (BatchNormalization) conf.getLayer(); + BatchNormalization layer = (BatchNormalization) conf; val nOut = layer.getNOut(); long meanOffset = 0; @@ -107,9 +101,9 @@ public class BatchNormalizationParamInitializer implements ParamInitializer { INDArray betaView = paramView.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(nOut, 2 * nOut)); params.put(GAMMA, 
createGamma(conf, gammaView, initializeParams)); - conf.addVariable(GAMMA); + conf.getNetConfiguration().addNetWideVariable(GAMMA); params.put(BETA, createBeta(conf, betaView, initializeParams)); - conf.addVariable(BETA); + conf.getNetConfiguration().addNetWideVariable(BETA); meanOffset = 2 * nOut; } @@ -131,21 +125,21 @@ public class BatchNormalizationParamInitializer implements ParamInitializer { } params.put(GLOBAL_MEAN, globalMeanView); - conf.addVariable(GLOBAL_MEAN); + conf.getNetConfiguration().addNetWideVariable(GLOBAL_MEAN); if(layer.isUseLogStd()){ params.put(GLOBAL_LOG_STD, globalVarView); - conf.addVariable(GLOBAL_LOG_STD); + conf.getNetConfiguration().addNetWideVariable(GLOBAL_LOG_STD); } else { params.put(GLOBAL_VAR, globalVarView); - conf.addVariable(GLOBAL_VAR); + conf.getNetConfiguration().addNetWideVariable(GLOBAL_VAR); } return params; } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - BatchNormalization layer = (BatchNormalization) conf.getLayer(); + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + BatchNormalization layer = (BatchNormalization) conf; val nOut = layer.getNOut(); Map out = new LinkedHashMap<>(); @@ -171,15 +165,15 @@ public class BatchNormalizationParamInitializer implements ParamInitializer { return out; } - private INDArray createBeta(NeuralNetConfiguration conf, INDArray betaView, boolean initializeParams) { - BatchNormalization layer = (BatchNormalization) conf.getLayer(); + private INDArray createBeta(LayerConfiguration conf, INDArray betaView, boolean initializeParams) { + BatchNormalization layer = (BatchNormalization) conf; if (initializeParams) betaView.assign(layer.getBeta()); return betaView; } - private INDArray createGamma(NeuralNetConfiguration conf, INDArray gammaView, boolean initializeParams) { - BatchNormalization layer = (BatchNormalization) conf.getLayer(); + private INDArray createGamma(LayerConfiguration conf, INDArray gammaView, boolean initializeParams) { + BatchNormalization layer = (BatchNormalization) conf; if (initializeParams) gammaView.assign(layer.getGamma()); return gammaView; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BidirectionalParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BidirectionalParamInitializer.java index a75128790..27905d60f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BidirectionalParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/BidirectionalParamInitializer.java @@ -21,12 +21,10 @@ package org.deeplearning4j.nn.params; import lombok.val; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.BaseLayer; -import org.deeplearning4j.nn.conf.layers.BaseRecurrentLayer; -import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.Bidirectional; import org.nd4j.linalg.api.ndarray.INDArray; @@ -36,14 +34,13 @@ import java.util.List; import java.util.Map; import static org.nd4j.linalg.indexing.NDArrayIndex.interval; -import static org.nd4j.linalg.indexing.NDArrayIndex.point; -public class BidirectionalParamInitializer implements ParamInitializer { +public 
class BidirectionalParamInitializer extends AbstractParamInitializer { public static final String FORWARD_PREFIX = "f"; public static final String BACKWARD_PREFIX = "b"; private final Bidirectional layer; - private final Layer underlying; + private final LayerConfiguration underlying; private List paramKeys; private List weightKeys; @@ -55,19 +52,14 @@ public class BidirectionalParamInitializer implements ParamInitializer { } @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer layer) { + public long numParams(LayerConfiguration layer) { return 2 * underlying(layer).initializer().numParams(underlying(layer)); } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { if(paramKeys == null) { - Layer u = underlying(layer); + LayerConfiguration u = underlying(layer); List orig = u.initializer().paramKeys(u); paramKeys = withPrefixes(orig); } @@ -75,9 +67,9 @@ public class BidirectionalParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { if(weightKeys == null) { - Layer u = underlying(layer); + LayerConfiguration u = underlying(layer); List orig = u.initializer().weightKeys(u); weightKeys = withPrefixes(orig); } @@ -85,9 +77,9 @@ public class BidirectionalParamInitializer implements ParamInitializer { } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { if(biasKeys == null) { - Layer u = underlying(layer); + LayerConfiguration u = underlying(layer); List orig = u.initializer().weightKeys(u); biasKeys = withPrefixes(orig); } @@ -95,27 +87,27 @@ public class BidirectionalParamInitializer implements ParamInitializer { } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return weightKeys(this.layer).contains(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return biasKeys(this.layer).contains(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { val n = paramsView.length()/2; INDArray forwardView = paramsView.get(interval(0,0,true), interval(0, n)); INDArray backwardView = paramsView.get(interval(0,0,true), interval(n, 2*n)); conf.clearVariables(); - NeuralNetConfiguration c1 = conf.clone(); - NeuralNetConfiguration c2 = conf.clone(); - c1.setLayer(underlying); - c2.setLayer(underlying); + LayerConfiguration c1 = conf.clone(); + LayerConfiguration c2 = conf.clone(); + //c1.setLayer(underlying); + //c2.setLayer(underlying); Map origFwd = underlying.initializer().init(c1, forwardView, initializeParams); Map origBwd = underlying.initializer().init(c2, backwardView, initializeParams); List variables = addPrefixes(c1.getVariables(), c2.getVariables()); @@ -156,7 +148,7 @@ public class BidirectionalParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { val n = gradientView.length()/2; INDArray forwardView = gradientView.get(interval(0,0,true), interval(0, n)); INDArray backwardView = 
gradientView.get(interval(0,0,true), interval(n, 2*n)); @@ -175,7 +167,7 @@ public class BidirectionalParamInitializer implements ParamInitializer { return out; } - private Layer underlying(Layer layer){ + private LayerConfiguration underlying(LayerConfiguration layer){ Bidirectional b = (Bidirectional)layer; return b.getFwd(); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/CenterLossParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/CenterLossParamInitializer.java index 65df1fea5..8a02c397e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/CenterLossParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/CenterLossParamInitializer.java @@ -22,7 +22,9 @@ package org.deeplearning4j.nn.params; import lombok.val; +import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; @@ -43,20 +45,20 @@ public class CenterLossParamInitializer extends DefaultParamInitializer { public final static String CENTER_KEY = "cL"; @Override - public long numParams(NeuralNetConfiguration conf) { + public long numParams(LayerConfiguration conf) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); // also equal to numClasses return nIn * nOut + nOut + nIn * nOut; //weights + bias + embeddings } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { Map params = Collections.synchronizedMap(new LinkedHashMap()); org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer layerConf = - (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); // also equal to numClasses @@ -81,9 +83,9 @@ public class CenterLossParamInitializer extends DefaultParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer layerConf = - (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); // also equal to numClasses @@ -107,10 +109,10 @@ public class CenterLossParamInitializer extends DefaultParamInitializer { } - protected INDArray createCenterLossMatrix(NeuralNetConfiguration conf, INDArray centerLossView, + protected INDArray createCenterLossMatrix(LayerConfiguration conf, INDArray centerLossView, boolean initializeParameters) { org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer layerConf = - (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.CenterLossOutputLayer) conf; if (initializeParameters) { centerLossView.assign(0.0); diff --git 
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Convolution3DParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Convolution3DParamInitializer.java index 8ebabb433..b11f9f3d2 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Convolution3DParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Convolution3DParamInitializer.java @@ -22,9 +22,8 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.Convolution3D; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; @@ -44,13 +43,9 @@ public class Convolution3DParamInitializer extends ConvolutionParamInitializer { public final static String WEIGHT_KEY = DefaultParamInitializer.WEIGHT_KEY; public final static String BIAS_KEY = DefaultParamInitializer.BIAS_KEY; - @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { Convolution3D layerConf = (Convolution3D) l; @@ -62,13 +57,13 @@ public class Convolution3DParamInitializer extends ConvolutionParamInitializer { @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - Convolution3D layer = (Convolution3D) conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + Convolution3D layer = (Convolution3D) conf; if (layer.getKernelSize().length != 3) throw new IllegalArgumentException("Filter size must be == 3"); Map params = Collections.synchronizedMap(new LinkedHashMap()); - Convolution3D layerConf = (Convolution3D) conf.getLayer(); + Convolution3D layerConf = (Convolution3D) conf; val nOut = layerConf.getNOut(); if (layer.hasBias()) { @@ -88,9 +83,9 @@ public class Convolution3DParamInitializer extends ConvolutionParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { - Convolution3D layerConf = (Convolution3D) conf.getLayer(); + Convolution3D layerConf = (Convolution3D) conf; int[] kernel = layerConf.getKernelSize(); val nIn = layerConf.getNIn(); @@ -112,7 +107,7 @@ public class Convolution3DParamInitializer extends ConvolutionParamInitializer { } - protected INDArray createWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, boolean initializeParams) { + protected INDArray createWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 5d weight matrix of: (number of kernels, num input channels, kernel depth, kernel height, kernel width) @@ -120,7 +115,7 @@ public class Convolution3DParamInitializer extends ConvolutionParamInitializer { Inputs to the convolution layer are: (batch size, num input feature maps, image depth, image height, image width) */ - Convolution3D layerConf = (Convolution3D) conf.getLayer(); + Convolution3D layerConf = (Convolution3D) conf; if (initializeParams) { int[] kernel = layerConf.getKernelSize(); @@ -135,7 +130,7 @@ public class Convolution3DParamInitializer extends ConvolutionParamInitializer { val 
weightsShape = new long[]{outputDepth, inputDepth, kernel[0], kernel[1], kernel[2]}; - return layerConf.getWeightInitFn().init(fanIn, fanOut, weightsShape, 'c', + return layerConf.getWeightInit().init(fanIn, fanOut, weightsShape, 'c', weightView); } else { int[] kernel = layerConf.getKernelSize(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ConvolutionParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ConvolutionParamInitializer.java index 4618e2c3e..9b53e3713 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ConvolutionParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ConvolutionParamInitializer.java @@ -22,17 +22,16 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; -public class ConvolutionParamInitializer implements ParamInitializer { +public class ConvolutionParamInitializer extends AbstractParamInitializer { private static final ConvolutionParamInitializer INSTANCE = new ConvolutionParamInitializer(); @@ -45,12 +44,7 @@ public class ConvolutionParamInitializer implements ParamInitializer { public final static String BIAS_KEY = DefaultParamInitializer.BIAS_KEY; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) l; @@ -61,7 +55,7 @@ public class ConvolutionParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) layer; if(layerConf.hasBias()){ @@ -72,12 +66,12 @@ public class ConvolutionParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Collections.singletonList(WEIGHT_KEY); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) layer; if(layerConf.hasBias()){ @@ -88,24 +82,24 @@ public class ConvolutionParamInitializer implements ParamInitializer { } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return WEIGHT_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - ConvolutionLayer layer = (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer(); + public Map 
init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + ConvolutionLayer layer = (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf; if (layer.getKernelSize().length != 2) throw new IllegalArgumentException("Filter size must be == 2"); Map params = Collections.synchronizedMap(new LinkedHashMap()); org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = - (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf; val nOut = layerConf.getNOut(); @@ -115,23 +109,23 @@ public class ConvolutionParamInitializer implements ParamInitializer { INDArray weightView = paramsView.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(nOut, numParams(conf))); params.put(BIAS_KEY, createBias(conf, biasView, initializeParams)); params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams)); - conf.addVariable(WEIGHT_KEY); - conf.addVariable(BIAS_KEY); - conf.addVariable(BIAS_KEY); + conf.getNetConfiguration().addNetWideVariable(WEIGHT_KEY); + conf.getNetConfiguration().addNetWideVariable(BIAS_KEY); + conf.getNetConfiguration().addNetWideVariable(BIAS_KEY); } else { INDArray weightView = paramsView; params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams)); - conf.addVariable(WEIGHT_KEY); + conf.getNetConfiguration().addNetWideVariable(WEIGHT_KEY); } return params; } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = - (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf; int[] kernel = layerConf.getKernelSize(); val nIn = layerConf.getNIn(); @@ -154,17 +148,17 @@ public class ConvolutionParamInitializer implements ParamInitializer { } //1 bias per feature map - protected INDArray createBias(NeuralNetConfiguration conf, INDArray biasView, boolean initializeParams) { + protected INDArray createBias(LayerConfiguration conf, INDArray biasView, boolean initializeParams) { //the bias is a 1D tensor -- one bias per output feature map org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = - (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf; if (initializeParams) biasView.assign(layerConf.getBiasInit()); return biasView; } - protected INDArray createWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, boolean initializeParams) { + protected INDArray createWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 4d weight matrix of: (number of kernels, num input channels, kernel height, kernel width) @@ -173,7 +167,7 @@ public class ConvolutionParamInitializer implements ParamInitializer { (batch size, num input feature maps, image height, image width) */ org.deeplearning4j.nn.conf.layers.ConvolutionLayer layerConf = - (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf; if (initializeParams) { int[] kernel = layerConf.getKernelSize(); int[] stride = layerConf.getStride(); @@ -186,7 +180,7 @@ public class ConvolutionParamInitializer implements ParamInitializer { val weightsShape = new long[] {outputDepth, inputDepth, kernel[0], kernel[1]}; - return 
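In the ConvolutionParamInitializer hunk above, parameter names are no longer registered on the NeuralNetConfiguration that used to be passed in; the LayerConfiguration now reaches back to its owning network configuration through getNetConfiguration().addNetWideVariable(...). The hunk registers BIAS_KEY twice, mirroring the duplicated conf.addVariable(BIAS_KEY) in the code it replaces. A condensed sketch of the registration, one call per key, assuming the layer configuration is already attached to a network configuration:

    // Inside init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams),
    // once the weight and bias views have been sliced out of paramsView:
    params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams));
    conf.getNetConfiguration().addNetWideVariable(WEIGHT_KEY);
    if (layerConf.hasBias()) {
        params.put(BIAS_KEY, createBias(conf, biasView, initializeParams));
        conf.getNetConfiguration().addNetWideVariable(BIAS_KEY);
    }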
layerConf.getWeightInitFn().init(fanIn, fanOut, weightsShape, 'c', weightView); + return layerConf.getWeightInit().init(fanIn, fanOut, weightsShape, 'c', weightView); } else { int[] kernel = layerConf.getKernelSize(); return WeightInitUtil.reshapeWeights( diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Deconvolution3DParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Deconvolution3DParamInitializer.java index 8169fec5f..6e2d2b128 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Deconvolution3DParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/Deconvolution3DParamInitializer.java @@ -22,9 +22,8 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.Deconvolution3D; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; @@ -45,12 +44,7 @@ public class Deconvolution3DParamInitializer extends ConvolutionParamInitializer public final static String BIAS_KEY = DefaultParamInitializer.BIAS_KEY; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { Deconvolution3D layerConf = (Deconvolution3D) l; int[] kernel = layerConf.getKernelSize(); @@ -61,13 +55,13 @@ public class Deconvolution3DParamInitializer extends ConvolutionParamInitializer @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - Deconvolution3D layer = (Deconvolution3D) conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + Deconvolution3D layer = (Deconvolution3D) conf; if (layer.getKernelSize().length != 3) throw new IllegalArgumentException("Filter size must be == 3"); Map params = Collections.synchronizedMap(new LinkedHashMap()); - Deconvolution3D layerConf = (Deconvolution3D) conf.getLayer(); + Deconvolution3D layerConf = (Deconvolution3D) conf; val nOut = layerConf.getNOut(); if (layer.hasBias()) { @@ -87,9 +81,9 @@ public class Deconvolution3DParamInitializer extends ConvolutionParamInitializer } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { - Deconvolution3D layerConf = (Deconvolution3D) conf.getLayer(); + Deconvolution3D layerConf = (Deconvolution3D) conf; int[] kernel = layerConf.getKernelSize(); val nIn = layerConf.getNIn(); @@ -111,7 +105,7 @@ public class Deconvolution3DParamInitializer extends ConvolutionParamInitializer } - protected INDArray createWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, boolean initializeParams) { + protected INDArray createWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 5d weight matrix of: (number of kernels, num input channels, kernel depth, kernel height, kernel width) @@ -119,7 +113,7 @@ public class Deconvolution3DParamInitializer extends ConvolutionParamInitializer Inputs to the convolution layer are: (batch size, num input feature maps, image depth, image height, image width) */ - 
Deconvolution3D layerConf = (Deconvolution3D) conf.getLayer(); + Deconvolution3D layerConf = (Deconvolution3D) conf; if (initializeParams) { int[] kernel = layerConf.getKernelSize(); @@ -135,7 +129,7 @@ public class Deconvolution3DParamInitializer extends ConvolutionParamInitializer //libnd4j: [kD, kH, kW, oC, iC] val weightsShape = new long[]{kernel[0], kernel[1], kernel[2], outputDepth, inputDepth}; - return layerConf.getWeightInitFn().init(fanIn, fanOut, weightsShape, 'c', weightView); + return layerConf.getWeightInit().init(fanIn, fanOut, weightsShape, 'c', weightView); } else { int[] kernel = layerConf.getKernelSize(); return WeightInitUtil.reshapeWeights( diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DeconvolutionParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DeconvolutionParamInitializer.java index a39a1f454..463c24ae3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DeconvolutionParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DeconvolutionParamInitializer.java @@ -21,7 +21,7 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; @@ -38,7 +38,7 @@ public class DeconvolutionParamInitializer extends ConvolutionParamInitializer { } @Override - protected INDArray createWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, boolean initializeParams) { + protected INDArray createWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 4d weight matrix of: (number of kernels, num input channels, kernel height, kernel width) @@ -47,7 +47,7 @@ public class DeconvolutionParamInitializer extends ConvolutionParamInitializer { (batch size, num input feature maps, image height, image width) */ org.deeplearning4j.nn.conf.layers.Deconvolution2D layerConf = - (org.deeplearning4j.nn.conf.layers.Deconvolution2D) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.Deconvolution2D) conf; if (initializeParams) { int[] kernel = layerConf.getKernelSize(); int[] stride = layerConf.getStride(); @@ -60,7 +60,7 @@ public class DeconvolutionParamInitializer extends ConvolutionParamInitializer { val weightsShape = new long[] {inputDepth, outputDepth, kernel[0], kernel[1]}; - INDArray weights = layerConf.getWeightInitFn().init( + INDArray weights = layerConf.getWeightInit().init( fanIn, fanOut, weightsShape, 'c', weightView); return weights; @@ -76,10 +76,10 @@ public class DeconvolutionParamInitializer extends ConvolutionParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.Deconvolution2D layerConf = - (org.deeplearning4j.nn.conf.layers.Deconvolution2D) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.Deconvolution2D) conf; int[] kernel = layerConf.getKernelSize(); val nIn = layerConf.getNIn(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DefaultParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DefaultParamInitializer.java index b41f05b4e..a7f444c91 100644 --- 
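Across these hunks the accessor rename is mechanical: layerConf.getWeightInitFn() becomes layerConf.getWeightInit(), with the IWeightInit contract itself unchanged. A sketch of the deconvolution weight creation as it reads after the patch, with the fanIn/fanOut and view computations elided and the shapes taken from the hunks above:

    // Deconvolution2D stores weights as [inputDepth, outputDepth, kH, kW];
    // the plain ConvolutionParamInitializer uses [outputDepth, inputDepth, kH, kW].
    long[] weightsShape = new long[] {inputDepth, outputDepth, kernel[0], kernel[1]};
    INDArray weights = layerConf.getWeightInit()      // was layerConf.getWeightInitFn()
            .init(fanIn, fanOut, weightsShape, 'c', weightView);
    return weights;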
a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DefaultParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DefaultParamInitializer.java @@ -20,18 +20,20 @@ package org.deeplearning4j.nn.params; +import lombok.NonNull; +import lombok.extern.slf4j.Slf4j; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInitUtil; +import org.deeplearning4j.nn.weights.WeightInitXavier; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; - -public class DefaultParamInitializer implements ParamInitializer { +@Slf4j +public class DefaultParamInitializer extends AbstractParamInitializer { private static final DefaultParamInitializer INSTANCE = new DefaultParamInitializer(); @@ -44,12 +46,7 @@ public class DefaultParamInitializer implements ParamInitializer { public final static String GAIN_KEY = "g"; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { FeedForwardLayer layerConf = (FeedForwardLayer) l; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); @@ -57,7 +54,7 @@ public class DefaultParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { final ArrayList keys = new ArrayList<>(3); keys.addAll(weightKeys(layer)); keys.addAll(biasKeys(layer)); @@ -65,7 +62,7 @@ public class DefaultParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { if(hasLayerNorm(layer)){ return Arrays.asList(WEIGHT_KEY, GAIN_KEY); } @@ -73,7 +70,7 @@ public class DefaultParamInitializer implements ParamInitializer { } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { if(hasBias(layer)){ return Collections.singletonList(BIAS_KEY); } else { @@ -83,19 +80,19 @@ public class DefaultParamInitializer implements ParamInitializer { @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return WEIGHT_KEY.equals(key) || (hasLayerNorm(layer) && GAIN_KEY.equals(key)); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - if (!(conf.getLayer() instanceof org.deeplearning4j.nn.conf.layers.FeedForwardLayer)) - throw new IllegalArgumentException("unsupported layer type: " + conf.getLayer().getClass().getName()); + public Map init(@NonNull LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + if (!(conf instanceof org.deeplearning4j.nn.conf.layers.FeedForwardLayer)) + throw new IllegalArgumentException("unsupported layer type: " + conf.getClass().getName()); Map params = Collections.synchronizedMap(new LinkedHashMap()); @@ -105,22 +102,22 @@ public class DefaultParamInitializer implements ParamInitializer { "Expected params view of length " + length 
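DefaultParamInitializer keeps its flattened parameter layout and now reads the sizes straight from the FeedForwardLayer configuration it is given: the params view is sliced as [weights | bias | gain], where the bias block exists only when hasBias(layerConf) is true and the gain block only when hasLayerNorm(layerConf) is true. A worked offset sketch, assuming a dense layer with nIn = 10, nOut = 5, a bias, and no layer norm:

    // Offsets into the flattened params view for nIn = 10, nOut = 5 (bias, no layer norm):
    long nIn = 10, nOut = 5;
    long nWeightParams = nIn * nOut;          // 50 -> weights occupy [0, 50)
    long biasOffset = nWeightParams;          // bias occupies [50, 55)
    long totalParams = nWeightParams + nOut;  // 55, the value numParams(layerConf) reports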
+ ", got length " + paramsView.length()); org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); val nWeightParams = nIn * nOut; INDArray weightView = paramsView.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(0, nWeightParams)); - params.put(WEIGHT_KEY, createWeightMatrix(conf, weightView, initializeParams)); - conf.addVariable(WEIGHT_KEY); + params.put(WEIGHT_KEY, createWeightMatrix(layerConf, weightView, initializeParams)); + layerConf.addVariable(WEIGHT_KEY); long offset = nWeightParams; if(hasBias(layerConf)){ INDArray biasView = paramsView.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(offset, offset + nOut)); - params.put(BIAS_KEY, createBias(conf, biasView, initializeParams)); - conf.addVariable(BIAS_KEY); + params.put(BIAS_KEY, createBias(layerConf, biasView, initializeParams)); + layerConf.addVariable(BIAS_KEY); offset += nOut; } @@ -135,9 +132,9 @@ public class DefaultParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); val nWeightParams = nIn * nOut; @@ -166,9 +163,9 @@ public class DefaultParamInitializer implements ParamInitializer { } - protected INDArray createBias(NeuralNetConfiguration conf, INDArray biasParamView, boolean initializeParameters) { + protected INDArray createBias(LayerConfiguration conf, INDArray biasParamView, boolean initializeParameters) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; return createBias(layerConf.getNOut(), layerConf.getBiasInit(), biasParamView, initializeParameters); } @@ -179,9 +176,9 @@ public class DefaultParamInitializer implements ParamInitializer { return biasParamView; } - protected INDArray createGain(NeuralNetConfiguration conf, INDArray gainParamView, boolean initializeParameters) { + protected INDArray createGain(LayerConfiguration conf, INDArray gainParamView, boolean initializeParameters) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; return createGain(layerConf.getNOut(), layerConf.getGainInit(), gainParamView, initializeParameters); } @@ -193,20 +190,27 @@ public class DefaultParamInitializer implements ParamInitializer { } - protected INDArray createWeightMatrix(NeuralNetConfiguration conf, INDArray weightParamView, + protected INDArray createWeightMatrix(LayerConfiguration conf, INDArray weightParamView, boolean initializeParameters) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; if (initializeParameters) { - return createWeightMatrix(layerConf.getNIn(), layerConf.getNOut(), layerConf.getWeightInitFn(), + if( 
layerConf.getWeightInit() == null) { + // set a default and set warning + layerConf.setWeightInit(new WeightInitXavier()); + log.warn("Weight Initializer function was not set on layer {} of class {}, it will default to {}", conf.getLayerName(), + conf.getClass().getSimpleName(), WeightInitXavier.class.getSimpleName()); + } + return createWeightMatrix(layerConf.getNIn(), layerConf.getNOut(), layerConf.getWeightInit(), weightParamView, true); } else { return createWeightMatrix(layerConf.getNIn(), layerConf.getNOut(), null, weightParamView, false); } } - protected INDArray createWeightMatrix(long nIn, long nOut, IWeightInit weightInit, + protected INDArray createWeightMatrix(long nIn, long nOut, + IWeightInit weightInit, INDArray weightParamView, boolean initializeParameters) { val shape = new long[] {nIn, nOut}; @@ -220,7 +224,7 @@ public class DefaultParamInitializer implements ParamInitializer { } } - protected boolean hasBias(Layer layer){ + protected boolean hasBias(LayerConfiguration layer){ if(layer instanceof BaseOutputLayer ) { return ((BaseOutputLayer) layer).hasBias(); } else if(layer instanceof DenseLayer){ @@ -233,7 +237,7 @@ public class DefaultParamInitializer implements ParamInitializer { return true; } - protected boolean hasLayerNorm(Layer layer){ + protected boolean hasLayerNorm(LayerConfiguration layer){ if(layer instanceof DenseLayer){ return ((DenseLayer) layer).hasLayerNorm(); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DepthwiseConvolutionParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DepthwiseConvolutionParamInitializer.java index b9f682818..d1bd00449 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DepthwiseConvolutionParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/DepthwiseConvolutionParamInitializer.java @@ -22,17 +22,16 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.conf.layers.DepthwiseConvolution2D; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; -public class DepthwiseConvolutionParamInitializer implements ParamInitializer { +public class DepthwiseConvolutionParamInitializer extends AbstractParamInitializer { private static final DepthwiseConvolutionParamInitializer INSTANCE = new DepthwiseConvolutionParamInitializer(); @@ -44,12 +43,7 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { public final static String BIAS_KEY = DefaultParamInitializer.BIAS_KEY; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) l; val depthWiseParams = numDepthWiseParams(layerConf); @@ -79,7 +73,7 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) layer; 
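Beyond the new signatures, the one behavioural change in DefaultParamInitializer is the fallback just above: a missing weight initializer no longer propagates a null into createWeightMatrix but is replaced by WeightInitXavier with a warning. A condensed sketch of that guard, using only accessors the patch itself introduces:

    // Inside createWeightMatrix(LayerConfiguration conf, INDArray weightParamView, ...),
    // where layerConf is the FeedForwardLayer cast of conf:
    if (layerConf.getWeightInit() == null) {
        // Default a missing configuration to Xavier instead of failing later with a NPE.
        layerConf.setWeightInit(new WeightInitXavier());
        log.warn("Weight initializer was not set on layer {} ({}); defaulting to {}",
                conf.getLayerName(), conf.getClass().getSimpleName(),
                WeightInitXavier.class.getSimpleName());
    }
    return createWeightMatrix(layerConf.getNIn(), layerConf.getNOut(),
            layerConf.getWeightInit(), weightParamView, true);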
if(layerConf.hasBias()){ @@ -90,12 +84,12 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Collections.singletonList(WEIGHT_KEY); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) layer; if(layerConf.hasBias()){ @@ -106,23 +100,23 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return WEIGHT_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - DepthwiseConvolution2D layer = (DepthwiseConvolution2D) conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + DepthwiseConvolution2D layer = (DepthwiseConvolution2D) conf; if (layer.getKernelSize().length != 2) throw new IllegalArgumentException("Filter size must be == 2"); Map params = Collections.synchronizedMap(new LinkedHashMap()); - DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) conf.getLayer(); + DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) conf; val depthWiseParams = numDepthWiseParams(layerConf); val biasParams = numBiasParams(layerConf); @@ -143,9 +137,9 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { - DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) conf.getLayer(); + DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) conf; int[] kernel = layerConf.getKernelSize(); val nIn = layerConf.getNIn(); @@ -169,21 +163,21 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { return out; } - protected INDArray createBias(NeuralNetConfiguration conf, INDArray biasView, boolean initializeParams) { - DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) conf.getLayer(); + protected INDArray createBias(LayerConfiguration conf, INDArray biasView, boolean initializeParams) { + DepthwiseConvolution2D layerConf = (DepthwiseConvolution2D) conf; if (initializeParams) biasView.assign(layerConf.getBiasInit()); return biasView; } - protected INDArray createDepthWiseWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, boolean initializeParams) { + protected INDArray createDepthWiseWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 4d weight matrix of: (channels multiplier, num input channels, kernel height, kernel width) Inputs to the convolution layer are: (batch size, num input feature maps, image height, image width) */ DepthwiseConvolution2D layerConf = - (DepthwiseConvolution2D) conf.getLayer(); + (DepthwiseConvolution2D) conf; int depthMultiplier = layerConf.getDepthMultiplier(); if (initializeParams) { @@ -197,7 +191,7 @@ public class DepthwiseConvolutionParamInitializer implements ParamInitializer { val weightsShape = new long[] {kernel[0], kernel[1], inputDepth, 
depthMultiplier}; - return layerConf.getWeightInitFn().init(fanIn, fanOut, weightsShape, 'c', + return layerConf.getWeightInit().init(fanIn, fanOut, weightsShape, 'c', weightView); } else { int[] kernel = layerConf.getKernelSize(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ElementWiseParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ElementWiseParamInitializer.java index 7245d6dab..665a47d7f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ElementWiseParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/ElementWiseParamInitializer.java @@ -23,7 +23,7 @@ package org.deeplearning4j.nn.params; import lombok.val; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.IWeightInit; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.indexing.NDArrayIndex; @@ -41,7 +41,7 @@ public class ElementWiseParamInitializer extends DefaultParamInitializer{ } @Override - public long numParams(Layer layer) { + public long numParams(LayerConfiguration layer) { FeedForwardLayer layerConf = (FeedForwardLayer) layer; val nIn = layerConf.getNIn(); return nIn*2; //weights + bias @@ -57,9 +57,9 @@ public class ElementWiseParamInitializer extends DefaultParamInitializer{ * @return Map of parameters keyed by type (view of the 'paramsView' array) */ @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - if (!(conf.getLayer() instanceof org.deeplearning4j.nn.conf.layers.FeedForwardLayer)) - throw new IllegalArgumentException("unsupported layer type: " + conf.getLayer().getClass().getName()); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + if (!(conf instanceof org.deeplearning4j.nn.conf.layers.FeedForwardLayer)) + throw new IllegalArgumentException("unsupported layer type: " + conf.getClass().getName()); Map params = Collections.synchronizedMap(new LinkedHashMap()); @@ -69,7 +69,7 @@ public class ElementWiseParamInitializer extends DefaultParamInitializer{ "Expected params view of length " + length + ", got length " + paramsView.length()); org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; val nIn = layerConf.getNIn(); val nWeightParams = nIn ; @@ -96,9 +96,9 @@ public class ElementWiseParamInitializer extends DefaultParamInitializer{ * @return A map containing an array by parameter type, that is a view of the full network gradients array */ @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); val nWeightParams = nIn ; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/EmptyParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/EmptyParamInitializer.java index 
7ec9ea885..28d458e78 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/EmptyParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/EmptyParamInitializer.java @@ -20,9 +20,10 @@ package org.deeplearning4j.nn.params; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import java.util.Collections; @@ -32,7 +33,7 @@ import java.util.Map; /** * @author Adam Gibson */ -public class EmptyParamInitializer implements ParamInitializer { +public class EmptyParamInitializer extends AbstractParamInitializer { private static final EmptyParamInitializer INSTANCE = new EmptyParamInitializer(); @@ -41,47 +42,42 @@ public class EmptyParamInitializer implements ParamInitializer { } @Override - public long numParams(NeuralNetConfiguration conf) { + public long numParams(LayerConfiguration layer) { return 0; } @Override - public long numParams(Layer layer) { - return 0; - } - - @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return false; } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return false; } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { return Collections.EMPTY_MAP; } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { return Collections.emptyMap(); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerParamInitializer.java index 71bff7702..580d07402 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerParamInitializer.java @@ -20,81 +20,75 @@ package org.deeplearning4j.nn.params; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.misc.FrozenLayer; -import org.nd4j.linalg.api.ndarray.INDArray; - import java.util.Collections; import java.util.List; import java.util.Map; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.api.ParamInitializer; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.misc.FrozenLayer; +import 
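EmptyParamInitializer, shown in full above, is effectively the minimal implementation of the new contract: initializers extend AbstractParamInitializer and answer every question about a LayerConfiguration rather than a NeuralNetConfiguration. A sketch of a custom no-parameter initializer against this API, assuming AbstractParamInitializer adds no abstract methods beyond those overridden here (the class name is hypothetical):

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;
    import org.deeplearning4j.nn.api.AbstractParamInitializer;
    import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
    import org.nd4j.linalg.api.ndarray.INDArray;

    public class NoParamInitializer extends AbstractParamInitializer {
        @Override public long numParams(LayerConfiguration layer) { return 0; }
        @Override public List<String> paramKeys(LayerConfiguration layer) { return Collections.emptyList(); }
        @Override public List<String> weightKeys(LayerConfiguration layer) { return Collections.emptyList(); }
        @Override public List<String> biasKeys(LayerConfiguration layer) { return Collections.emptyList(); }
        @Override public boolean isWeightParam(LayerConfiguration layer, String key) { return false; }
        @Override public boolean isBiasParam(LayerConfiguration layer, String key) { return false; }
        @Override public Map<String, INDArray> init(LayerConfiguration conf, INDArray paramsView,
                boolean initializeParams) { return Collections.emptyMap(); }
        @Override public Map<String, INDArray> getGradientsFromFlattened(LayerConfiguration conf,
                INDArray gradientView) { return Collections.emptyMap(); }
    }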
org.nd4j.linalg.api.ndarray.INDArray; -public class FrozenLayerParamInitializer implements ParamInitializer { +public class FrozenLayerParamInitializer extends AbstractParamInitializer { - private static final FrozenLayerParamInitializer INSTANCE = new FrozenLayerParamInitializer(); + private static final FrozenLayerParamInitializer INSTANCE = new FrozenLayerParamInitializer(); - public static FrozenLayerParamInitializer getInstance() { - return INSTANCE; - } + public static FrozenLayerParamInitializer getInstance() { + return INSTANCE; + } - @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } + @Override + public long numParams(LayerConfiguration layer) { + FrozenLayer fl = (FrozenLayer) layer; + ParamInitializer initializer = fl.getInnerConfiguration().initializer(); + return initializer.numParams(fl.getInnerConfiguration()); + } - @Override - public long numParams(Layer layer) { - FrozenLayer fl = (FrozenLayer) layer; - ParamInitializer initializer = fl.getLayer().initializer(); - return initializer.numParams(fl.getLayer()); - } + @Override + public List paramKeys(LayerConfiguration layer) { + return Collections.emptyList(); + } - @Override - public List paramKeys(Layer layer) { - return Collections.emptyList(); - } + @Override + public List weightKeys(LayerConfiguration layer) { + return Collections.emptyList(); + } - @Override - public List weightKeys(Layer layer) { - return Collections.emptyList(); - } + @Override + public List biasKeys(LayerConfiguration layer) { + return Collections.emptyList(); + } - @Override - public List biasKeys(Layer layer) { - return Collections.emptyList(); - } + @Override + public boolean isWeightParam(LayerConfiguration layer, String key) { + return false; + } - @Override - public boolean isWeightParam(Layer layer, String key) { - return false; - } + @Override + public boolean isBiasParam(LayerConfiguration layer, String key) { + return false; + } - @Override - public boolean isBiasParam(Layer layer, String key) { - return false; - } + @Override + public Map init(LayerConfiguration conf, INDArray paramsView, + boolean initializeParams) { + FrozenLayer fl_conf = (FrozenLayer) conf; + LayerConfiguration innerLayer = fl_conf.getInnerConfiguration(); + ParamInitializer initializer = innerLayer.initializer(); + fl_conf.setInnerConfiguration(innerLayer); + Map m = initializer.init(conf, paramsView, initializeParams); + return m; + } - @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - FrozenLayer fl = (FrozenLayer) conf.getLayer(); - Layer innerLayer = fl.getLayer(); - ParamInitializer initializer = innerLayer.initializer(); - conf.setLayer(innerLayer); - Map m = initializer.init(conf, paramsView, initializeParams); - conf.setLayer(fl); - - return m; - } - - @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - FrozenLayer fl = (FrozenLayer) conf.getLayer(); - Layer innerLayer = fl.getLayer(); - ParamInitializer initializer = innerLayer.initializer(); - conf.setLayer(innerLayer); - Map m = initializer.getGradientsFromFlattened(conf, gradientView); - conf.setLayer(fl); - return m; - } + @Override + public Map getGradientsFromFlattened(LayerConfiguration conf, + INDArray gradientView) { + FrozenLayer fl = (FrozenLayer) conf; + LayerConfiguration innerLayer = fl.getInnerConfiguration(); + ParamInitializer initializer = innerLayer.initializer(); + fl.setInnerConfiguration(innerLayer); + Map m = 
initializer.getGradientsFromFlattened(conf, gradientView); + return m; + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerWithBackpropParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerWithBackpropParamInitializer.java index 5aa01b3e4..1328e28d9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerWithBackpropParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/FrozenLayerWithBackpropParamInitializer.java @@ -20,10 +20,10 @@ package org.deeplearning4j.nn.params; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.misc.FrozenLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop; import org.nd4j.linalg.api.ndarray.INDArray; @@ -31,7 +31,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; -public class FrozenLayerWithBackpropParamInitializer implements ParamInitializer { +public class FrozenLayerWithBackpropParamInitializer extends AbstractParamInitializer { private static final FrozenLayerWithBackpropParamInitializer INSTANCE = new FrozenLayerWithBackpropParamInitializer(); @@ -40,62 +40,54 @@ public class FrozenLayerWithBackpropParamInitializer implements ParamInitializer } @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer layer) { + public long numParams(LayerConfiguration layer) { FrozenLayerWithBackprop fl = (FrozenLayerWithBackprop) layer; ParamInitializer initializer = fl.getUnderlying().initializer(); return initializer.numParams(fl.getUnderlying()); } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return false; } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return false; } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - FrozenLayerWithBackprop fl = (FrozenLayerWithBackprop) conf.getLayer(); - Layer innerLayer = fl.getUnderlying(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + FrozenLayerWithBackprop fl = (FrozenLayerWithBackprop) conf; + LayerConfiguration innerLayer = fl.getUnderlying(); ParamInitializer initializer = innerLayer.initializer(); - conf.setLayer(innerLayer); + fl.setUnderlying(innerLayer); Map m = initializer.init(conf, paramsView, initializeParams); - conf.setLayer(fl); - return m; } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - FrozenLayerWithBackprop fl = (FrozenLayerWithBackprop) conf.getLayer(); - Layer innerLayer = fl.getUnderlying(); + 
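Both frozen-layer initializers above now unwrap their inner LayerConfiguration (getInnerConfiguration() for FrozenLayer, getUnderlying() for FrozenLayerWithBackprop) and delegate to that configuration's initializer; the old swap-and-restore of conf.setLayer(...) is gone. The hunks still forward the outer frozen configuration to the inner initializer, which most inner initializers would reject when they cast it. A sketch that forwards the unwrapped configuration instead, under that assumption and using only the accessors the patch introduces:

    @Override
    public Map<String, INDArray> init(LayerConfiguration conf, INDArray paramsView,
            boolean initializeParams) {
        FrozenLayer fl = (FrozenLayer) conf;
        LayerConfiguration inner = fl.getInnerConfiguration();
        // Hand the inner configuration, not the frozen wrapper, to its own initializer.
        return inner.initializer().init(inner, paramsView, initializeParams);
    }

    @Override
    public Map<String, INDArray> getGradientsFromFlattened(LayerConfiguration conf,
            INDArray gradientView) {
        FrozenLayer fl = (FrozenLayer) conf;
        LayerConfiguration inner = fl.getInnerConfiguration();
        return inner.initializer().getGradientsFromFlattened(inner, gradientView);
    }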
public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + FrozenLayerWithBackprop fl = (FrozenLayerWithBackprop) conf; + LayerConfiguration innerLayer = fl.getUnderlying(); ParamInitializer initializer = innerLayer.initializer(); - conf.setLayer(innerLayer); + fl.setUnderlying(innerLayer); Map m = initializer.getGradientsFromFlattened(conf, gradientView); - conf.setLayer(fl); return m; } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesBidirectionalLSTMParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesBidirectionalLSTMParamInitializer.java index de437ee6d..e74d69a1a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesBidirectionalLSTMParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesBidirectionalLSTMParamInitializer.java @@ -21,9 +21,8 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; @@ -33,7 +32,7 @@ import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; -public class GravesBidirectionalLSTMParamInitializer implements ParamInitializer { +public class GravesBidirectionalLSTMParamInitializer extends AbstractParamInitializer { private static final GravesBidirectionalLSTMParamInitializer INSTANCE = new GravesBidirectionalLSTMParamInitializer(); @@ -61,12 +60,7 @@ public class GravesBidirectionalLSTMParamInitializer implements ParamInitializer BIAS_KEY_BACKWARDS)); @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM layerConf = (org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) l; @@ -81,37 +75,37 @@ public class GravesBidirectionalLSTMParamInitializer implements ParamInitializer } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return ALL_PARAM_KEYS; } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return WEIGHT_KEYS; } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return BIAS_KEYS; } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return RECURRENT_WEIGHT_KEY_FORWARDS.equals(key) || INPUT_WEIGHT_KEY_FORWARDS.equals(key) || RECURRENT_WEIGHT_KEY_BACKWARDS.equals(key) || INPUT_WEIGHT_KEY_BACKWARDS.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY_FORWARDS.equals(key) || BIAS_KEY_BACKWARDS.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { Map params = Collections.synchronizedMap(new LinkedHashMap()); 
org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM layerConf = - (org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) conf; double forgetGateInit = layerConf.getForgetGateBiasInit(); val nL = layerConf.getNOut(); //i.e., n neurons in this layer @@ -163,14 +157,14 @@ public class GravesBidirectionalLSTMParamInitializer implements ParamInitializer val inputWShape = new long[]{nLast, 4 * nL}; val recurrentWShape = new long[]{nL, 4 * nL + 3}; - params.put(INPUT_WEIGHT_KEY_FORWARDS, layerConf.getWeightInitFn().init(fanIn, fanOut, inputWShape, + params.put(INPUT_WEIGHT_KEY_FORWARDS, layerConf.getWeightInit().init(fanIn, fanOut, inputWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, iwF)); - params.put(RECURRENT_WEIGHT_KEY_FORWARDS, layerConf.getWeightInitFn().init(fanIn, fanOut, recurrentWShape, + params.put(RECURRENT_WEIGHT_KEY_FORWARDS, layerConf.getWeightInit().init(fanIn, fanOut, recurrentWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, rwF)); params.put(BIAS_KEY_FORWARDS, bF); - params.put(INPUT_WEIGHT_KEY_BACKWARDS, layerConf.getWeightInitFn().init(fanIn, fanOut, inputWShape, + params.put(INPUT_WEIGHT_KEY_BACKWARDS, layerConf.getWeightInit().init(fanIn, fanOut, inputWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, iwR)); - params.put(RECURRENT_WEIGHT_KEY_BACKWARDS, layerConf.getWeightInitFn().init(fanIn, fanOut, recurrentWShape, + params.put(RECURRENT_WEIGHT_KEY_BACKWARDS, layerConf.getWeightInit().init(fanIn, fanOut, recurrentWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, rwR)); params.put(BIAS_KEY_BACKWARDS, bR); } else { @@ -187,9 +181,9 @@ public class GravesBidirectionalLSTMParamInitializer implements ParamInitializer @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM layerConf = - (org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.GravesBidirectionalLSTM) conf; val nL = layerConf.getNOut(); //i.e., n neurons in this layer val nLast = layerConf.getNIn(); //i.e., n neurons in previous layer diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesLSTMParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesLSTMParamInitializer.java index 37e4d1cdf..265027812 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesLSTMParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/GravesLSTMParamInitializer.java @@ -21,9 +21,8 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; @@ -33,7 +32,7 @@ import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; -public class GravesLSTMParamInitializer implements ParamInitializer { +public class GravesLSTMParamInitializer extends AbstractParamInitializer { private static final GravesLSTMParamInitializer INSTANCE = new 
GravesLSTMParamInitializer(); @@ -47,12 +46,7 @@ public class GravesLSTMParamInitializer implements ParamInitializer { public final static String INPUT_WEIGHT_KEY = LSTMParamInitializer.INPUT_WEIGHT_KEY; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { org.deeplearning4j.nn.conf.layers.GravesLSTM layerConf = (org.deeplearning4j.nn.conf.layers.GravesLSTM) l; val nL = layerConf.getNOut(); //i.e., n neurons in this layer @@ -66,35 +60,35 @@ public class GravesLSTMParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return Arrays.asList(INPUT_WEIGHT_KEY, RECURRENT_WEIGHT_KEY, BIAS_KEY); } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Arrays.asList(INPUT_WEIGHT_KEY, RECURRENT_WEIGHT_KEY); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return Collections.singletonList(BIAS_KEY); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return RECURRENT_WEIGHT_KEY.equals(key) || INPUT_WEIGHT_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { Map params = Collections.synchronizedMap(new LinkedHashMap()); org.deeplearning4j.nn.conf.layers.GravesLSTM layerConf = - (org.deeplearning4j.nn.conf.layers.GravesLSTM) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.GravesLSTM) conf; double forgetGateInit = layerConf.getForgetGateBiasInit(); val nL = layerConf.getNOut(); //i.e., n neurons in this layer @@ -128,10 +122,10 @@ public class GravesLSTMParamInitializer implements ParamInitializer { if(layerConf.getWeightInitFnRecurrent() != null){ rwInit = layerConf.getWeightInitFnRecurrent(); } else { - rwInit = layerConf.getWeightInitFn(); + rwInit = layerConf.getWeightInit(); } - params.put(INPUT_WEIGHT_KEY,layerConf.getWeightInitFn().init(fanIn, fanOut, inputWShape, + params.put(INPUT_WEIGHT_KEY,layerConf.getWeightInit().init(fanIn, fanOut, inputWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, inputWeightView)); params.put(RECURRENT_WEIGHT_KEY, rwInit.init(fanIn, fanOut, recurrentWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, recurrentWeightView)); @@ -157,9 +151,9 @@ public class GravesLSTMParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { org.deeplearning4j.nn.conf.layers.GravesLSTM layerConf = - (org.deeplearning4j.nn.conf.layers.GravesLSTM) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.GravesLSTM) conf; val nL = layerConf.getNOut(); //i.e., n neurons in this layer val nLast = layerConf.getNIn(); //i.e., n neurons in previous layer diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/LSTMParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/LSTMParamInitializer.java index 
2a7418957..040822a8a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/LSTMParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/LSTMParamInitializer.java @@ -20,11 +20,15 @@ package org.deeplearning4j.nn.params; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.conf.layers.LSTM; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; @@ -32,9 +36,7 @@ import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.INDArrayIndex; import org.nd4j.linalg.indexing.NDArrayIndex; -import java.util.*; - -public class LSTMParamInitializer implements ParamInitializer { +public class LSTMParamInitializer extends AbstractParamInitializer { private static final LSTMParamInitializer INSTANCE = new LSTMParamInitializer(); @@ -54,12 +56,7 @@ public class LSTMParamInitializer implements ParamInitializer { private static final List BIAS_KEYS = Collections.unmodifiableList(Collections.singletonList(BIAS_KEY)); @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { LSTM layerConf = (LSTM) l; val nL = layerConf.getNOut(); //i.e., n neurons in this layer @@ -73,34 +70,34 @@ public class LSTMParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return LAYER_PARAM_KEYS; } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return WEIGHT_KEYS; } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return BIAS_KEYS; } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return RECURRENT_WEIGHT_KEY.equals(key) || INPUT_WEIGHT_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { Map params = Collections.synchronizedMap(new LinkedHashMap()); - org.deeplearning4j.nn.conf.layers.LSTM layerConf = (org.deeplearning4j.nn.conf.layers.LSTM) conf.getLayer(); + org.deeplearning4j.nn.conf.layers.LSTM layerConf = (org.deeplearning4j.nn.conf.layers.LSTM) conf; double forgetGateInit = layerConf.getForgetGateBiasInit(); val nL = layerConf.getNOut(); //i.e., n neurons in this layer @@ -134,10 +131,10 @@ public class LSTMParamInitializer implements ParamInitializer { if(layerConf.getWeightInitFnRecurrent() != null){ rwInit = layerConf.getWeightInitFnRecurrent(); } else { - rwInit = layerConf.getWeightInitFn(); + rwInit = layerConf.getWeightInit(); } - params.put(INPUT_WEIGHT_KEY, 
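The LSTM initializers above (GravesLSTM and LSTM) keep the dedicated recurrent initializer when one is configured and otherwise fall back to the layer's input-weight initializer, now reached through getWeightInit(). A condensed sketch of that selection as it reads inside init(), with the shape and view computations elided:

    // Inside init(LayerConfiguration conf, ...), once fanIn/fanOut and the weight views exist:
    IWeightInit rwInit = layerConf.getWeightInitFnRecurrent() != null
            ? layerConf.getWeightInitFnRecurrent()
            : layerConf.getWeightInit();   // shared default, renamed from getWeightInitFn()

    params.put(INPUT_WEIGHT_KEY, layerConf.getWeightInit().init(
            fanIn, fanOut, inputWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, inputWeightView));
    params.put(RECURRENT_WEIGHT_KEY, rwInit.init(
            fanIn, fanOut, recurrentWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, recurrentWeightView));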
layerConf.getWeightInitFn().init(fanIn, fanOut, inputWShape, + params.put(INPUT_WEIGHT_KEY, layerConf.getWeightInit().init(fanIn, fanOut, inputWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, inputWeightView)); params.put(RECURRENT_WEIGHT_KEY, rwInit.init(fanIn, fanOut, recurrentWShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, recurrentWeightView)); biasView.put(new INDArrayIndex[] {NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(nL, 2 * nL)}, @@ -162,8 +159,8 @@ public class LSTMParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - org.deeplearning4j.nn.conf.layers.LSTM layerConf = (org.deeplearning4j.nn.conf.layers.LSTM) conf.getLayer(); + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + org.deeplearning4j.nn.conf.layers.LSTM layerConf = (org.deeplearning4j.nn.conf.layers.LSTM) conf; val nL = layerConf.getNOut(); //i.e., n neurons in this layer val nLast = layerConf.getNIn(); //i.e., n neurons in previous layer diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PReLUParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PReLUParamInitializer.java index d0a93e368..11d5638fe 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PReLUParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PReLUParamInitializer.java @@ -21,10 +21,9 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.BaseLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.PReLULayer; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInitUtil; @@ -36,7 +35,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -public class PReLUParamInitializer implements ParamInitializer { +public class PReLUParamInitializer extends AbstractParamInitializer { public final static String WEIGHT_KEY = "W"; private final long[] weightShape; @@ -58,14 +57,8 @@ public class PReLUParamInitializer implements ParamInitializer { return new PReLUParamInitializer(shape, sharedAxes); } - @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { return numParams(weightShape); } @@ -78,34 +71,34 @@ public class PReLUParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { return weightKeys(layer); } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Collections.singletonList(WEIGHT_KEY); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return Collections.emptyList(); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return WEIGHT_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String 
key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return false; } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - if (!(conf.getLayer() instanceof BaseLayer)) - throw new IllegalArgumentException("unsupported layer type: " + conf.getLayer().getClass().getName()); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + if (!(conf instanceof BaseLayerConfiguration)) + throw new IllegalArgumentException("unsupported layer type: " + conf.getClass().getName()); Map params = Collections.synchronizedMap(new LinkedHashMap()); @@ -123,7 +116,7 @@ public class PReLUParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { val length = numParams(conf); INDArray weightGradientView = gradientView.get(NDArrayIndex.interval(0,0,true), NDArrayIndex.interval(0, length)) @@ -135,12 +128,12 @@ public class PReLUParamInitializer implements ParamInitializer { } - protected INDArray createWeightMatrix(NeuralNetConfiguration conf, INDArray weightParamView, + protected INDArray createWeightMatrix(LayerConfiguration conf, INDArray weightParamView, boolean initializeParameters) { - PReLULayer layerConf = (PReLULayer) conf.getLayer(); + PReLULayer layerConf = (PReLULayer) conf; if (initializeParameters) { - return layerConf.getWeightInitFn().init(layerConf.getNIn(), layerConf.getNOut(), + return layerConf.getWeightInit().init(layerConf.getNIn(), layerConf.getNOut(), weightShape, IWeightInit.DEFAULT_WEIGHT_INIT_ORDER, weightParamView); } else { return WeightInitUtil.reshapeWeights(weightShape, weightParamView); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PretrainParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PretrainParamInitializer.java index c794a452c..4eb87427a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PretrainParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/PretrainParamInitializer.java @@ -22,6 +22,7 @@ package org.deeplearning4j.nn.params; import lombok.val; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.NDArrayIndex; @@ -45,18 +46,18 @@ public class PretrainParamInitializer extends DefaultParamInitializer { public final static String VISIBLE_BIAS_KEY = "v" + DefaultParamInitializer.BIAS_KEY; @Override - public long numParams(NeuralNetConfiguration conf) { + public long numParams(LayerConfiguration conf) { org.deeplearning4j.nn.conf.layers.BasePretrainNetwork layerConf = - (org.deeplearning4j.nn.conf.layers.BasePretrainNetwork) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.BasePretrainNetwork) conf; return super.numParams(conf) + layerConf.getNIn(); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { Map params = super.init(conf, paramsView, initializeParams); org.deeplearning4j.nn.conf.layers.BasePretrainNetwork layerConf = - (org.deeplearning4j.nn.conf.layers.BasePretrainNetwork) conf.getLayer(); + 
(org.deeplearning4j.nn.conf.layers.BasePretrainNetwork) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); val nWeightParams = nIn * nOut; @@ -69,10 +70,10 @@ public class PretrainParamInitializer extends DefaultParamInitializer { return params; } - protected INDArray createVisibleBias(NeuralNetConfiguration conf, INDArray visibleBiasView, + protected INDArray createVisibleBias(LayerConfiguration conf, INDArray visibleBiasView, boolean initializeParameters) { org.deeplearning4j.nn.conf.layers.BasePretrainNetwork layerConf = - (org.deeplearning4j.nn.conf.layers.BasePretrainNetwork) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.BasePretrainNetwork) conf; if (initializeParameters) { INDArray ret = Nd4j.valueArrayOf(new long[]{1, layerConf.getNIn()}, layerConf.getVisibleBiasInit()); visibleBiasView.assign(ret); @@ -82,10 +83,10 @@ public class PretrainParamInitializer extends DefaultParamInitializer { @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { Map out = super.getGradientsFromFlattened(conf, gradientView); org.deeplearning4j.nn.conf.layers.FeedForwardLayer layerConf = - (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf.getLayer(); + (org.deeplearning4j.nn.conf.layers.FeedForwardLayer) conf; val nIn = layerConf.getNIn(); val nOut = layerConf.getNOut(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SameDiffParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SameDiffParamInitializer.java index 2b9c3484c..0846e0bf5 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SameDiffParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SameDiffParamInitializer.java @@ -22,9 +22,10 @@ package org.deeplearning4j.nn.params; import lombok.extern.slf4j.Slf4j; import lombok.val; +import org.deeplearning4j.nn.api.AbstractParamInitializer; import org.deeplearning4j.nn.api.ParamInitializer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.samediff.AbstractSameDiffLayer; import org.deeplearning4j.nn.conf.layers.samediff.SameDiffVertex; import org.nd4j.linalg.api.ndarray.INDArray; @@ -38,7 +39,7 @@ import java.util.Map; import static org.nd4j.linalg.indexing.NDArrayIndex.interval; @Slf4j -public class SameDiffParamInitializer implements ParamInitializer { +public class SameDiffParamInitializer extends AbstractParamInitializer { private static final SameDiffParamInitializer INSTANCE = new SameDiffParamInitializer(); @@ -47,12 +48,7 @@ public class SameDiffParamInitializer implements ParamInitializer { } @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer layer) { + public long numParams(LayerConfiguration layer) { AbstractSameDiffLayer sd = (AbstractSameDiffLayer)layer; Map m = sd.getLayerParams().getParamShapes(); int n = 0; @@ -63,36 +59,36 @@ public class SameDiffParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { AbstractSameDiffLayer sd = (AbstractSameDiffLayer)layer; return sd.getLayerParams().getParameterKeys(); } @Override - public List 
weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { AbstractSameDiffLayer sd = (AbstractSameDiffLayer)layer; return sd.getLayerParams().getWeightParameterKeys(); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { AbstractSameDiffLayer sd = (AbstractSameDiffLayer)layer; return sd.getLayerParams().getBiasParameterKeys(); } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return weightKeys(layer).contains(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return biasKeys(layer).contains(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - AbstractSameDiffLayer sd = (AbstractSameDiffLayer) conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + AbstractSameDiffLayer sd = (AbstractSameDiffLayer) conf; Map out = subsetAndReshape(sd.getLayerParams().getParameterKeys(), sd.getLayerParams().getParamShapes(), paramsView, sd); if(initializeParams){ @@ -107,8 +103,8 @@ public class SameDiffParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - AbstractSameDiffLayer sd = (AbstractSameDiffLayer) conf.getLayer(); + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + AbstractSameDiffLayer sd = (AbstractSameDiffLayer) conf; return subsetAndReshape(sd.getLayerParams().getParameterKeys(), sd.getLayerParams().getParamShapes(), gradientView, sd); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SeparableConvolutionParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SeparableConvolutionParamInitializer.java index bb7dabb4e..58547886f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SeparableConvolutionParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SeparableConvolutionParamInitializer.java @@ -22,9 +22,8 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.SeparableConvolution2D; import org.deeplearning4j.nn.weights.WeightInitUtil; import org.nd4j.linalg.api.ndarray.INDArray; @@ -32,7 +31,7 @@ import org.nd4j.linalg.indexing.NDArrayIndex; import java.util.*; -public class SeparableConvolutionParamInitializer implements ParamInitializer { +public class SeparableConvolutionParamInitializer extends AbstractParamInitializer { private static final SeparableConvolutionParamInitializer INSTANCE = new SeparableConvolutionParamInitializer(); @@ -45,12 +44,7 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { public final static String BIAS_KEY = DefaultParamInitializer.BIAS_KEY; @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer l) { + public long numParams(LayerConfiguration l) { SeparableConvolution2D layerConf = 
(SeparableConvolution2D) l; val depthWiseParams = numDepthWiseParams(layerConf); @@ -96,7 +90,7 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public List paramKeys(LayerConfiguration layer) { SeparableConvolution2D layerConf = (SeparableConvolution2D) layer; if(layerConf.hasBias()){ @@ -107,12 +101,12 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { return Arrays.asList(DEPTH_WISE_WEIGHT_KEY, POINT_WISE_WEIGHT_KEY); } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { SeparableConvolution2D layerConf = (SeparableConvolution2D) layer; if(layerConf.hasBias()){ @@ -123,23 +117,23 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return DEPTH_WISE_WEIGHT_KEY.equals(key) || POINT_WISE_WEIGHT_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - SeparableConvolution2D layer = (SeparableConvolution2D) conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + SeparableConvolution2D layer = (SeparableConvolution2D) conf; if (layer.getKernelSize().length != 2) throw new IllegalArgumentException("Filter size must be == 2"); Map params = Collections.synchronizedMap(new LinkedHashMap()); - SeparableConvolution2D layerConf = (SeparableConvolution2D) conf.getLayer(); + SeparableConvolution2D layerConf = (SeparableConvolution2D) conf; val depthWiseParams = numDepthWiseParams(layerConf); val biasParams = numBiasParams(layerConf); @@ -164,10 +158,10 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { SeparableConvolution2D layerConf = - (SeparableConvolution2D) conf.getLayer(); + (SeparableConvolution2D) conf; int[] kernel = layerConf.getKernelSize(); val nIn = layerConf.getNIn(); @@ -195,22 +189,22 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { return out; } - protected INDArray createBias(NeuralNetConfiguration conf, INDArray biasView, boolean initializeParams) { + protected INDArray createBias(LayerConfiguration conf, INDArray biasView, boolean initializeParams) { SeparableConvolution2D layerConf = - (SeparableConvolution2D) conf.getLayer(); + (SeparableConvolution2D) conf; if (initializeParams) biasView.assign(layerConf.getBiasInit()); return biasView; } - protected INDArray createDepthWiseWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, boolean initializeParams) { + protected INDArray createDepthWiseWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 4d weight matrix of: (channels multiplier, num input channels, kernel height, kernel width) Inputs to the convolution layer are: (batch size, num input feature maps, image height, image width) */ 
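        /* Illustrative sketch only - hypothetical layer settings, not taken from this patch:
         * with nIn = 3, nOut = 16, depthMultiplier = 2 and a 3x3 kernel, the two weight views
         * created by this initializer would be shaped as
         *   depth-wise weights: {depthMultiplier, nIn, kH, kW}      -> [2, 3, 3, 3]
         *   point-wise weights: {nOut, depthMultiplier * nIn, 1, 1} -> [16, 6, 1, 1]
         * Both are cut from the single flattened params vector and reshaped in 'c' order via
         * layerConf.getWeightInit().init(...), as the surrounding code shows.
         */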
SeparableConvolution2D layerConf = - (SeparableConvolution2D) conf.getLayer(); + (SeparableConvolution2D) conf; int depthMultiplier = layerConf.getDepthMultiplier(); if (initializeParams) { @@ -224,7 +218,7 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { val weightsShape = new long[] {depthMultiplier, inputDepth, kernel[0], kernel[1]}; - return layerConf.getWeightInitFn().init(fanIn, fanOut, weightsShape, 'c', + return layerConf.getWeightInit().init(fanIn, fanOut, weightsShape, 'c', weightView); } else { int[] kernel = layerConf.getKernelSize(); @@ -233,14 +227,14 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { } } - protected INDArray createPointWiseWeightMatrix(NeuralNetConfiguration conf, INDArray weightView, + protected INDArray createPointWiseWeightMatrix(LayerConfiguration conf, INDArray weightView, boolean initializeParams) { /* Create a 4d weight matrix of: (num output channels, channels multiplier * num input channels, kernel height, kernel width) */ SeparableConvolution2D layerConf = - (SeparableConvolution2D) conf.getLayer(); + (SeparableConvolution2D) conf; int depthMultiplier = layerConf.getDepthMultiplier(); if (initializeParams) { @@ -253,7 +247,7 @@ public class SeparableConvolutionParamInitializer implements ParamInitializer { val weightsShape = new long[] {outputDepth, depthMultiplier * inputDepth, 1, 1}; - return layerConf.getWeightInitFn().init(fanIn, fanOut, weightsShape, 'c', + return layerConf.getWeightInit().init(fanIn, fanOut, weightsShape, 'c', weightView); } else { return WeightInitUtil.reshapeWeights( diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SimpleRnnParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SimpleRnnParamInitializer.java index f3fbf1e11..488c00396 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SimpleRnnParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/SimpleRnnParamInitializer.java @@ -21,9 +21,8 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; import org.deeplearning4j.nn.weights.IWeightInit; import org.nd4j.linalg.api.ndarray.INDArray; @@ -31,9 +30,8 @@ import org.nd4j.linalg.api.ndarray.INDArray; import java.util.*; import static org.nd4j.linalg.indexing.NDArrayIndex.interval; -import static org.nd4j.linalg.indexing.NDArrayIndex.point; -public class SimpleRnnParamInitializer implements ParamInitializer { +public class SimpleRnnParamInitializer extends AbstractParamInitializer { private static final SimpleRnnParamInitializer INSTANCE = new SimpleRnnParamInitializer(); @@ -51,12 +49,7 @@ public class SimpleRnnParamInitializer implements ParamInitializer { @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer layer) { + public long numParams(LayerConfiguration layer) { SimpleRnn c = (SimpleRnn)layer; val nIn = c.getNIn(); val nOut = c.getNOut(); @@ -64,7 +57,7 @@ public class SimpleRnnParamInitializer implements ParamInitializer { } @Override - public List paramKeys(Layer layer) { + public 
List paramKeys(LayerConfiguration layer) { final ArrayList keys = new ArrayList<>(3); keys.addAll(weightKeys(layer)); keys.addAll(biasKeys(layer)); @@ -72,7 +65,7 @@ public class SimpleRnnParamInitializer implements ParamInitializer { } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { final ArrayList keys = new ArrayList<>(WEIGHT_KEYS); if(hasLayerNorm(layer)){ @@ -83,23 +76,23 @@ public class SimpleRnnParamInitializer implements ParamInitializer { } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { return BIAS_KEYS; } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return WEIGHT_KEY.equals(key) || RECURRENT_WEIGHT_KEY.equals(key) || GAIN_KEY.equals(key); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return BIAS_KEY.equals(key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - SimpleRnn c = (SimpleRnn)conf.getLayer(); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + SimpleRnn c = (SimpleRnn)conf; val nIn = c.getNIn(); val nOut = c.getNOut(); @@ -107,14 +100,14 @@ public class SimpleRnnParamInitializer implements ParamInitializer { if (initializeParams) { m = getSubsets(paramsView, nIn, nOut, false, hasLayerNorm(c)); - INDArray w = c.getWeightInitFn().init(nIn, nOut, new long[]{nIn, nOut}, 'f', m.get(WEIGHT_KEY)); + INDArray w = c.getWeightInit().init(nIn, nOut, new long[]{nIn, nOut}, 'f', m.get(WEIGHT_KEY)); m.put(WEIGHT_KEY, w); IWeightInit rwInit; if (c.getWeightInitFnRecurrent() != null) { rwInit = c.getWeightInitFnRecurrent(); } else { - rwInit = c.getWeightInitFn(); + rwInit = c.getWeightInit(); } INDArray rw = rwInit.init(nOut, nOut, new long[]{nOut, nOut}, 'f', m.get(RECURRENT_WEIGHT_KEY)); @@ -140,8 +133,8 @@ public class SimpleRnnParamInitializer implements ParamInitializer { } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - SimpleRnn c = (SimpleRnn)conf.getLayer(); + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + SimpleRnn c = (SimpleRnn)conf; val nIn = c.getNIn(); val nOut = c.getNOut(); @@ -172,7 +165,7 @@ public class SimpleRnnParamInitializer implements ParamInitializer { return m; } - protected boolean hasLayerNorm(Layer layer){ + protected boolean hasLayerNorm(LayerConfiguration layer){ if(layer instanceof SimpleRnn){ return ((SimpleRnn) layer).hasLayerNorm(); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/VariationalAutoencoderParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/VariationalAutoencoderParamInitializer.java index 399bf3a47..362c35170 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/VariationalAutoencoderParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/VariationalAutoencoderParamInitializer.java @@ -21,8 +21,7 @@ package org.deeplearning4j.nn.params; import lombok.val; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder; import 
org.deeplearning4j.nn.weights.IWeightInit; import org.nd4j.linalg.api.ndarray.INDArray; @@ -71,8 +70,8 @@ public class VariationalAutoencoderParamInitializer extends DefaultParamInitiali @Override - public long numParams(NeuralNetConfiguration conf) { - VariationalAutoencoder layer = (VariationalAutoencoder) conf.getLayer(); + public long numParams(LayerConfiguration conf) { + VariationalAutoencoder layer = (VariationalAutoencoder) conf; val nIn = layer.getNIn(); val nOut = layer.getNOut(); @@ -116,7 +115,7 @@ public class VariationalAutoencoderParamInitializer extends DefaultParamInitiali } @Override - public List paramKeys(Layer l) { + public List paramKeys(LayerConfiguration l) { VariationalAutoencoder layer = (VariationalAutoencoder) l; int[] encoderLayerSizes = layer.getEncoderLayerSizes(); int[] decoderLayerSizes = layer.getDecoderLayerSizes(); @@ -154,7 +153,7 @@ public class VariationalAutoencoderParamInitializer extends DefaultParamInitiali } @Override - public List weightKeys(Layer layer) { + public List weightKeys(LayerConfiguration layer) { List out = new ArrayList<>(); for(String s : paramKeys(layer)){ if(isWeightParam(layer, s)){ @@ -165,7 +164,7 @@ public class VariationalAutoencoderParamInitializer extends DefaultParamInitiali } @Override - public List biasKeys(Layer layer) { + public List biasKeys(LayerConfiguration layer) { List out = new ArrayList<>(); for(String s : paramKeys(layer)){ if(isBiasParam(layer, s)){ @@ -176,31 +175,31 @@ public class VariationalAutoencoderParamInitializer extends DefaultParamInitiali } @Override - public boolean isWeightParam(Layer layer, String key) { + public boolean isWeightParam(LayerConfiguration layer, String key) { return key.endsWith(WEIGHT_KEY_SUFFIX); } @Override - public boolean isBiasParam(Layer layer, String key) { + public boolean isBiasParam(LayerConfiguration layer, String key) { return key.endsWith(BIAS_KEY_SUFFIX); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { if (paramsView.length() != numParams(conf)) { throw new IllegalArgumentException("Incorrect paramsView length: Expected length " + numParams(conf) + ", got length " + paramsView.length()); } Map ret = new LinkedHashMap<>(); - VariationalAutoencoder layer = (VariationalAutoencoder) conf.getLayer(); + VariationalAutoencoder layer = (VariationalAutoencoder) conf; val nIn = layer.getNIn(); val nOut = layer.getNOut(); int[] encoderLayerSizes = layer.getEncoderLayerSizes(); int[] decoderLayerSizes = layer.getDecoderLayerSizes(); - IWeightInit weightInit = layer.getWeightInitFn(); + IWeightInit weightInit = layer.getWeightInit(); int soFar = 0; for (int i = 0; i < encoderLayerSizes.length; i++) { @@ -316,9 +315,9 @@ public class VariationalAutoencoderParamInitializer extends DefaultParamInitiali } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { Map ret = new LinkedHashMap<>(); - VariationalAutoencoder layer = (VariationalAutoencoder) conf.getLayer(); + VariationalAutoencoder layer = (VariationalAutoencoder) conf; val nIn = layer.getNIn(); val nOut = layer.getNOut(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/WrapperLayerParamInitializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/WrapperLayerParamInitializer.java index 
234226eb4..5744e70ad 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/WrapperLayerParamInitializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/params/WrapperLayerParamInitializer.java @@ -20,16 +20,15 @@ package org.deeplearning4j.nn.params; -import org.deeplearning4j.nn.api.ParamInitializer; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.Layer; -import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayer; +import org.deeplearning4j.nn.api.AbstractParamInitializer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.wrapper.BaseWrapperLayerConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import java.util.List; import java.util.Map; -public class WrapperLayerParamInitializer implements ParamInitializer { +public class WrapperLayerParamInitializer extends AbstractParamInitializer { private static final WrapperLayerParamInitializer INSTANCE = new WrapperLayerParamInitializer(); @@ -42,69 +41,64 @@ public class WrapperLayerParamInitializer implements ParamInitializer { } @Override - public long numParams(NeuralNetConfiguration conf) { - return numParams(conf.getLayer()); - } - - @Override - public long numParams(Layer layer) { - Layer l = underlying(layer); + public long numParams(LayerConfiguration layer) { + LayerConfiguration l = underlying(layer); return l.initializer().numParams(l); } @Override - public List paramKeys(Layer layer) { - Layer l = underlying(layer); + public List paramKeys(LayerConfiguration layer) { + LayerConfiguration l = underlying(layer); return l.initializer().paramKeys(l); } @Override - public List weightKeys(Layer layer) { - Layer l = underlying(layer); + public List weightKeys(LayerConfiguration layer) { + LayerConfiguration l = underlying(layer); return l.initializer().weightKeys(l); } @Override - public List biasKeys(Layer layer) { - Layer l = underlying(layer); + public List biasKeys(LayerConfiguration layer) { + LayerConfiguration l = underlying(layer); return l.initializer().biasKeys(l); } @Override - public boolean isWeightParam(Layer layer, String key) { - Layer l = underlying(layer); + public boolean isWeightParam(LayerConfiguration layer, String key) { + LayerConfiguration l = underlying(layer); return l.initializer().isWeightParam(layer, key); } @Override - public boolean isBiasParam(Layer layer, String key) { - Layer l = underlying(layer); + public boolean isBiasParam(LayerConfiguration layer, String key) { + LayerConfiguration l = underlying(layer); return l.initializer().isBiasParam(layer, key); } @Override - public Map init(NeuralNetConfiguration conf, INDArray paramsView, boolean initializeParams) { - Layer orig = conf.getLayer(); - Layer l = underlying(conf.getLayer()); - conf.setLayer(l); + public Map init(LayerConfiguration conf, INDArray paramsView, boolean initializeParams) { + LayerConfiguration orig = conf; + LayerConfiguration l = underlying(conf); + Map m = l.initializer().init(conf, paramsView, initializeParams); - conf.setLayer(orig); + return m; } @Override - public Map getGradientsFromFlattened(NeuralNetConfiguration conf, INDArray gradientView) { - Layer orig = conf.getLayer(); - Layer l = underlying(conf.getLayer()); - conf.setLayer(l); + public Map getGradientsFromFlattened(LayerConfiguration conf, INDArray gradientView) { + LayerConfiguration orig = conf; + LayerConfiguration l = underlying(conf); + Map m = l.initializer().getGradientsFromFlattened(conf, 
gradientView); - conf.setLayer(orig); + return m; } - private Layer underlying(Layer layer){ - while (layer instanceof BaseWrapperLayer) { - layer = ((BaseWrapperLayer)layer).getUnderlying(); + private LayerConfiguration underlying(LayerConfiguration layer){ + while (layer instanceof BaseWrapperLayerConfiguration) { + layer = ((BaseWrapperLayerConfiguration)layer).getUnderlying(); } return layer; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/FineTuneConfiguration.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/FineTuneConfiguration.java index 3f2ddd88b..b62e77e83 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/FineTuneConfiguration.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/FineTuneConfiguration.java @@ -20,23 +20,40 @@ package org.deeplearning4j.nn.transferlearning; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.core.JsonProcessingException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; import lombok.ToString; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.api.layers.LayerConstraint; -import org.deeplearning4j.nn.conf.*; +import org.deeplearning4j.nn.conf.BackpropType; +import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; +import org.deeplearning4j.nn.conf.ConvolutionMode; +import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.Updater; +import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.distribution.Distribution; import org.deeplearning4j.nn.conf.dropout.Dropout; import org.deeplearning4j.nn.conf.dropout.IDropout; -import org.deeplearning4j.nn.conf.layers.*; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerValidation; +import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; import org.deeplearning4j.nn.conf.stepfunctions.StepFunction; import org.deeplearning4j.nn.conf.weightnoise.IWeightNoise; import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.nn.weights.WeightInitDistribution; import org.deeplearning4j.util.NetworkUtils; +import org.nd4j.common.primitives.Optional; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.activations.IActivation; import org.nd4j.linalg.learning.config.IUpdater; @@ -44,14 +61,6 @@ import org.nd4j.linalg.learning.regularization.L1Regularization; import org.nd4j.linalg.learning.regularization.L2Regularization; import org.nd4j.linalg.learning.regularization.Regularization; import org.nd4j.linalg.learning.regularization.WeightDecay; -import org.nd4j.common.primitives.Optional; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonTypeInfo; -import com.fasterxml.jackson.core.JsonProcessingException; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; @JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "type") 
@JsonInclude(JsonInclude.Include.NON_NULL) @@ -60,738 +69,794 @@ import java.util.List; @Data public class FineTuneConfiguration { - protected IActivation activationFn; - protected IWeightInit weightInitFn; - protected Double biasInit; - protected List regularization; - protected List regularizationBias; + protected IActivation activationFn; + protected IWeightInit weightInitFn; + protected Double biasInit; + protected List regularization; + protected List regularizationBias; + protected boolean removeL2 = false; //For: .l2(0.0) -> user means "no l2" so we should remove it if it is present in the original model... + protected boolean removeL2Bias = false; + protected boolean removeL1 = false; + protected boolean removeL1Bias = false; + protected boolean removeWD = false; + protected boolean removeWDBias = false; + protected Optional dropout; + protected Optional weightNoise; + protected IUpdater updater; + protected IUpdater biasUpdater; + protected Boolean miniBatch; + protected Integer maxNumLineSearchIterations; + protected Long seed; + protected OptimizationAlgorithm optimizationAlgo; + protected StepFunction stepFunction; + protected Boolean minimize; + protected Optional gradientNormalization; + protected Double gradientNormalizationThreshold; + protected ConvolutionMode convolutionMode; + protected ConvolutionLayer.AlgoMode cudnnAlgoMode; + protected Optional> constraints; + + protected Boolean pretrain; + protected Boolean backprop; + protected BackpropType backpropType; + protected Integer tbpttFwdLength; + protected Integer tbpttBackLength; + + protected WorkspaceMode trainingWorkspaceMode; + protected WorkspaceMode inferenceWorkspaceMode; + + public static Builder builder() { + return new Builder(); + } + + private static T get(Optional optional) { + if (optional == null) { + return null; + } + return optional.orElse(null); + } + + public static FineTuneConfiguration fromJson(String json) { + try { + return NeuralNetConfiguration.mapper().readValue(json, FineTuneConfiguration.class); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static FineTuneConfiguration fromYaml(String yaml) { + try { + return NeuralNetConfiguration.mapperYaml().readValue(yaml, FineTuneConfiguration.class); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * public NeuralNetConfiguration appliedNeuralNetConfiguration(NeuralNetConfiguration nnc) { + * applyToNeuralNetConfiguration(nnc); nnc = new + * NeuralNetConfiguration.NeuralNetConfigurationBuilder(nnc.clone()).build(); return nnc; } + **/ + + public void applyToLayerConfiguration(LayerConfiguration layerConfiguration) { + + Updater originalUpdater = null; + WeightInit origWeightInit = null; + + if (layerConfiguration != null) { + //As per NeuralNetConfiguration.configureLayer and LayerValidation.configureBaseLayer: only copy dropout to base layers + // this excludes things like subsampling and activation layers + if (dropout != null && layerConfiguration instanceof BaseLayerConfiguration) { + IDropout d = dropout.orElse(null); + if (d != null) { + d = d.clone(); //Clone to avoid shared state between layers + } + layerConfiguration.setIDropout(d); + } + if (constraints != null) { + layerConfiguration.setConstraints(constraints.orElse(null)); + } + } + + if (layerConfiguration != null && layerConfiguration instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) layerConfiguration; + if (activationFn != null) { + bl.setActivationFn(activationFn); + } + if 
(weightInitFn != null) { + bl.setWeightInit(weightInitFn); + } + if (biasInit != null) { + bl.setBiasInit(biasInit); + } + if (regularization != null && !regularization.isEmpty()) { + bl.setRegularization(regularization); + } + if (regularizationBias != null && !regularizationBias.isEmpty()) { + bl.setRegularizationBias(regularizationBias); + } + if (removeL2) { + NetworkUtils.removeInstances(bl.getRegularization(), L2Regularization.class); + } + if (removeL2Bias) { + NetworkUtils.removeInstances(bl.getRegularizationBias(), L2Regularization.class); + } + if (removeL1) { + NetworkUtils.removeInstances(bl.getRegularization(), L1Regularization.class); + } + if (removeL1Bias) { + NetworkUtils.removeInstances(bl.getRegularizationBias(), L1Regularization.class); + } + if (removeWD) { + NetworkUtils.removeInstances(bl.getRegularization(), WeightDecay.class); + } + if (removeWDBias) { + NetworkUtils.removeInstances(bl.getRegularizationBias(), WeightDecay.class); + } + if (gradientNormalization != null) { + bl.setGradientNormalization(gradientNormalization.orElse(null)); + } + if (gradientNormalizationThreshold != null) { + bl.setGradientNormalizationThreshold(gradientNormalizationThreshold); + } + if (updater != null) { + bl.setIUpdater(updater); + } + if (biasUpdater != null) { + bl.setBiasUpdater(biasUpdater); + } + if (weightNoise != null) { + bl.setWeightNoise(weightNoise.orElse(null)); + } + } + NeuralNetConfiguration nnc = layerConfiguration.getNetConfiguration(); + if (miniBatch != null) { + nnc.setMiniBatch(miniBatch); + } + if (maxNumLineSearchIterations != null) { + nnc.setMaxNumLineSearchIterations(maxNumLineSearchIterations); + } + if (seed != null) { + nnc.setSeed(seed); + } + if (optimizationAlgo != null) { + nnc.setOptimizationAlgo(optimizationAlgo); + } + if (stepFunction != null) { + nnc.setStepFunction(stepFunction); + } + if (minimize != null) { + nnc.setMinimize(minimize); + } + + if (convolutionMode != null && layerConfiguration instanceof ConvolutionLayer) { + ((ConvolutionLayer) layerConfiguration).setConvolutionMode(convolutionMode); + } + if (cudnnAlgoMode != null && layerConfiguration instanceof ConvolutionLayer) { + ((ConvolutionLayer) layerConfiguration).setCudnnAlgoMode(cudnnAlgoMode); + } + if (convolutionMode != null && layerConfiguration instanceof SubsamplingLayer) { + ((SubsamplingLayer) layerConfiguration).setConvolutionMode(convolutionMode); + } + + //Perform validation + if (layerConfiguration != null) { + LayerValidation.generalValidation(layerConfiguration.getLayerName(), layerConfiguration, get(dropout), regularization, + regularizationBias, + get(constraints), null, null); + } + } + + + public void applyToComputationGraphConfiguration(ComputationGraphConfiguration conf) { + if (backpropType != null) { + conf.setBackpropType(backpropType); + } + if (tbpttFwdLength != null) { + conf.setTbpttFwdLength(tbpttFwdLength); + } + if (tbpttBackLength != null) { + conf.setTbpttBackLength(tbpttBackLength); + } + } + + public NeuralNetConfiguration appliedNeuralNetConfigurationBuilder() { + NeuralNetConfiguration.NeuralNetConfigurationBuilder confBuilder = NeuralNetConfiguration.builder(); + + if (activationFn != null) { + confBuilder.activation(activationFn); + } + if (weightInitFn != null) { + confBuilder.weightInit(weightInitFn); + } + if (biasInit != null) { + confBuilder.biasInit(biasInit); + } + if (regularization != null) { + confBuilder.regularization(regularization); + } + if (regularizationBias != null) { + confBuilder.regularizationBias(regularizationBias); + 
} + if (dropout != null) { + confBuilder.idropOut(dropout.orElse(null)); + } + if (updater != null) { + confBuilder.updater(updater); + } + if (biasUpdater != null) { + confBuilder.biasUpdater(biasUpdater); + } + if (miniBatch != null) { + confBuilder.miniBatch(miniBatch); + } + if (maxNumLineSearchIterations != null) { + confBuilder.maxNumLineSearchIterations(maxNumLineSearchIterations); + } + if (seed != null) { + confBuilder.seed(seed); + } + if (optimizationAlgo != null) { + confBuilder.optimizationAlgo(optimizationAlgo); + } + if (stepFunction != null) { + confBuilder.stepFunction(stepFunction); + } + if (minimize != null) { + confBuilder.minimize(minimize); + } + if (gradientNormalization != null) { + confBuilder.gradientNormalization(gradientNormalization.orElse(null)); + } + if (gradientNormalizationThreshold != null) { + confBuilder.gradientNormalizationThreshold(gradientNormalizationThreshold); + } + if (trainingWorkspaceMode != null) { + confBuilder.trainingWorkspaceMode(trainingWorkspaceMode); + } + if (inferenceWorkspaceMode != null) { + confBuilder.inferenceWorkspaceMode(inferenceWorkspaceMode); + } + return confBuilder.build(); + } + + public String toJson() { + try { + return NeuralNetConfiguration.mapper().writeValueAsString(this); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + public String toYaml() { + try { + return NeuralNetConfiguration.mapperYaml().writeValueAsString(this); + } catch (JsonProcessingException e) { + throw new RuntimeException(e); + } + } + + /* + * Can't use Lombok @Builder annotation due to optionals (otherwise we have a bunch of ugly .x(Optional value) + * methods - lombok builder doesn't support excluding fields? :( + * Note the use of optional here: gives us 3 states... + * 1. Null: not set + * 2. Optional (empty): set to null + * 3. Optional (not empty): set to specific value + * + * Obviously, having null only makes sense for some things (dropout, etc) whereas null for other things doesn't + * make sense + */ + @ToString + public static class Builder { + + protected List regularization = new ArrayList<>(); + protected List regularizationBias = new ArrayList<>(); protected boolean removeL2 = false; //For: .l2(0.0) -> user means "no l2" so we should remove it if it is present in the original model... 
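    /* Illustrative sketch only - not part of this patch. Assuming the usual build() method on
     * this Builder and the setters shown elsewhere in this class, the three Optional states
     * described above play out like this:
     *
     *   FineTuneConfiguration ftc = FineTuneConfiguration.builder()
     *       .updater(new Adam(1e-4))   // state 3: Optional present -> overrides the fine-tuned layers
     *       .dropout(null)             // state 2: Optional empty   -> explicitly clears dropout
     *       .build();                  // l2(...) never called      -> state 1: null, original L2 kept
     *
     * Adam here is org.nd4j.linalg.learning.config.Adam, as referenced in the updater() javadoc.
     */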
protected boolean removeL2Bias = false; protected boolean removeL1 = false; protected boolean removeL1Bias = false; protected boolean removeWD = false; protected boolean removeWDBias = false; - protected Optional dropout; - protected Optional weightNoise; - protected IUpdater updater; - protected IUpdater biasUpdater; - protected Boolean miniBatch; - protected Integer maxNumLineSearchIterations; - protected Long seed; - protected OptimizationAlgorithm optimizationAlgo; - protected StepFunction stepFunction; - protected Boolean minimize; - protected Optional gradientNormalization; - protected Double gradientNormalizationThreshold; - protected ConvolutionMode convolutionMode; - protected ConvolutionLayer.AlgoMode cudnnAlgoMode; - protected Optional> constraints; + private IActivation activation; + private IWeightInit weightInitFn; + private Double biasInit; + private Optional dropout; + private Optional weightNoise; + private IUpdater updater; + private IUpdater biasUpdater; + private Boolean miniBatch; + private Integer maxNumLineSearchIterations; + private Long seed; + private OptimizationAlgorithm optimizationAlgo; + private StepFunction stepFunction; + private Boolean minimize; + private Optional gradientNormalization; + private Double gradientNormalizationThreshold; + private ConvolutionMode convolutionMode; + private ConvolutionLayer.AlgoMode cudnnAlgoMode; + private Optional> constraints; + private Boolean pretrain; + private Boolean backprop; + private BackpropType backpropType; + private Integer tbpttFwdLength; + private Integer tbpttBackLength; + private WorkspaceMode trainingWorkspaceMode; + private WorkspaceMode inferenceWorkspaceMode; - protected Boolean pretrain; - protected Boolean backprop; - protected BackpropType backpropType; - protected Integer tbpttFwdLength; - protected Integer tbpttBackLength; + public Builder() { - protected WorkspaceMode trainingWorkspaceMode; - protected WorkspaceMode inferenceWorkspaceMode; - - public static Builder builder() { - return new Builder(); } - /* - * Can't use Lombok @Builder annotation due to optionals (otherwise we have a bunch of ugly .x(Optional value) - * methods - lombok builder doesn't support excluding fields? :( - * Note the use of optional here: gives us 3 states... - * 1. Null: not set - * 2. Optional (empty): set to null - * 3. Optional (not empty): set to specific value - * - * Obviously, having null only makes sense for some things (dropout, etc) whereas null for other things doesn't - * make sense + /** + * Activation function / neuron non-linearity */ - @ToString - public static class Builder { - private IActivation activation; - private IWeightInit weightInitFn; - private Double biasInit; - protected List regularization = new ArrayList<>(); - protected List regularizationBias = new ArrayList<>(); - protected boolean removeL2 = false; //For: .l2(0.0) -> user means "no l2" so we should remove it if it is present in the original model... 
- protected boolean removeL2Bias = false; - protected boolean removeL1 = false; - protected boolean removeL1Bias = false; - protected boolean removeWD = false; - protected boolean removeWDBias = false; - private Optional dropout; - private Optional weightNoise; - private IUpdater updater; - private IUpdater biasUpdater; - private Boolean miniBatch; - private Integer maxNumLineSearchIterations; - private Long seed; - private OptimizationAlgorithm optimizationAlgo; - private StepFunction stepFunction; - private Boolean minimize; - private Optional gradientNormalization; - private Double gradientNormalizationThreshold; - private ConvolutionMode convolutionMode; - private ConvolutionLayer.AlgoMode cudnnAlgoMode; - private Optional> constraints; - private Boolean pretrain; - private Boolean backprop; - private BackpropType backpropType; - private Integer tbpttFwdLength; - private Integer tbpttBackLength; - private WorkspaceMode trainingWorkspaceMode; - private WorkspaceMode inferenceWorkspaceMode; + public Builder activation(IActivation activationFn) { + this.activation = activationFn; + return this; + } - public Builder() { + /** + * Activation function / neuron non-linearity + */ + public Builder activation(Activation activation) { + this.activation = activation.getActivationFunction(); + return this; + } - } + /** + * Weight initialization scheme to use, for initial weight values + * + * @see IWeightInit + */ + public Builder weightInit(IWeightInit weightInit) { + this.weightInitFn = weightInit; + return this; + } - /** - * Activation function / neuron non-linearity - */ - public Builder activation(IActivation activationFn) { - this.activation = activationFn; - return this; - } + /** + * Weight initialization scheme to use, for initial weight values + * + * @see WeightInit + */ + public Builder weightInit(WeightInit weightInit) { + if (weightInit == WeightInit.DISTRIBUTION) { + throw new UnsupportedOperationException( + "Not supported!, User weightInit(Distribution distribution) instead!"); + } - /** - * Activation function / neuron non-linearity - */ - public Builder activation(Activation activation) { - this.activation = activation.getActivationFunction(); - return this; - } - - /** - * Weight initialization scheme to use, for initial weight values - * - * @see IWeightInit - */ - public Builder weightInit(IWeightInit weightInit) { - this.weightInitFn = weightInit; - return this; - } - - /** - * Weight initialization scheme to use, for initial weight values - * - * @see WeightInit - */ - public Builder weightInit(WeightInit weightInit) { - if(weightInit == WeightInit.DISTRIBUTION) { - throw new UnsupportedOperationException("Not supported!, User weightInit(Distribution distribution) instead!"); - } - - this.weightInitFn = weightInit.getWeightInitFunction(); - return this; - } - - - /** - * Set weight initialization scheme to random sampling via the specified distribution. - * Equivalent to: {@code .weightInit(new WeightInitDistribution(distribution))} - * - * @param distribution Distribution to use for weight initialization - */ - public Builder weightInit(Distribution distribution){ - return weightInit(new WeightInitDistribution(distribution)); - } - - /** - * Constant for bias initialization. Default: 0.0 - * - * @param biasInit Constant for bias initialization - */ - public Builder biasInit(double biasInit) { - this.biasInit = biasInit; - return this; - } - - /** - * Distribution to sample initial weights from. 
- * Equivalent to: {@code .weightInit(new WeightInitDistribution(distribution))} - */ - @Deprecated - public Builder dist(Distribution dist) { - return weightInit(dist); - } - - /** - * L1 regularization coefficient for the weights (excluding biases) - */ - public Builder l1(double l1) { - NetworkUtils.removeInstances(regularization, L1Regularization.class); - if(l1 > 0.0) { - regularization.add(new L1Regularization(l1)); - } - return this; - } - - /** - * L2 regularization coefficient for the weights (excluding biases)
- * Note: Generally, {@link WeightDecay} (set via {@link #weightDecay(double,boolean)}) should be preferred to - * L2 regularization. See {@link WeightDecay} javadoc for further details.<br>
- */ - public Builder l2(double l2) { - NetworkUtils.removeInstances(regularization, L2Regularization.class); - if(l2 > 0.0) { - NetworkUtils.removeInstancesWithWarning(regularization, WeightDecay.class, "WeightDecay regularization removed: incompatible with added L2 regularization"); - regularization.add(new L2Regularization(l2)); - } else { - removeL2 = true; - } - return this; - } - - /** - * L1 regularization coefficient for the bias parameters - */ - public Builder l1Bias(double l1Bias) { - NetworkUtils.removeInstances(regularizationBias, L1Regularization.class); - if(l1Bias > 0.0) { - regularizationBias.add(new L1Regularization(l1Bias)); - } else { - removeL1Bias = true; - } - return this; - } - - /** - * L2 regularization coefficient for the bias parameters
- * Note: Generally, {@link WeightDecay} (set via {@link #weightDecayBias(double,boolean)}) should be preferred to - * L2 regularization. See {@link WeightDecay} javadoc for further details.<br>
- */ - public Builder l2Bias(double l2Bias) { - NetworkUtils.removeInstances(regularizationBias, L2Regularization.class); - if(l2Bias > 0.0) { - NetworkUtils.removeInstancesWithWarning(regularizationBias, WeightDecay.class, "WeightDecay bias regularization removed: incompatible with added L2 regularization"); - regularizationBias.add(new L2Regularization(l2Bias)); - } else { - removeL2Bias = true; - } - return this; - } - - /** - * Add weight decay regularization for the network parameters (excluding biases).
- * This applies weight decay with multiplying the learning rate - see {@link WeightDecay} for more details.
- * - * @param coefficient Weight decay regularization coefficient - * @see #weightDecay(double, boolean) - */ - public Builder weightDecay(double coefficient) { - return weightDecay(coefficient, true); - } - - /** - * Add weight decay regularization for the network parameters (excluding biases). See {@link WeightDecay} for more details.
- * - * @param coefficient Weight decay regularization coefficient - * @param applyLR Whether the learning rate should be multiplied in when performing weight decay updates. See {@link WeightDecay} for more details. - * @see #weightDecay(double, boolean) - */ - public Builder weightDecay(double coefficient, boolean applyLR) { - //Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't make sense to use both - NetworkUtils.removeInstances(this.regularization, WeightDecay.class); - if(coefficient > 0.0) { - NetworkUtils.removeInstancesWithWarning(this.regularization, L2Regularization.class, "L2 regularization removed: incompatible with added WeightDecay regularization"); - this.regularization.add(new WeightDecay(coefficient, applyLR)); - } else { - removeWD = true; - } - return this; - } - - /** - * Weight decay for the biases only - see {@link #weightDecay(double)} for more details. - * This applies weight decay with multiplying the learning rate.
- * - * @param coefficient Weight decay regularization coefficient - * @see #weightDecayBias(double, boolean) - */ - public Builder weightDecayBias(double coefficient) { - return weightDecayBias(coefficient, true); - } - - /** - * Weight decay for the biases only - see {@link #weightDecay(double)} for more details
- * - * @param coefficient Weight decay regularization coefficient - */ - public Builder weightDecayBias(double coefficient, boolean applyLR) { - //Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't make sense to use both - NetworkUtils.removeInstances(this.regularizationBias, WeightDecay.class); - if(coefficient > 0) { - NetworkUtils.removeInstancesWithWarning(this.regularizationBias, L2Regularization.class, "L2 bias regularization removed: incompatible with added WeightDecay regularization"); - this.regularizationBias.add(new WeightDecay(coefficient, applyLR)); - } else { - removeWDBias = true; - } - return this; - } - - /** - * Set the dropout - * - * @param dropout Dropout, such as {@link Dropout}, {@link org.deeplearning4j.nn.conf.dropout.GaussianDropout}, - * {@link org.deeplearning4j.nn.conf.dropout.GaussianNoise} etc - */ - public Builder dropout(IDropout dropout) { - this.dropout = Optional.ofNullable(dropout); - return this; - } - - /** - * Dropout probability. This is the probability of retaining each input activation value for a layer. - * dropOut(x) will keep an input activation with probability x, and set to 0 with probability 1-x.
- * dropOut(0.0) is a special value / special case - when set to 0.0, dropout is disabled (not applied). Note - * that a dropout value of 1.0 is functionally equivalent to no dropout: i.e., 100% probability of retaining - * each input activation.<br>
- *

- * Note 1: Dropout is applied at training time only - and is automatically not applied at test time - * (for evaluation, etc)
- * Note 2: This sets the probability per-layer. Care should be taken when setting lower values for - * complex networks (too much information may be lost with aggressive (very low) dropout values).
- * Note 3: Frequently, dropout is not applied to (or, has higher retain probability for) input (first layer) - * layers. Dropout is also often not applied to output layers. This needs to be handled MANUALLY by the user - * - set .dropout(0) on those layers when using global dropout setting.
- * Note 4: Implementation detail (most users can ignore): DL4J uses inverted dropout, as described here: - * http://cs231n.github.io/neural-networks-2/ - *

- * - * @param inputRetainProbability Dropout probability (probability of retaining each input activation value for a layer) - * @see #dropout(IDropout) - */ - public Builder dropOut(double inputRetainProbability){ - if(inputRetainProbability == 0.0){ - return dropout(null); - } - return dropout(new Dropout(inputRetainProbability)); - } - - /** - * Set the weight noise (such as {@link org.deeplearning4j.nn.conf.weightnoise.DropConnect} and - * {@link org.deeplearning4j.nn.conf.weightnoise.WeightNoise}) - * - * @param weightNoise Weight noise instance to use - */ - public Builder weightNoise(IWeightNoise weightNoise) { - this.weightNoise = Optional.ofNullable(weightNoise); - return this; - } - - /** - * Gradient updater configuration. For example, {@link org.nd4j.linalg.learning.config.Adam} - * or {@link org.nd4j.linalg.learning.config.Nesterovs} - * - * @param updater Updater to use - */ - public Builder updater(IUpdater updater) { - this.updater = updater; - return this; - } - - /** - * @deprecated Use {@link #updater(IUpdater)} - */ - @Deprecated - public Builder updater(Updater updater) { - return updater(updater.getIUpdaterWithDefaultConfig()); - } - - /** - * Gradient updater configuration, for the biases only. If not set, biases will use the updater as - * set by {@link #updater(IUpdater)} - * - * @param biasUpdater Updater to use for bias parameters - */ - public Builder biasUpdater(IUpdater biasUpdater) { - this.biasUpdater = biasUpdater; - return this; - } - - /** - * Whether scores and gradients should be divided by the minibatch size.
- * Most users should leave this as the default value of true. - */ - public Builder miniBatch(boolean miniBatch) { - this.miniBatch = miniBatch; - return this; - } - - public Builder maxNumLineSearchIterations(int maxNumLineSearchIterations) { - this.maxNumLineSearchIterations = maxNumLineSearchIterations; - return this; - } - - /** - * RNG seed for reproducibility - * @param seed RNG seed to use - */ - public Builder seed(long seed) { - this.seed = seed; - return this; - } - - /** - * RNG seed for reproducibility - * @param seed RNG seed to use - */ - public Builder seed(int seed){ - return seed((long)seed); - } - - public Builder optimizationAlgo(OptimizationAlgorithm optimizationAlgo) { - this.optimizationAlgo = optimizationAlgo; - return this; - } - - public Builder stepFunction(StepFunction stepFunction) { - this.stepFunction = stepFunction; - return this; - } - - public Builder minimize(boolean minimize) { - this.minimize = minimize; - return this; - } - - /** - * Gradient normalization strategy. Used to specify gradient renormalization, gradient clipping etc. - * See {@link GradientNormalization} for details - * - * @param gradientNormalization Type of normalization to use. Defaults to None. - * @see GradientNormalization - */ - public Builder gradientNormalization(GradientNormalization gradientNormalization) { - this.gradientNormalization = Optional.ofNullable(gradientNormalization); - return this; - } - - /** - * Threshold for gradient normalization, only used for GradientNormalization.ClipL2PerLayer, - * GradientNormalization.ClipL2PerParamType, and GradientNormalization.ClipElementWiseAbsoluteValue<br>
- * Not used otherwise.
- * L2 threshold for first two types of clipping, or absolute value threshold for last type of clipping - */ - public Builder gradientNormalizationThreshold(double gradientNormalizationThreshold) { - this.gradientNormalizationThreshold = gradientNormalizationThreshold; - return this; - } - - /** - * Sets the convolution mode for convolutional layers, which impacts padding and output sizes. - * See {@link ConvolutionMode} for details. Defaults to ConvolutionMode.TRUNCATE
- * @param convolutionMode Convolution mode to use - */ - public Builder convolutionMode(ConvolutionMode convolutionMode) { - this.convolutionMode = convolutionMode; - return this; - } - - /** - * Sets the cuDNN algo mode for convolutional layers, which impacts performance and memory usage of cuDNN. - * See {@link ConvolutionLayer.AlgoMode} for details. Defaults to "PREFER_FASTEST", but "NO_WORKSPACE" uses less memory. - */ - public Builder cudnnAlgoMode(ConvolutionLayer.AlgoMode cudnnAlgoMode) { - this.cudnnAlgoMode = cudnnAlgoMode; - return this; - } - - /** - * Set constraints to be applied to all layers. Default: no constraints.
- * Constraints can be used to enforce certain conditions (non-negativity of parameters, max-norm regularization, - * etc). These constraints are applied at each iteration, after the parameters have been updated. - * - * @param constraints Constraints to apply to all parameters of all layers - */ - public Builder constraints(List constraints) { - this.constraints = Optional.ofNullable(constraints); - return this; - } - - public Builder pretrain(boolean pretrain) { - this.pretrain = pretrain; - return this; - } - - public Builder backprop(boolean backprop) { - this.backprop = backprop; - return this; - } - - /** - * The type of backprop. Default setting is used for most networks (MLP, CNN etc), - * but optionally truncated BPTT can be used for training recurrent neural networks. - * If using TruncatedBPTT make sure you set both tBPTTForwardLength() and tBPTTBackwardLength() - * - * @param backpropType Type of backprop. Default: BackpropType.Standard - */ - public Builder backpropType(BackpropType backpropType) { - this.backpropType = backpropType; - return this; - } - - /** - * When doing truncated BPTT: how many steps of forward pass should we do - * before doing (truncated) backprop?
- * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
- * Typically tBPTTForwardLength parameter is same as the tBPTTBackwardLength parameter, - * but may be larger than it in some circumstances (but never smaller)
- * Ideally your training data time series length should be divisible by this - * This is the k1 parameter on pg23 of - * http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf - * - * @param tbpttFwdLength Forward length > 0, >= backwardLength - */ - public Builder tbpttFwdLength(int tbpttFwdLength) { - this.tbpttFwdLength = tbpttFwdLength; - return this; - } - - /** - * When doing truncated BPTT: how many steps of backward should we do?
- * Only applicable when doing backpropType(BackpropType.TruncatedBPTT)
- * This is the k2 parameter on pg23 of - * http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf - * - * @param tbpttBackLength <= forwardLength - */ - public Builder tbpttBackLength(int tbpttBackLength) { - this.tbpttBackLength = tbpttBackLength; - return this; - } - - /** - * This method defines Workspace mode being used during training: - * NONE: workspace won't be used - * ENABLED: workspaces will be used for training (reduced memory and better performance) - * - * @param trainingWorkspaceMode Workspace mode for training - * @return Builder - */ - public Builder trainingWorkspaceMode(WorkspaceMode trainingWorkspaceMode) { - this.trainingWorkspaceMode = trainingWorkspaceMode; - return this; - } - - /** - * This method defines Workspace mode being used during inference:
- * NONE: workspace won't be used
- * ENABLED: workspaces will be used for inference (reduced memory and better performance) - * - * @param inferenceWorkspaceMode Workspace mode for inference - * @return Builder - */ - public Builder inferenceWorkspaceMode(WorkspaceMode inferenceWorkspaceMode) { - this.inferenceWorkspaceMode = inferenceWorkspaceMode; - return this; - } - - public FineTuneConfiguration build() { - return new FineTuneConfiguration(activation, weightInitFn, biasInit, regularization, regularizationBias, - removeL2, removeL2Bias, removeL1, removeL1Bias, removeWD, removeWDBias, dropout, - weightNoise, updater, biasUpdater, miniBatch, maxNumLineSearchIterations, seed, optimizationAlgo, stepFunction, - minimize, gradientNormalization, gradientNormalizationThreshold, convolutionMode, cudnnAlgoMode, constraints, - pretrain, backprop, backpropType, tbpttFwdLength, tbpttBackLength, trainingWorkspaceMode, inferenceWorkspaceMode); - } + this.weightInitFn = weightInit.getWeightInitFunction(); + return this; } - public NeuralNetConfiguration appliedNeuralNetConfiguration(NeuralNetConfiguration nnc) { - applyToNeuralNetConfiguration(nnc); - nnc = new NeuralNetConfiguration.Builder(nnc.clone()).build(); - return nnc; + /** + * Set weight initialization scheme to random sampling via the specified distribution. + * Equivalent to: {@code .weightInit(new WeightInitDistribution(distribution))} + * + * @param distribution Distribution to use for weight initialization + */ + public Builder weightInit(Distribution distribution) { + return weightInit(new WeightInitDistribution(distribution)); } - public void applyToNeuralNetConfiguration(NeuralNetConfiguration nnc) { - - Layer l = nnc.getLayer(); - Updater originalUpdater = null; - WeightInit origWeightInit = null; - - if (l != null) { - //As per NeuralNetConfiguration.configureLayer and LayerValidation.configureBaseLayer: only copy dropout to base layers - // this excludes things like subsampling and activation layers - if (dropout != null && l instanceof BaseLayer) { - IDropout d = dropout.orElse(null); - if(d != null) - d = d.clone(); //Clone to avoid shared state between layers - l.setIDropout(d); - } - if(constraints != null) - l.setConstraints(constraints.orElse(null)); - } - - if (l != null && l instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) l; - if (activationFn != null) - bl.setActivationFn(activationFn); - if (weightInitFn != null) - bl.setWeightInitFn(weightInitFn); - if (biasInit != null) - bl.setBiasInit(biasInit); - if (regularization != null && !regularization.isEmpty()) - bl.setRegularization(regularization); - if (regularizationBias != null && !regularizationBias.isEmpty()) - bl.setRegularizationBias(regularizationBias); - if (removeL2) - NetworkUtils.removeInstances(bl.getRegularization(), L2Regularization.class); - if (removeL2Bias) - NetworkUtils.removeInstances(bl.getRegularizationBias(), L2Regularization.class); - if (removeL1) - NetworkUtils.removeInstances(bl.getRegularization(), L1Regularization.class); - if (removeL1Bias) - NetworkUtils.removeInstances(bl.getRegularizationBias(), L1Regularization.class); - if (removeWD) - NetworkUtils.removeInstances(bl.getRegularization(), WeightDecay.class); - if (removeWDBias) - NetworkUtils.removeInstances(bl.getRegularizationBias(), WeightDecay.class); - if (gradientNormalization != null) - bl.setGradientNormalization(gradientNormalization.orElse(null)); - if (gradientNormalizationThreshold != null) - bl.setGradientNormalizationThreshold(gradientNormalizationThreshold); - if (updater != null){ - 
bl.setIUpdater(updater); - } - if (biasUpdater != null){ - bl.setBiasUpdater(biasUpdater); - } - if (weightNoise != null){ - bl.setWeightNoise(weightNoise.orElse(null)); - } - } - if (miniBatch != null) - nnc.setMiniBatch(miniBatch); - if (maxNumLineSearchIterations != null) - nnc.setMaxNumLineSearchIterations(maxNumLineSearchIterations); - if (seed != null) - nnc.setSeed(seed); - if (optimizationAlgo != null) - nnc.setOptimizationAlgo(optimizationAlgo); - if (stepFunction != null) - nnc.setStepFunction(stepFunction); - if (minimize != null) - nnc.setMinimize(minimize); - - if (convolutionMode != null && l instanceof ConvolutionLayer) { - ((ConvolutionLayer) l).setConvolutionMode(convolutionMode); - } - if (cudnnAlgoMode != null && l instanceof ConvolutionLayer) { - ((ConvolutionLayer) l).setCudnnAlgoMode(cudnnAlgoMode); - } - if (convolutionMode != null && l instanceof SubsamplingLayer) { - ((SubsamplingLayer) l).setConvolutionMode(convolutionMode); - } - - //Perform validation - if (l != null) { - LayerValidation.generalValidation(l.getLayerName(), l, get(dropout), regularization, regularizationBias, - get(constraints), null, null); - } + /** + * Constant for bias initialization. Default: 0.0 + * + * @param biasInit Constant for bias initialization + */ + public Builder biasInit(double biasInit) { + this.biasInit = biasInit; + return this; } - private static T get(Optional optional){ - if(optional == null){ - return null; - } - return optional.orElse(null); + /** + * Distribution to sample initial weights from. Equivalent to: + * {@code .weightInit(new WeightInitDistribution(distribution))} + */ + @Deprecated + public Builder dist(Distribution dist) { + return weightInit(dist); } - public void applyToMultiLayerConfiguration(MultiLayerConfiguration conf) { - if (backpropType != null) - conf.setBackpropType(backpropType); - if (tbpttFwdLength != null) - conf.setTbpttFwdLength(tbpttFwdLength); - if (tbpttBackLength != null) - conf.setTbpttBackLength(tbpttBackLength); + /** + * L1 regularization coefficient for the weights (excluding biases) + */ + public Builder l1(double l1) { + NetworkUtils.removeInstances(regularization, L1Regularization.class); + if (l1 > 0.0) { + regularization.add(new L1Regularization(l1)); + } + return this; } - public void applyToComputationGraphConfiguration(ComputationGraphConfiguration conf) { - if (backpropType != null) - conf.setBackpropType(backpropType); - if (tbpttFwdLength != null) - conf.setTbpttFwdLength(tbpttFwdLength); - if (tbpttBackLength != null) - conf.setTbpttBackLength(tbpttBackLength); + /** + * L2 regularization coefficient for the weights (excluding biases)
+ * Note: Generally, {@link WeightDecay} (set via {@link #weightDecay(double, boolean)}) + * should be preferred to + * L2 regularization. See {@link WeightDecay} javadoc for further details.<br>
+ */ + public Builder l2(double l2) { + NetworkUtils.removeInstances(regularization, L2Regularization.class); + if (l2 > 0.0) { + NetworkUtils.removeInstancesWithWarning(regularization, WeightDecay.class, + "WeightDecay regularization removed: incompatible with added L2 regularization"); + regularization.add(new L2Regularization(l2)); + } else { + removeL2 = true; + } + return this; } - public NeuralNetConfiguration.Builder appliedNeuralNetConfigurationBuilder() { - NeuralNetConfiguration.Builder confBuilder = new NeuralNetConfiguration.Builder(); - if (activationFn != null) - confBuilder.setActivationFn(activationFn); - if (weightInitFn != null) - confBuilder.setWeightInitFn(weightInitFn); - if (biasInit != null) - confBuilder.setBiasInit(biasInit); - if (regularization != null) - confBuilder.setRegularization(regularization); - if (regularizationBias != null) - confBuilder.setRegularizationBias(regularizationBias); - if (dropout != null) - confBuilder.setIdropOut(dropout.orElse(null)); - if (updater != null) - confBuilder.updater(updater); - if(biasUpdater != null) - confBuilder.biasUpdater(biasUpdater); - if (miniBatch != null) - confBuilder.setMiniBatch(miniBatch); - if (maxNumLineSearchIterations != null) - confBuilder.setMaxNumLineSearchIterations(maxNumLineSearchIterations); - if (seed != null) - confBuilder.setSeed(seed); - if (optimizationAlgo != null) - confBuilder.setOptimizationAlgo(optimizationAlgo); - if (stepFunction != null) - confBuilder.setStepFunction(stepFunction); - if (minimize != null) - confBuilder.setMinimize(minimize); - if (gradientNormalization != null) - confBuilder.setGradientNormalization(gradientNormalization.orElse(null)); - if (gradientNormalizationThreshold != null) - confBuilder.setGradientNormalizationThreshold(gradientNormalizationThreshold); - if (trainingWorkspaceMode != null) - confBuilder.trainingWorkspaceMode(trainingWorkspaceMode); - if (inferenceWorkspaceMode != null) - confBuilder.inferenceWorkspaceMode(inferenceWorkspaceMode); - return confBuilder; + /** + * L1 regularization coefficient for the bias parameters + */ + public Builder l1Bias(double l1Bias) { + NetworkUtils.removeInstances(regularizationBias, L1Regularization.class); + if (l1Bias > 0.0) { + regularizationBias.add(new L1Regularization(l1Bias)); + } else { + removeL1Bias = true; + } + return this; } - - public String toJson() { - try { - return NeuralNetConfiguration.mapper().writeValueAsString(this); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } + /** + * L2 regularization coefficient for the bias parameters
+ * Note: Generally, {@link WeightDecay} (set via {@link #weightDecayBias(double, boolean)}) + * should be preferred to + * L2 regularization. See {@link WeightDecay} javadoc for further details.<br>
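For illustration, a minimal sketch of the recommendation above, assuming the FineTuneConfiguration.Builder API added in this hunk (the updater and coefficients are placeholder values, not part of this patch):

    import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration;
    import org.nd4j.linalg.learning.config.Adam;

    // Prefer weightDecay(...) over l2(...): per the builder logic in this hunk, adding one
    // removes the other (with a warning), so the two are never active at the same time.
    FineTuneConfiguration ftc = new FineTuneConfiguration.Builder()
            .updater(new Adam(1e-4))
            .weightDecay(1e-5, true)   // weight decay with the learning rate multiplied in
            .weightDecayBias(0.0)      // 0.0 disables weight decay for the bias parameters
            .build();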
+ */ + public Builder l2Bias(double l2Bias) { + NetworkUtils.removeInstances(regularizationBias, L2Regularization.class); + if (l2Bias > 0.0) { + NetworkUtils.removeInstancesWithWarning(regularizationBias, WeightDecay.class, + "WeightDecay bias regularization removed: incompatible with added L2 regularization"); + regularizationBias.add(new L2Regularization(l2Bias)); + } else { + removeL2Bias = true; + } + return this; } - public String toYaml() { - try { - return NeuralNetConfiguration.mapperYaml().writeValueAsString(this); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } + /** + * Add weight decay regularization for the network parameters (excluding biases).
This + * applies weight decay with the learning rate multiplied in - see {@link WeightDecay} for + * more details.<br>
+ * + * @param coefficient Weight decay regularization coefficient + * @see #weightDecay(double, boolean) + */ + public Builder weightDecay(double coefficient) { + return weightDecay(coefficient, true); } - public static FineTuneConfiguration fromJson(String json) { - try { - return NeuralNetConfiguration.mapper().readValue(json, FineTuneConfiguration.class); - } catch (IOException e) { - throw new RuntimeException(e); - } + /** + * Add weight decay regularization for the network parameters (excluding biases). See + * {@link WeightDecay} for more details.
+ * + * @param coefficient Weight decay regularization coefficient + * @param applyLR Whether the learning rate should be multiplied in when performing weight + * decay updates. See {@link WeightDecay} for more details. + * @see #weightDecay(double, boolean) + */ + public Builder weightDecay(double coefficient, boolean applyLR) { + //Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't make sense to use both + NetworkUtils.removeInstances(this.regularization, WeightDecay.class); + if (coefficient > 0.0) { + NetworkUtils.removeInstancesWithWarning(this.regularization, L2Regularization.class, + "L2 regularization removed: incompatible with added WeightDecay regularization"); + this.regularization.add(new WeightDecay(coefficient, applyLR)); + } else { + removeWD = true; + } + return this; } - public static FineTuneConfiguration fromYaml(String yaml) { - try { - return NeuralNetConfiguration.mapperYaml().readValue(yaml, FineTuneConfiguration.class); - } catch (IOException e) { - throw new RuntimeException(e); - } + /** + * Weight decay for the biases only - see {@link #weightDecay(double)} for more details. This + * applies weight decay with multiplying the learning rate.
+ * + * @param coefficient Weight decay regularization coefficient + * @see #weightDecayBias(double, boolean) + */ + public Builder weightDecayBias(double coefficient) { + return weightDecayBias(coefficient, true); } + + /** + * Weight decay for the biases only - see {@link #weightDecay(double)} for more details
+ * + * @param coefficient Weight decay regularization coefficient + */ + public Builder weightDecayBias(double coefficient, boolean applyLR) { + //Check if existing weight decay if it exists; if so, replace it. Also remove L2 - it doesn't make sense to use both + NetworkUtils.removeInstances(this.regularizationBias, WeightDecay.class); + if (coefficient > 0) { + NetworkUtils.removeInstancesWithWarning(this.regularizationBias, L2Regularization.class, + "L2 bias regularization removed: incompatible with added WeightDecay regularization"); + this.regularizationBias.add(new WeightDecay(coefficient, applyLR)); + } else { + removeWDBias = true; + } + return this; + } + + /** + * Set the dropout + * + * @param dropout Dropout, such as {@link Dropout}, + * {@link org.deeplearning4j.nn.conf.dropout.GaussianDropout}, + * {@link org.deeplearning4j.nn.conf.dropout.GaussianNoise} etc + */ + public Builder dropout(IDropout dropout) { + this.dropout = Optional.ofNullable(dropout); + return this; + } + + /** + * Dropout probability. This is the probability of retaining each input activation + * value for a layer. dropOut(x) will keep an input activation with probability x, and set to 0 + * with probability 1-x.
dropOut(0.0) is a special value / special case - when set to 0.0, + * dropout is disabled (not applied). Note that a dropout value of 1.0 is functionally + * equivalent to no dropout: i.e., 100% probability of retaining each input activation.<br>
+ *

+ * Note 1: Dropout is applied at training time only - and is automatically not applied at test + * time (for evaluation, etc)
Note 2: This sets the probability per-layer. Care should be + * taken when setting lower values for complex networks (too much information may be lost with + * aggressive (very low) dropout values).
Note 3: Frequently, dropout is not applied to (or, + * has a higher retain probability for) the input (first) layer. Dropout is also often not + * applied to output layers. This needs to be handled MANUALLY by the user - set .dropout(0) on + * those layers when using a global dropout setting.<br>
Note 4: Implementation detail (most users + * can ignore): DL4J uses inverted dropout, as described here: + * http://cs231n.github.io/neural-networks-2/ + *

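To make the notes above concrete, a short sketch using the dropOut(double) setter defined just below (the 0.8 retain probability is only an example):

    import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration;

    // Keep each input activation with probability 0.8 during training; dropout is not
    // applied at test time (Note 1). Per Note 3, input and output layers are not excluded
    // automatically - disable dropout on those layer configurations explicitly if needed.
    FineTuneConfiguration ftc = new FineTuneConfiguration.Builder()
            .dropOut(0.8)   // equivalent to .dropout(new Dropout(0.8))
            .build();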
+ * + * @param inputRetainProbability Dropout probability (probability of retaining each input + * activation value for a layer) + * @see #dropout(IDropout) + */ + public Builder dropOut(double inputRetainProbability) { + if (inputRetainProbability == 0.0) { + return dropout(null); + } + return dropout(new Dropout(inputRetainProbability)); + } + + /** + * Set the weight noise (such as {@link org.deeplearning4j.nn.conf.weightnoise.DropConnect} and + * {@link org.deeplearning4j.nn.conf.weightnoise.WeightNoise}) + * + * @param weightNoise Weight noise instance to use + */ + public Builder weightNoise(IWeightNoise weightNoise) { + this.weightNoise = Optional.ofNullable(weightNoise); + return this; + } + + /** + * Gradient updater configuration. For example, {@link org.nd4j.linalg.learning.config.Adam} or + * {@link org.nd4j.linalg.learning.config.Nesterovs} + * + * @param updater Updater to use + */ + public Builder updater(IUpdater updater) { + this.updater = updater; + return this; + } + + /** + * @deprecated Use {@link #updater(IUpdater)} + */ + @Deprecated + public Builder updater(Updater updater) { + return updater(updater.getIUpdaterWithDefaultConfig()); + } + + /** + * Gradient updater configuration, for the biases only. If not set, biases will use the updater + * as set by {@link #updater(IUpdater)} + * + * @param biasUpdater Updater to use for bias parameters + */ + public Builder biasUpdater(IUpdater biasUpdater) { + this.biasUpdater = biasUpdater; + return this; + } + + /** + * Whether scores and gradients should be divided by the minibatch size.
Most users should + * leave this as the default value of true. + */ + public Builder miniBatch(boolean miniBatch) { + this.miniBatch = miniBatch; + return this; + } + + public Builder maxNumLineSearchIterations(int maxNumLineSearchIterations) { + this.maxNumLineSearchIterations = maxNumLineSearchIterations; + return this; + } + + /** + * RNG seed for reproducibility + * + * @param seed RNG seed to use + */ + public Builder seed(long seed) { + this.seed = seed; + return this; + } + + /** + * RNG seed for reproducibility + * + * @param seed RNG seed to use + */ + public Builder seed(int seed) { + return seed((long) seed); + } + + public Builder optimizationAlgo(OptimizationAlgorithm optimizationAlgo) { + this.optimizationAlgo = optimizationAlgo; + return this; + } + + public Builder stepFunction(StepFunction stepFunction) { + this.stepFunction = stepFunction; + return this; + } + + public Builder minimize(boolean minimize) { + this.minimize = minimize; + return this; + } + + /** + * Gradient normalization strategy. Used to specify gradient renormalization, gradient clipping + * etc. See {@link GradientNormalization} for details + * + * @param gradientNormalization Type of normalization to use. Defaults to None. + * @see GradientNormalization + */ + public Builder gradientNormalization(GradientNormalization gradientNormalization) { + this.gradientNormalization = Optional.ofNullable(gradientNormalization); + return this; + } + + /** + * Threshold for gradient normalization, only used for GradientNormalization.ClipL2PerLayer, + * GradientNormalization.ClipL2PerParamType, and + * GradientNormalization.ClipElementWiseAbsoluteValue<br>
Not used otherwise.
L2 threshold + * for first two types of clipping, or absolute value threshold for last type of clipping + */ + public Builder gradientNormalizationThreshold(double gradientNormalizationThreshold) { + this.gradientNormalizationThreshold = gradientNormalizationThreshold; + return this; + } + + /** + * Sets the convolution mode for convolutional layers, which impacts padding and output sizes. + * See {@link ConvolutionMode} for details. Defaults to ConvolutionMode.TRUNCATE
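For instance, element-wise gradient clipping combines the gradientNormalization(...) and gradientNormalizationThreshold(...) setters above (the threshold value here is arbitrary, for illustration only):

    import org.deeplearning4j.nn.conf.GradientNormalization;
    import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration;

    FineTuneConfiguration ftc = new FineTuneConfiguration.Builder()
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)   // clip each gradient element to [-1.0, 1.0]
            .build();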
+ * + * @param convolutionMode Convolution mode to use + */ + public Builder convolutionMode(ConvolutionMode convolutionMode) { + this.convolutionMode = convolutionMode; + return this; + } + + /** + * Sets the cuDNN algo mode for convolutional layers, which impacts performance and memory usage + * of cuDNN. See {@link ConvolutionLayer.AlgoMode} for details. Defaults to "PREFER_FASTEST", + * but "NO_WORKSPACE" uses less memory. + */ + public Builder cudnnAlgoMode(ConvolutionLayer.AlgoMode cudnnAlgoMode) { + this.cudnnAlgoMode = cudnnAlgoMode; + return this; + } + + /** + * Set constraints to be applied to all layers. Default: no constraints.
Constraints can be + * used to enforce certain conditions (non-negativity of parameters, max-norm regularization, + * etc). These constraints are applied at each iteration, after the parameters have been + * updated. + * + * @param constraints Constraints to apply to all parameters of all layers + */ + public Builder constraints(List constraints) { + this.constraints = Optional.ofNullable(constraints); + return this; + } + + public Builder pretrain(boolean pretrain) { + this.pretrain = pretrain; + return this; + } + + public Builder backprop(boolean backprop) { + this.backprop = backprop; + return this; + } + + /** + * The type of backprop. Default setting is used for most networks (MLP, CNN etc), but + * optionally truncated BPTT can be used for training recurrent neural networks. If using + * TruncatedBPTT make sure you set both tBPTTForwardLength() and tBPTTBackwardLength() + * + * @param backpropType Type of backprop. Default: BackpropType.Standard + */ + public Builder backpropType(BackpropType backpropType) { + this.backpropType = backpropType; + return this; + } + + /** + * When doing truncated BPTT: how many steps of forward pass should we do before doing + * (truncated) backprop?
Only applicable when doing + * backpropType(BackpropType.TruncatedBPTT)
Typically tBPTTForwardLength parameter is same + * as the tBPTTBackwardLength parameter, but may be larger than it in some circumstances (but + * never smaller)
Ideally your training data time series length should be divisible by this + * This is the k1 parameter on pg23 of + * http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf + * + * @param tbpttFwdLength Forward length > 0, >= backwardLength + */ + public Builder tbpttFwdLength(int tbpttFwdLength) { + this.tbpttFwdLength = tbpttFwdLength; + return this; + } + + /** + * When doing truncated BPTT: how many steps of backward should we do?
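A sketch of the truncated-BPTT settings described here, assuming the tbpttBackLength(...) setter added just below (segment lengths are placeholders; the forward length must be >= the backward length and ideally divides the series length):

    import org.deeplearning4j.nn.conf.BackpropType;
    import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration;

    FineTuneConfiguration ftc = new FineTuneConfiguration.Builder()
            .backpropType(BackpropType.TruncatedBPTT)
            .tbpttFwdLength(20)    // k1: forward steps per truncated segment
            .tbpttBackLength(20)   // k2: backward steps, <= tbpttFwdLength
            .build();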
Only applicable when + * doing backpropType(BackpropType.TruncatedBPTT)
This is the k2 parameter on pg23 of + * http://www.cs.utoronto.ca/~ilya/pubs/ilya_sutskever_phd_thesis.pdf + * + * @param tbpttBackLength <= forwardLength + */ + public Builder tbpttBackLength(int tbpttBackLength) { + this.tbpttBackLength = tbpttBackLength; + return this; + } + + /** + * This method defines Workspace mode being used during training: NONE: workspace won't be used + * ENABLED: workspaces will be used for training (reduced memory and better performance) + * + * @param trainingWorkspaceMode Workspace mode for training + * @return Builder + */ + public Builder trainingWorkspaceMode(WorkspaceMode trainingWorkspaceMode) { + this.trainingWorkspaceMode = trainingWorkspaceMode; + return this; + } + + /** + * This method defines Workspace mode being used during inference:
NONE: workspace won't be + * used
ENABLED: workspaces will be used for inference (reduced memory and better + * performance) + * + * @param inferenceWorkspaceMode Workspace mode for inference + * @return Builder + */ + public Builder inferenceWorkspaceMode(WorkspaceMode inferenceWorkspaceMode) { + this.inferenceWorkspaceMode = inferenceWorkspaceMode; + return this; + } + + public FineTuneConfiguration build() { + return new FineTuneConfiguration(activation, weightInitFn, biasInit, regularization, + regularizationBias, + removeL2, removeL2Bias, removeL1, removeL1Bias, removeWD, removeWDBias, dropout, + weightNoise, updater, biasUpdater, miniBatch, maxNumLineSearchIterations, seed, + optimizationAlgo, stepFunction, + minimize, gradientNormalization, gradientNormalizationThreshold, convolutionMode, + cudnnAlgoMode, constraints, + pretrain, backprop, backpropType, tbpttFwdLength, tbpttBackLength, trainingWorkspaceMode, + inferenceWorkspaceMode); + } + } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearning.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearning.java index 52ae7c891..708568d19 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearning.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearning.java @@ -28,7 +28,7 @@ import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.vertex.VertexIndices; import org.deeplearning4j.nn.graph.vertex.impl.FrozenVertex; @@ -51,7 +51,7 @@ import java.util.*; public class TransferLearning { public static class Builder { - private final MultiLayerConfiguration origConf; + private final NeuralNetConfiguration origConf; private final MultiLayerNetwork origModel; private MultiLayerNetwork editedModel; @@ -64,9 +64,9 @@ public class TransferLearning { new HashMap<>(); private final Map> nInEditedMap = new HashMap<>(); private final List editedParams = new ArrayList<>(); - private final List editedConfs = new ArrayList<>(); + private final List editedConfs = new ArrayList<>(); private final List appendParams = new ArrayList<>(); //these could be new arrays, and views from origParams - private final List appendConfs = new ArrayList<>(); + private final List appendConfs = new ArrayList<>(); private Map inputPreProcessors = new HashMap<>(); @@ -80,8 +80,8 @@ public class TransferLearning { */ public Builder(MultiLayerNetwork origModel) { this.origModel = origModel; - this.origConf = origModel.getLayerWiseConfigurations().clone(); - this.dataType = origModel.getLayerWiseConfigurations().getDataType(); + this.origConf = origModel.getNetConfiguration().clone(); + this.dataType = origModel.getNetConfiguration().getDataType(); this.inputPreProcessors = origConf.getInputPreProcessors(); } @@ -299,31 +299,31 @@ public class TransferLearning { * At the very least an outputLayer must be added (output layer should be added last - as per the note on order) * Learning configs (like updaters, learning rate etc) specified with the layer here will be honored * - * @param layer layer conf to add (similar to the NeuralNetConfiguration .list().layer(...) 
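Taken together, a hedged sketch of how the refactored types in this patch might be combined: a FineTuneConfiguration applied through TransferLearning.Builder, whose addLayer(...) now takes a LayerConfiguration. It assumes the concrete layer classes (e.g. OutputLayer) extend the new LayerConfiguration type and that the usual builder methods (setFeatureExtractor, removeOutputLayer) are unchanged by this patch; the frozen-layer index and layer sizes are hypothetical:

    import org.deeplearning4j.nn.conf.layers.OutputLayer;
    import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
    import org.deeplearning4j.nn.transferlearning.FineTuneConfiguration;
    import org.deeplearning4j.nn.transferlearning.TransferLearning;
    import org.nd4j.linalg.activations.Activation;
    import org.nd4j.linalg.learning.config.Adam;
    import org.nd4j.linalg.lossfunctions.LossFunctions;

    static MultiLayerNetwork editForNewTask(MultiLayerNetwork pretrained) {
        FineTuneConfiguration ftc = new FineTuneConfiguration.Builder()
                .updater(new Adam(1e-4))
                .build();
        return new TransferLearning.Builder(pretrained)
                .fineTuneConfiguration(ftc)
                .setFeatureExtractor(2)    // freeze layers 0..2, fine-tune the rest
                .removeOutputLayer()
                .addLayer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(128).nOut(10)               // hypothetical sizes for the new task
                        .activation(Activation.SOFTMAX).build())
                .build();
    }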
+ * @param layerConf layer conf to add (similar to the NeuralNetConfiguration .list().layer(...) * @return Builder */ - public Builder addLayer(Layer layer) { + public Builder addLayer(LayerConfiguration layerConf) { if (!prepDone) { doPrep(); } - // Use the fineTune config to create the required NeuralNetConfiguration + Layer instances + // Use the fineTune config to create the required NeuralNetConfiguration + LayerConfiguration instances //instantiate dummy layer to get the params //Build a nn config builder with settings from finetune. Set layer with the added layer //Issue: fine tune config has .learningRate(x), then I add a layer with .learningRate(y)... //We don't want that to be overridden - NeuralNetConfiguration layerConf = - finetuneConfiguration.appliedNeuralNetConfigurationBuilder().layer(layer).build(); + NeuralNetConfiguration netConf = + finetuneConfiguration.appliedNeuralNetConfigurationBuilder(); - val numParams = layer.initializer().numParams(layerConf); + val numParams = layerConf.initializer().numParams(layerConf); INDArray params; if (numParams > 0) { - params = Nd4j.create(origModel.getLayerWiseConfigurations().getDataType(), 1, numParams); - org.deeplearning4j.nn.api.Layer someLayer = layer.instantiate(layerConf, null, 0, params, true, dataType); - appendParams.add(someLayer.params()); - appendConfs.add(someLayer.conf()); + params = Nd4j.create(origModel.getNetConfiguration().getDataType(), 1, numParams); + org.deeplearning4j.nn.api.Layer someLayer = layerConf.instantiate(layerConf.getNetConfiguration(), null, 0, params, true, dataType); + appendParams.add(someLayer.getParams()); + appendConfs.add(someLayer.getLayerConfiguration()); } else { appendConfs.add(layerConf); @@ -364,27 +364,27 @@ public class TransferLearning { if (frozenTill != -1) { org.deeplearning4j.nn.api.Layer[] layers = editedModel.getLayers(); for (int i = frozenTill; i >= 0; i--) { - //Complication here: inner Layer (implementation) NeuralNetConfiguration.layer (config) should keep + //Complication here: inner LayerConfiguration (implementation) NeuralNetConfiguration.layer (config) should keep // the original layer config. 
While network NNC should have the frozen layer, for to/from JSON etc - NeuralNetConfiguration origNNC = editedModel.getLayerWiseConfigurations().getConf(i); - NeuralNetConfiguration layerNNC = origNNC.clone(); - layers[i].setConf(layerNNC); + LayerConfiguration origNNC = editedModel.getNetConfiguration().getFlattenedLayerConfigurations().get(i); + LayerConfiguration layerNNC = origNNC.clone(); + layers[i].setLayerConfiguration(layerNNC); layers[i] = new FrozenLayer(layers[i]); if (origNNC.getVariables() != null) { - List vars = origNNC.variables(true); + List vars = origNNC.getVariables(); origNNC.clearVariables(); layerNNC.clearVariables(); for (String s : vars) { - origNNC.variables(false).add(s); - layerNNC.variables(false).add(s); + origNNC.addVariable(s); + layerNNC.addVariable(s); } } - Layer origLayerConf = editedModel.getLayerWiseConfigurations().getConf(i).getLayer(); - Layer newLayerConf = new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(origLayerConf); + LayerConfiguration origLayerConf = editedModel.getNetConfiguration().getFlattenedLayerConfigurations().get(i); + LayerConfiguration newLayerConf = new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(origLayerConf); newLayerConf.setLayerName(origLayerConf.getLayerName()); - editedModel.getLayerWiseConfigurations().getConf(i).setLayer(newLayerConf); + editedModel.getNetConfiguration().getNetConfigurations().get(i).setLayer(newLayerConf); } editedModel.setLayers(layers); } @@ -400,9 +400,9 @@ public class TransferLearning { for (int i = 0; i < origModel.getnLayers(); i++) { if (origModel.getLayer(i).numParams() > 0) { //dup only if params are there - editedParams.add(origModel.getLayer(i).params().dup()); + editedParams.add(origModel.getLayer(i).getParams().dup()); } else { - editedParams.add(origModel.getLayer(i).params()); + editedParams.add(origModel.getLayer(i).getParams()); } } //apply changes to nout/nin if any in sorted order and save to editedParams @@ -441,15 +441,14 @@ public class TransferLearning { private void fineTuneConfigurationBuild() { - - for (int i = 0; i < origConf.getConfs().size(); i++) { - NeuralNetConfiguration layerConf; + for (int i = 0; i < origConf.getFlattenedLayerConfigurations().size(); i++) { + LayerConfiguration layerConf; if (finetuneConfiguration != null) { - NeuralNetConfiguration nnc = origConf.getConf(i).clone(); - finetuneConfiguration.applyToNeuralNetConfiguration(nnc); + LayerConfiguration nnc = origConf.getFlattenedLayerConfigurations().get(i).clone(); + finetuneConfiguration.applyToLayerConfiguration(nnc); layerConf = nnc; } else { - layerConf = origConf.getConf(i).clone(); + layerConf = origConf.getFlattenedLayerConfigurations().get(i).clone(); } editedConfs.add(layerConf); } @@ -458,17 +457,17 @@ public class TransferLearning { private void nInReplaceBuild(int layerNum, int nIn, IWeightInit init) { Preconditions.checkArgument(layerNum >= 0 && layerNum < editedConfs.size(), "Invalid layer index: must be 0 to " + "numLayers-1 = %s includive, got %s", editedConfs.size(), layerNum); - NeuralNetConfiguration layerConf = editedConfs.get(layerNum); - Layer layerImpl = layerConf.getLayer(); //not a clone need to modify nOut in place + LayerConfiguration layerConf = editedConfs.get(layerNum); + LayerConfiguration layerImpl = layerConf; //not a clone need to modify nOut in place Preconditions.checkArgument(layerImpl instanceof FeedForwardLayer, "nInReplace can only be applide on FeedForward layers;" + "got layer of type %s", layerImpl.getClass().getSimpleName()); FeedForwardLayer 
layerImplF = (FeedForwardLayer) layerImpl; - layerImplF.setWeightInitFn(init); + layerImplF.setWeightInit(init); layerImplF.setNIn(nIn); long numParams = layerImpl.initializer().numParams(layerConf); - INDArray params = Nd4j.create(origModel.getLayerWiseConfigurations().getDataType(), 1, numParams); - org.deeplearning4j.nn.api.Layer someLayer = layerImpl.instantiate(layerConf, null, 0, params, true, dataType); - editedParams.set(layerNum, someLayer.params()); + INDArray params = Nd4j.create(origModel.getNetConfiguration().getDataType(), 1, numParams); + org.deeplearning4j.nn.api.Layer someLayer = layerImpl.instantiate(layerConf.getNetConfiguration(), null, 0, params, true, dataType); + editedParams.set(layerNum, someLayer.getParams()); } @@ -476,30 +475,30 @@ public class TransferLearning { Preconditions.checkArgument(layerNum >= 0 && layerNum < editedConfs.size(), "Invalid layer index: must be 0 to " + "numLayers-1 = %s includive, got %s", editedConfs.size(), layerNum); - NeuralNetConfiguration layerConf = editedConfs.get(layerNum); - Layer layerImpl = layerConf.getLayer(); //not a clone need to modify nOut in place + LayerConfiguration layerConf = editedConfs.get(layerNum); + LayerConfiguration layerImpl = layerConf; //not a clone need to modify nOut in place Preconditions.checkArgument(layerImpl instanceof FeedForwardLayer, "nOutReplace can only be applide on FeedForward layers;" + "got layer of type %s", layerImpl.getClass().getSimpleName()); FeedForwardLayer layerImplF = (FeedForwardLayer) layerImpl; - layerImplF.setWeightInitFn(scheme); + layerImplF.setWeightInit(scheme); layerImplF.setNOut(nOut); long numParams = layerImpl.initializer().numParams(layerConf); - INDArray params = Nd4j.create(origModel.getLayerWiseConfigurations().getDataType(), 1, numParams); - org.deeplearning4j.nn.api.Layer someLayer = layerImpl.instantiate(layerConf, null, 0, params, true, dataType); - editedParams.set(layerNum, someLayer.params()); + INDArray params = Nd4j.create(origModel.getNetConfiguration().getDataType(), 1, numParams); + org.deeplearning4j.nn.api.Layer someLayer = layerImpl.instantiate(layerConf.getNetConfiguration(), null, 0, params, true, dataType); + editedParams.set(layerNum, someLayer.getParams()); if (layerNum + 1 < editedConfs.size()) { layerConf = editedConfs.get(layerNum + 1); - layerImpl = layerConf.getLayer(); //modify in place + layerImpl = layerConf; //modify in place if(layerImpl instanceof FeedForwardLayer) { layerImplF = (FeedForwardLayer) layerImpl; - layerImplF.setWeightInitFn(schemeNext); + layerImplF.setWeightInit(schemeNext); layerImplF.setNIn(nOut); numParams = layerImpl.initializer().numParams(layerConf); if (numParams > 0) { - params = Nd4j.create(origModel.getLayerWiseConfigurations().getDataType(), 1, numParams); - someLayer = layerImpl.instantiate(layerConf, null, 0, params, true, dataType); - editedParams.set(layerNum + 1, someLayer.params()); + params = Nd4j.create(origModel.getNetConfiguration().getDataType(), 1, numParams); + someLayer = layerImpl.instantiate(layerConf.getNetConfiguration(), null, 0, params, true, dataType); + editedParams.set(layerNum + 1, someLayer.getParams()); } } } @@ -526,27 +525,27 @@ public class TransferLearning { } } - private MultiLayerConfiguration constructConf() { + private NeuralNetConfiguration constructConf() { //use the editedConfs list to make a new config - List allConfs = new ArrayList<>(); + List allConfs = new ArrayList<>(); allConfs.addAll(editedConfs); allConfs.addAll(appendConfs); //Set default layer names, if not set - 
as per NeuralNetConfiguration.ListBuilder.build() for (int i = 0; i < allConfs.size(); i++) { - if (allConfs.get(i).getLayer().getLayerName() == null) { - allConfs.get(i).getLayer().setLayerName("layer" + i); + if (allConfs.get(i).getLayerName() == null) { + allConfs.get(i).setLayerName("layer" + i); } } - MultiLayerConfiguration conf = new MultiLayerConfiguration.Builder().inputPreProcessors(inputPreProcessors) - .setInputType(this.inputType).confs(allConfs) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().inputPreProcessors(inputPreProcessors) + .inputType(this.inputType) + .layersFromList(allConfs) + .validateOutputLayerConfig(validateOutputLayerConfig == null || validateOutputLayerConfig) .dataType(origConf.getDataType()) .build(); - if (finetuneConfiguration != null) { - finetuneConfiguration.applyToMultiLayerConfiguration(conf); - } + return conf; } } @@ -572,7 +571,7 @@ public class TransferLearning { */ public GraphBuilder(ComputationGraph origGraph) { this.origGraph = origGraph; - this.origConfig = origGraph.getConfiguration().clone(); + this.origConfig = origGraph.getComputationGraphConfiguration().clone(); } /** @@ -590,10 +589,10 @@ public class TransferLearning { for (Map.Entry gv : vertices.entrySet()) { if (gv.getValue() instanceof LayerVertex) { LayerVertex lv = (LayerVertex) gv.getValue(); - NeuralNetConfiguration nnc = lv.getLayerConf().clone(); - fineTuneConfiguration.applyToNeuralNetConfiguration(nnc); + NeuralNetConfiguration nnc = lv.getNetConfiguration().clone(); + fineTuneConfiguration.applyToLayerConfiguration(lv.getLayerConfiguration()); vertices.put(gv.getKey(), new LayerVertex(nnc, lv.getPreProcessor())); - nnc.getLayer().setLayerName(gv.getKey()); + lv.getLayerConfiguration().setLayerName(gv.getKey()); } } @@ -725,26 +724,26 @@ public class TransferLearning { * @return GraphBuilder */ public GraphBuilder nInReplace(String layerName, int nIn, IWeightInit scheme) { - Preconditions.checkState(origGraph.getVertex(layerName) != null, "Layer with name %s not found", + Preconditions.checkState(origGraph.getVertex(layerName) != null, "LayerConfiguration with name %s not found", layerName); Preconditions.checkState(origGraph.getVertex(layerName).hasLayer(), "nInReplace can only be applied" + " on vertices with layers. 
Vertex %s does not have a layer", layerName); initBuilderIfReq(); - NeuralNetConfiguration layerConf = origGraph.getLayer(layerName).conf(); - Layer layerImpl = layerConf.getLayer().clone(); + LayerConfiguration layerConf = origGraph.getLayer(layerName).getLayerConfiguration(); + LayerConfiguration layerImpl = layerConf.clone(); Preconditions.checkState(layerImpl instanceof FeedForwardLayer, "Can only use nInReplace on FeedForward layers;" + "got layer of type %s for layer name %s", layerImpl.getClass().getSimpleName(), layerName); layerImpl.resetLayerDefaultConfig(); FeedForwardLayer layerImplF = (FeedForwardLayer) layerImpl; - layerImplF.setWeightInitFn(scheme); + layerImplF.setWeightInit(scheme); layerImplF.setNIn(nIn); if(editedVertices.contains(layerName) && editedConfigBuilder.getVertices().get(layerName) instanceof LayerVertex && nInFromNewConfig.containsKey(layerName)){ - Layer l = ((LayerVertex)editedConfigBuilder.getVertices().get(layerName)).getLayerConf().getLayer(); + LayerConfiguration l = ((LayerVertex)editedConfigBuilder.getVertices().get(layerName)).getLayerConfiguration(); if(l instanceof FeedForwardLayer){ layerImplF.setNIn(nInFromNewConfig.get(layerName)); } @@ -764,16 +763,16 @@ public class TransferLearning { if (origGraph.getVertex(layerName).hasLayer()) { - NeuralNetConfiguration layerConf = origGraph.getLayer(layerName).conf(); - Layer layerImpl = layerConf.getLayer().clone(); + LayerConfiguration layerConf = origGraph.getLayer(layerName).getLayerConfiguration(); + LayerConfiguration layerImpl = layerConf.clone(); layerImpl.resetLayerDefaultConfig(); FeedForwardLayer layerImplF = (FeedForwardLayer) layerImpl; - layerImplF.setWeightInitFn(scheme); + layerImplF.setWeightInit(scheme); layerImplF.setNOut(nOut); if(editedVertices.contains(layerName) && editedConfigBuilder.getVertices().get(layerName) instanceof LayerVertex && nInFromNewConfig.containsKey(layerName)){ - Layer l = ((LayerVertex)editedConfigBuilder.getVertices().get(layerName)).getLayerConf().getLayer(); + LayerConfiguration l = ((LayerVertex)editedConfigBuilder.getVertices().get(layerName)).getLayerConfiguration(); if(l instanceof FeedForwardLayer){ layerImplF.setNIn(nInFromNewConfig.get(layerName)); } @@ -802,12 +801,12 @@ public class TransferLearning { throw new UnsupportedOperationException( "Cannot modify nOut of a layer vertex that feeds non-layer vertices. Use removeVertexKeepConnections followed by addVertex instead"); } - layerConf = origGraph.getLayer(fanoutVertexName).conf(); - if(!(layerConf.getLayer() instanceof FeedForwardLayer)) + layerConf = origGraph.getLayer(fanoutVertexName).getLayerConfiguration(); + if(!(layerConf instanceof FeedForwardLayer)) continue; - layerImpl = layerConf.getLayer().clone(); + layerImpl = layerConf.clone(); layerImplF = (FeedForwardLayer) layerImpl; - layerImplF.setWeightInitFn(schemeNext); + layerImplF.setWeightInit(schemeNext); layerImplF.setNIn(nOut); nInFromNewConfig.put(fanoutVertexName, nOut); @@ -859,7 +858,7 @@ public class TransferLearning { * @param layerInputs * @return */ - public GraphBuilder addLayer(String layerName, Layer layer, String... layerInputs) { + public GraphBuilder addLayer(String layerName, LayerConfiguration layer, String... 
layerInputs) { initBuilderIfReq(); editedConfigBuilder.addLayer(layerName, layer, null, layerInputs); editedVertices.add(layerName); @@ -874,7 +873,7 @@ public class TransferLearning { * @param layerInputs * @return */ - public GraphBuilder addLayer(String layerName, Layer layer, InputPreProcessor preProcessor, + public GraphBuilder addLayer(String layerName, LayerConfiguration layer, InputPreProcessor preProcessor, String... layerInputs) { initBuilderIfReq(); editedConfigBuilder.addLayer(layerName, layer, preProcessor, layerInputs); @@ -980,11 +979,11 @@ public class TransferLearning { continue; //some layers have no params if (editedVertices.contains(layerName)) continue; //keep the changed params - INDArray origParams = origGraph.getLayer(layerName).params(); + INDArray origParams = origGraph.getLayer(layerName).getParams(); layer.setParams(origParams.dup()); //copy over origGraph params } } else { - newGraph.setParams(origGraph.params()); + newGraph.setParams(origGraph.getModelParams()); } //Freeze layers as necessary. Note: we can't simply say "everything before frozen layer X needs to be frozen @@ -1009,24 +1008,24 @@ public class TransferLearning { String layerName = gv.getVertexName(); LayerVertex currLayerVertex = (LayerVertex) newConfig.getVertices().get(layerName); - Layer origLayerConf = currLayerVertex.getLayerConf().getLayer(); - Layer newLayerConf = new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(origLayerConf); + LayerConfiguration origLayerConf = currLayerVertex.getLayerConfiguration(); + LayerConfiguration newLayerConf = new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer(origLayerConf); newLayerConf.setLayerName(origLayerConf.getLayerName()); - //Complication here(and reason for clone on next line): inner Layer (implementation) + //Complication here(and reason for clone on next line): inner LayerConfiguration (implementation) // NeuralNetConfiguration.layer (config) should keep the original layer config. 
While network // NNC should have the frozen layer - NeuralNetConfiguration newNNC = currLayerVertex.getLayerConf().clone(); - currLayerVertex.setLayerConf(newNNC); - currLayerVertex.getLayerConf().setLayer(newLayerConf); + NeuralNetConfiguration newNNC = currLayerVertex.getNetConfiguration().clone(); + currLayerVertex.setNetConfiguration(newNNC); + currLayerVertex.getNetConfiguration().setLayer(newLayerConf); //Make sure the underlying layer doesn't change: - List vars = currLayerVertex.getLayerConf().variables(true); - currLayerVertex.getLayerConf().clearVariables(); + List vars = currLayerVertex.getNetConfiguration().netWideVariables(true); + currLayerVertex.getNetConfiguration().clearNetWideVariable(); for (String s : vars) { - newNNC.variables(false).add(s); + newNNC.netWideVariables(false).add(s); } - //We also need to place the layer in the CompGraph Layer[] (replacing the old one) + //We also need to place the layer in the CompGraph LayerConfiguration[] (replacing the old one) //This could no doubt be done more efficiently org.deeplearning4j.nn.api.Layer[] layers = newGraph.getLayers(); for (int j = 0; j < layers.length; j++) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelper.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelper.java index f6f3a35c1..bd6cc18a3 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelper.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/transferlearning/TransferLearningHelper.java @@ -21,7 +21,6 @@ package org.deeplearning4j.nn.transferlearning; import org.apache.commons.lang3.ArrayUtils; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.vertex.GraphVertex; @@ -179,7 +178,7 @@ public class TransferLearningHelper { org.deeplearning4j.nn.api.Layer l = gv.getLayer(); gv.setLayerAsFrozen(); - //We also need to place the layer in the CompGraph Layer[] (replacing the old one) + //We also need to place the layer in the CompGraph LayerConfiguration[] (replacing the old one) //This could no doubt be done more efficiently org.deeplearning4j.nn.api.Layer[] layers = origGraph.getLayers(); for (int j = 0; j < layers.length; j++) { @@ -242,7 +241,7 @@ public class TransferLearningHelper { } Set frozenInputVerticesSorted = new HashSet<>(); - frozenInputVerticesSorted.addAll(origGraph.getConfiguration().getNetworkInputs()); + frozenInputVerticesSorted.addAll(origGraph.getComputationGraphConfiguration().getNetworkInputs()); frozenInputVerticesSorted.removeAll(allFrozen); //remove input vertices - just to add back in a predictable order for (String existingInput : frozenInputVerticesSorted) { @@ -282,21 +281,21 @@ public class TransferLearningHelper { } List allConfs = new ArrayList<>(); for (int i = frozenInputLayer + 1; i < origMLN.getnLayers(); i++) { - allConfs.add(origMLN.getLayer(i).conf()); + allConfs.add(origMLN.getLayer(i).getNetConfiguration()); } - MultiLayerConfiguration c = origMLN.getLayerWiseConfigurations(); + NeuralNetConfiguration c = origMLN.getNetConfiguration(); - unFrozenSubsetMLN = new MultiLayerNetwork(new MultiLayerConfiguration.Builder() + unFrozenSubsetMLN = new MultiLayerNetwork(NeuralNetConfiguration.builder() .inputPreProcessors(c.getInputPreProcessors()) - 
.backpropType(c.getBackpropType()).tBPTTForwardLength(c.getTbpttFwdLength()) - .tBPTTBackwardLength(c.getTbpttBackLength()).confs(allConfs) - .dataType(origMLN.getLayerWiseConfigurations().getDataType()) + .backpropType(c.getBackpropType()).tbpttFwdLength(c.getTbpttFwdLength()) + .tbpttBackLength(c.getTbpttBackLength()).confs(allConfs) + .dataType(origMLN.getNetConfiguration().getDataType()) .build()); unFrozenSubsetMLN.init(); //copy over params for (int i = frozenInputLayer + 1; i < origMLN.getnLayers(); i++) { - unFrozenSubsetMLN.getLayer(i - frozenInputLayer - 1).setParams(origMLN.getLayer(i).params()); + unFrozenSubsetMLN.getLayer(i - frozenInputLayer - 1).setParams(origMLN.getLayer(i).getParams()); } //unFrozenSubsetMLN.setListeners(origMLN.getListeners()); } @@ -328,7 +327,7 @@ public class TransferLearningHelper { String anInput = graphInputs.get(i); if (origGraph.getVertex(anInput).isInputVertex()) { //was an original input to the graph - int inputIndex = origGraph.getConfiguration().getNetworkInputs().indexOf(anInput); + int inputIndex = origGraph.getComputationGraphConfiguration().getNetworkInputs().indexOf(anInput); featuresNow[i] = origGraph.getInput(inputIndex); } else { //needs to be grabbed from the internal activations @@ -414,7 +413,7 @@ public class TransferLearningHelper { for (GraphVertex aVertex : unFrozenSubsetGraph.getVertices()) { if (!aVertex.hasLayer()) continue; - origGraph.getVertex(aVertex.getVertexName()).getLayer().setParams(aVertex.getLayer().params()); + origGraph.getVertex(aVertex.getVertexName()).getLayer().setParams(aVertex.getLayer().getParams()); } } @@ -422,13 +421,13 @@ public class TransferLearningHelper { for (GraphVertex aVertex : unFrozenSubsetGraph.getVertices()) { if (!aVertex.hasLayer()) continue; - aVertex.getLayer().setParams(origGraph.getLayer(aVertex.getVertexName()).params()); + aVertex.getLayer().setParams(origGraph.getLayer(aVertex.getVertexName()).getParams()); } } private void copyParamsFromSubsetMLNToOrig() { for (int i = frozenInputLayer + 1; i < origMLN.getnLayers(); i++) { - origMLN.getLayer(i).setParams(unFrozenSubsetMLN.getLayer(i - frozenInputLayer - 1).params()); + origMLN.getLayer(i).setParams(unFrozenSubsetMLN.getLayer(i - frozenInputLayer - 1).getParams()); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/BaseMultiLayerUpdater.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/BaseMultiLayerUpdater.java index 4f4d1690f..e7a74999c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/BaseMultiLayerUpdater.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/BaseMultiLayerUpdater.java @@ -21,8 +21,8 @@ package org.deeplearning4j.nn.updater; import lombok.Getter; -import org.deeplearning4j.nn.api.Model; -import org.deeplearning4j.nn.api.Trainable; +import net.brutex.ai.dnn.api.IModel; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.GradientNormalization; import org.deeplearning4j.nn.gradient.DefaultGradient; @@ -44,10 +44,10 @@ import org.nd4j.linalg.learning.config.IUpdater; import java.util.*; @Getter -public abstract class BaseMultiLayerUpdater implements Updater { +public abstract class BaseMultiLayerUpdater implements Updater { protected final T network; - protected Map layersByName; + protected Map layersByName; protected final List updaterBlocks; protected INDArray updaterStateViewArray; protected boolean initializedMinibatchDivision; 
@@ -64,31 +64,31 @@ public abstract class BaseMultiLayerUpdater implements Updater */ public BaseMultiLayerUpdater(T network, INDArray updaterState) { this.network = network; - Trainable[] layers = getOrderedLayers(); //May also include vertices + ITrainableLayer[] layers = getOrderedLayers(); //May also include vertices int updaterStateSize = 0; //Iterate through layers, and variables for each layer. //While the updater configuration is the same: combine into one op, rather than doing a lot of smaller // (yet identical) ops. - Trainable lastLayer = null; + ITrainableLayer lastLayer = null; String lastVariable = null; UpdaterBlock currentBlock = null; updaterBlocks = new ArrayList<>(); - INDArray paramsView = network.params(); + INDArray paramsView = network.getModelParams(); INDArray gradientView = getFlattenedGradientsView(); int paramsViewSoFar = 0; int currentUpdaterOffset = 0; for (int i = 0; i < layers.length; i++) { - Map layerParamTable = layers[i].paramTable(false); + Map layerParamTable = layers[i].getParamTable(false); if (layerParamTable != null) { List variables = new ArrayList<>(layerParamTable.keySet()); //Is from a set, but iteration order should be fixed per layer as it's a from a LinkedHashSet for (int j = 0; j < variables.size(); j++) { String var = variables.get(j); long paramSizeThisVariable = layerParamTable.get(var).length(); - IUpdater u = layers[i].getConfig().getUpdaterByParam(var); - Preconditions.checkNotNull(u, "Updater for parameter %s, layer \"%s\" was null", var, layers[i].getConfig().getLayerName()); + IUpdater u = layers[i].getTrainingConfig().getUpdaterByParam(var); + Preconditions.checkNotNull(u, "Updater for parameter %s, layer \"%s\" was null", var, layers[i].getTrainingConfig().getLayerName()); int updaterStateSizeThisVariable = (int) u.stateSize(paramSizeThisVariable); INDArray gradientViewSubset = null; @@ -145,7 +145,7 @@ public abstract class BaseMultiLayerUpdater implements Updater updaterRequiresInit = false; } else if (updaterStateSize > 0) { //May be 0 if all SGD or NONE updaters, for example - updaterStateViewArray = Nd4j.createUninitialized(network.params().dataType(), new long[] {1, updaterStateSize}, Nd4j.order()); + updaterStateViewArray = Nd4j.createUninitialized(network.getModelParams().dataType(), new long[] {1, updaterStateSize}, Nd4j.order()); updaterRequiresInit = true; } @@ -183,7 +183,7 @@ public abstract class BaseMultiLayerUpdater implements Updater * @return Array of layers, in the correct order (i.e., same order as the parameter/gradient/updater flattening * order - input to output for MultiLayerNetwork, or topological order for ComputationGraph) */ - protected abstract Trainable[] getOrderedLayers(); + protected abstract ITrainableLayer[] getOrderedLayers(); /** * @return The flattened gradient view array for the model @@ -207,10 +207,11 @@ public abstract class BaseMultiLayerUpdater implements Updater */ public void setStateViewArray(INDArray viewArray) { if(this.updaterStateViewArray == null){ - if(viewArray == null) + if(viewArray == null || viewArray.length()==0) return; //No op - for example, SGD and NoOp updater - i.e., no stored state else { - throw new IllegalStateException("Attempting to set updater state view array with null value"); + //this.updaterStateViewArray.set + // throw new IllegalStateException("Attempting to set updater state view array with null value"); } } if (this.updaterStateViewArray.length() != viewArray.length()) @@ -220,7 +221,7 @@ public abstract class BaseMultiLayerUpdater implements Updater } 
@Override - public void setStateViewArray(Trainable layer, INDArray viewArray, boolean initialize) { + public void setStateViewArray(ITrainableLayer layer, INDArray viewArray, boolean initialize) { this.setStateViewArray(viewArray); } @@ -241,7 +242,7 @@ public abstract class BaseMultiLayerUpdater implements Updater } @Override - public void update(Trainable layer, Gradient gradient, int iteration, int epoch, int batchSize, LayerWorkspaceMgr workspaceMgr) { + public void update(ITrainableLayer layer, Gradient gradient, int iteration, int epoch, int batchSize, LayerWorkspaceMgr workspaceMgr) { update(gradient, iteration, epoch, batchSize, workspaceMgr); } @@ -266,9 +267,9 @@ public abstract class BaseMultiLayerUpdater implements Updater //Split up the gradients on a per-layer basis, for pre-apply Map layerGradients = new HashMap<>(); - Trainable[] layers = getOrderedLayers(); + ITrainableLayer[] layers = getOrderedLayers(); if (layers.length == 1 && isSingleLayerUpdater()) { - layerGradients.put(layers[0].getConfig().getLayerName(), gradient); + layerGradients.put(layers[0].getTrainingConfig().getLayerName(), gradient); } else { for (Map.Entry gradientPair : gradient.gradientForVariable().entrySet()) { String key = gradientPair.getKey(); @@ -296,7 +297,7 @@ public abstract class BaseMultiLayerUpdater implements Updater //PRE apply (gradient clipping, etc): done on a per-layer basis for (Map.Entry entry : layerGradients.entrySet()) { String layerName = entry.getKey(); - Trainable layer = layersByName.get(layerName); + ITrainableLayer layer = layersByName.get(layerName); //Todo Layers may have the same name!? preApply(layer, layerGradients.get(layerName), iteration); } @@ -350,9 +351,9 @@ public abstract class BaseMultiLayerUpdater implements Updater long paramsSoFar = 0; long currentStart = 0; long currentEnd = 0; - for(Trainable t : getOrderedLayers()){ - Set layerParams = t.paramTable(false).keySet(); - Map paramTable = t.paramTable(false); + for(ITrainableLayer t : getOrderedLayers()){ + Set layerParams = t.getParamTable(false).keySet(); + Map paramTable = t.getParamTable(false); for(String s : layerParams) { if(t.updaterDivideByMinibatch(s)){ long l = paramTable.get(s).length(); @@ -385,22 +386,22 @@ public abstract class BaseMultiLayerUpdater implements Updater /** * Pre-apply: Apply gradient normalization/clipping * - * @param layer Layer to apply gradient normalization/clipping for + * @param layer ILayer to apply gradient normalization/clipping for * @param gradient Gradient to update * @param iteration The current iteration (i.e., number of parameter updates so far) */ - public void preApply(Trainable layer, Gradient gradient, int iteration) { + public void preApply(ITrainableLayer layer, Gradient gradient, int iteration) { - if (layer.getConfig() == null || layer.numParams() == 0) { - //Layer does not have parameters -> no gradient + if (layer.getTrainingConfig() == null || layer.numParams() == 0) { + //ILayer does not have parameters -> no gradient return; } - GradientNormalization normalization = layer.getConfig().getGradientNormalization(); + GradientNormalization normalization = layer.getTrainingConfig().getGradientNormalization(); if (normalization == null || normalization == GradientNormalization.None) return; //no op - final double threshold = layer.getConfig().getGradientNormalizationThreshold(); + final double threshold = layer.getTrainingConfig().getGradientNormalizationThreshold(); INDArray layerGradientView = layer.getGradientsViewArray(); switch (normalization) { diff 
--git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/LayerUpdater.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/LayerUpdater.java index dea50edd9..3dafbb3f9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/LayerUpdater.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/LayerUpdater.java @@ -22,7 +22,7 @@ package org.deeplearning4j.nn.updater; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Trainable; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; @@ -42,12 +42,12 @@ public class LayerUpdater extends BaseMultiLayerUpdater { } layersByName = new HashMap<>(); - layersByName.put(layer.conf().getLayer().getLayerName(), layer); + layersByName.put(layer.getLayerConfiguration().getLayerName(), layer); } @Override - protected Trainable[] getOrderedLayers() { - return new Trainable[] {network}; + protected ITrainableLayer[] getOrderedLayers() { + return new ITrainableLayer[] {network}; } @Override @@ -57,12 +57,12 @@ public class LayerUpdater extends BaseMultiLayerUpdater { @Override protected INDArray getParams() { - return network.params(); + return network.getParams(); } @Override protected boolean isMiniBatch() { - return network.conf().isMiniBatch(); + return network.getNetConfiguration().isMiniBatch(); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/MultiLayerUpdater.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/MultiLayerUpdater.java index 58f64f66f..1027f5003 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/MultiLayerUpdater.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/MultiLayerUpdater.java @@ -22,8 +22,8 @@ package org.deeplearning4j.nn.updater; import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Trainable; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; @@ -49,9 +49,9 @@ public class MultiLayerUpdater extends BaseMultiLayerUpdater } @Override - protected Trainable[] getOrderedLayers() { + protected ITrainableLayer[] getOrderedLayers() { Layer[] layers = network.getLayers(); - Trainable[] t = new Trainable[layers.length]; + ITrainableLayer[] t = new ITrainableLayer[layers.length]; System.arraycopy(layers, 0, t, 0, layers.length); return t; } @@ -66,12 +66,12 @@ public class MultiLayerUpdater extends BaseMultiLayerUpdater @Override protected INDArray getParams() { - return network.params(); + return network.getModelParams(); } @Override protected boolean isMiniBatch() { - return network.conf().isMiniBatch(); + return network.getNetConfiguration().isMiniBatch(); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterBlock.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterBlock.java index 3366a48f9..7b496468f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterBlock.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterBlock.java @@ -22,18 +22,11 @@ package org.deeplearning4j.nn.updater; import lombok.AllArgsConstructor; import 
lombok.Data; -import lombok.val; -import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Trainable; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.BaseLayer; -import org.deeplearning4j.nn.layers.FrozenLayer; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.NDArrayIndex; import org.nd4j.linalg.learning.GradientUpdater; import org.nd4j.linalg.learning.regularization.Regularization; -import org.nd4j.linalg.ops.transforms.Transforms; import java.util.ArrayList; import java.util.List; @@ -56,7 +49,7 @@ public class UpdaterBlock { @AllArgsConstructor @Data public static class ParamState { - private final Trainable layer; + private final ITrainableLayer layer; private final String paramName; private final int paramOffsetStart; private final int paramOffsetEnd; @@ -89,7 +82,7 @@ public class UpdaterBlock { if (gradientUpdater == null) { ParamState varState = layersAndVariablesInBlock.get(0); String varName = varState.getParamName(); - gradientUpdater = varState.getLayer().getConfig().getUpdaterByParam(varName).instantiate(updaterView, + gradientUpdater = varState.getLayer().getTrainingConfig().getUpdaterByParam(varName).instantiate(updaterView, updaterViewRequiresInitialization); //UpdaterUtils.getGradientUpdater(varState.getLayer(), varState.getParamName()); } } @@ -97,7 +90,7 @@ public class UpdaterBlock { public boolean isPretrainUpdaterBlock() { //All in block should be the same layer, and all be pretrain params ParamState vs = layersAndVariablesInBlock.get(0); - return vs.getLayer().getConfig().isPretrainParam(vs.getParamName()); + return vs.getLayer().getTrainingConfig().isPretrainParam(vs.getParamName()); } public boolean skipDueToPretrainConfig( boolean isLayerUpdater) { @@ -148,7 +141,7 @@ public class UpdaterBlock { //Second: apply learning rate policy. 
Note that by definition we have the same LR policy for every single // variable in the block - Trainable l0 = layersAndVariablesInBlock.get(0).getLayer(); + ITrainableLayer l0 = layersAndVariablesInBlock.get(0).getLayer(); if (l0.numParams() == 0) { //No params for this layer return; @@ -194,10 +187,10 @@ public class UpdaterBlock { * @param gradientView Gradient view array for the layer + param * @param paramsView Parameter view array for the layer + param */ - protected void applyRegularization(Regularization.ApplyStep step, Trainable layer, String paramName, INDArray gradientView, INDArray paramsView, int iter, int epoch, double lr) { + protected void applyRegularization(Regularization.ApplyStep step, ITrainableLayer layer, String paramName, INDArray gradientView, INDArray paramsView, int iter, int epoch, double lr) { //TODO: do this for multiple contiguous params/layers (fewer, larger ops) - List l = layer.getConfig().getRegularizationByParam(paramName); + List l = layer.getTrainingConfig().getRegularizationByParam(paramName); if(l != null && !l.isEmpty()){ for(Regularization r : l){ if(r.applyStep() == step){ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterCreator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterCreator.java index 3194de852..11573daa0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterCreator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterCreator.java @@ -21,21 +21,26 @@ package org.deeplearning4j.nn.updater; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.updater.graph.ComputationGraphUpdater; /** * - * - * @author Adam Gibson + * Create an {@link org.deeplearning4j.nn.api.Updater} based on the provided {@link IModel}. */ public class UpdaterCreator { private UpdaterCreator() {} - public static org.deeplearning4j.nn.api.Updater getUpdater(Model layer) { + /** + * Create an Updater for a given model type. This is either {@link ComputationGraphUpdater} or + * {@link MultiLayerUpdater} or a {@link LayerUpdater}. 
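A minimal usage sketch for the factory documented just above (the full method follows below); the wrapper class is hypothetical and not part of the patch.

import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.nn.api.Updater;
import org.deeplearning4j.nn.updater.UpdaterCreator;

public class UpdaterCreatorUsageSketch {
    // A MultiLayerNetwork yields a MultiLayerUpdater, a ComputationGraph a ComputationGraphUpdater,
    // and any other IModel a LayerUpdater, as described in the javadoc above.
    static Updater updaterFor(IModel model) {
        return UpdaterCreator.getUpdater(model);
    }
}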
+ * @param layer + * @return + */ + public static org.deeplearning4j.nn.api.Updater getUpdater(IModel layer) { if (layer instanceof MultiLayerNetwork) { return new MultiLayerUpdater((MultiLayerNetwork) layer); } else if (layer instanceof ComputationGraph) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterUtils.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterUtils.java index 73bd5410e..14a2a54de 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterUtils.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/UpdaterUtils.java @@ -20,16 +20,16 @@ package org.deeplearning4j.nn.updater; -import org.deeplearning4j.nn.api.Trainable; -import org.deeplearning4j.nn.api.TrainingConfig; +import org.deeplearning4j.nn.api.ITrainableLayer; +import org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.nd4j.linalg.learning.config.IUpdater; public class UpdaterUtils { - public static boolean updaterConfigurationsEquals(Trainable layer1, String param1, Trainable layer2, String param2) { - TrainingConfig l1 = layer1.getConfig(); - TrainingConfig l2 = layer2.getConfig(); + public static boolean updaterConfigurationsEquals(ITrainableLayer layer1, String param1, ITrainableLayer layer2, String param2) { + ITraininableLayerConfiguration l1 = layer1.getTrainingConfig(); + ITraininableLayerConfiguration l2 = layer2.getTrainingConfig(); IUpdater u1 = l1.getUpdaterByParam(param1); IUpdater u2 = l2.getUpdaterByParam(param2); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/graph/ComputationGraphUpdater.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/graph/ComputationGraphUpdater.java index 6af2901d6..952258bcf 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/graph/ComputationGraphUpdater.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/nn/updater/graph/ComputationGraphUpdater.java @@ -20,8 +20,7 @@ package org.deeplearning4j.nn.updater.graph; -import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Trainable; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.vertex.GraphVertex; import org.deeplearning4j.nn.updater.BaseMultiLayerUpdater; @@ -32,7 +31,7 @@ import java.util.HashMap; public class ComputationGraphUpdater extends BaseMultiLayerUpdater { - protected Trainable[] orderedLayers; + protected ITrainableLayer[] orderedLayers; public ComputationGraphUpdater(ComputationGraph graph) { this(graph, null); @@ -42,14 +41,14 @@ public class ComputationGraphUpdater extends BaseMultiLayerUpdater(); - Trainable[] layers = getOrderedLayers(); - for (Trainable l : layers) { - layersByName.put(l.getConfig().getLayerName(), l); + ITrainableLayer[] layers = getOrderedLayers(); + for (ITrainableLayer l : layers) { + layersByName.put(l.getTrainingConfig().getLayerName(), l); } } @Override - protected Trainable[] getOrderedLayers() { + protected ITrainableLayer[] getOrderedLayers() { if (orderedLayers != null) { return orderedLayers; } @@ -58,7 +57,7 @@ public class ComputationGraphUpdater extends BaseMultiLayerUpdater listeners; - private Model model; + private IModel model; private ConvexOptimizer optimizer; private StepFunction stepFunction; @@ -90,7 +90,7 @@ public class Solver { public static class Builder { private NeuralNetConfiguration conf; - private Model model; + private 
IModel model; private final List listeners = new ArrayList<>(); public Builder configure(NeuralNetConfiguration conf) { @@ -112,7 +112,7 @@ public class Solver { return this; } - public Builder model(Model model) { + public Builder model(IModel model) { this.model = model; return this; } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/BaseTrainingListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/BaseTrainingListener.java index c7d755187..d72b836f5 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/BaseTrainingListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/BaseTrainingListener.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.api; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.api.ndarray.INDArray; import java.util.List; @@ -29,43 +29,43 @@ import java.util.Map; public abstract class BaseTrainingListener implements TrainingListener { @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { //No op } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { //No op } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { //No op } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { //No op } @Override - public void onGradientCalculation(Model model) { + public void onGradientCalculation(IModel model) { //No op } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { //No op } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { //No op } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/ConvexOptimizer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/ConvexOptimizer.java index c32a0fac3..0d6999fce 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/ConvexOptimizer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/ConvexOptimizer.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.api; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; @@ -128,6 +128,6 @@ public interface ConvexOptimizer extends Serializable { * @param batchSize batchSize for update * @paramType paramType to update */ - void updateGradientAccordingToParams(Gradient gradient, Model model, int batchSize, LayerWorkspaceMgr workspaceMgr); + void updateGradientAccordingToParams(Gradient gradient, IModel model, int batchSize, LayerWorkspaceMgr workspaceMgr); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/IterationListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/IterationListener.java index 085d734b1..309f478fe 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/IterationListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/IterationListener.java @@ -21,7 +21,7 @@ package org.deeplearning4j.optimize.api; -import 
org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import java.io.Serializable; @@ -33,6 +33,6 @@ public abstract class IterationListener extends BaseTrainingListener implements * @param iteration the iteration * @param model the model iterating */ - public abstract void iterationDone(Model model, int iteration, int epoch); + public abstract void iterationDone(IModel model, int iteration, int epoch); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/TrainingListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/TrainingListener.java index 7c96fd750..20fe978dc 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/TrainingListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/api/TrainingListener.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.api; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; @@ -35,60 +35,60 @@ public interface TrainingListener { * @param iteration the iteration * @param model the model iterating */ - void iterationDone(Model model, int iteration, int epoch); + void iterationDone(IModel model, int iteration, int epoch); /** * Called once at the start of each epoch, when using methods such as {@link org.deeplearning4j.nn.multilayer.MultiLayerNetwork#fit(DataSetIterator)}, * {@link org.deeplearning4j.nn.graph.ComputationGraph#fit(DataSetIterator)} or {@link org.deeplearning4j.nn.graph.ComputationGraph#fit(MultiDataSetIterator)} */ - void onEpochStart(Model model); + void onEpochStart(IModel model); /** * Called once at the end of each epoch, when using methods such as {@link org.deeplearning4j.nn.multilayer.MultiLayerNetwork#fit(DataSetIterator)}, * {@link org.deeplearning4j.nn.graph.ComputationGraph#fit(DataSetIterator)} or {@link org.deeplearning4j.nn.graph.ComputationGraph#fit(MultiDataSetIterator)} */ - void onEpochEnd(Model model); + void onEpochEnd(IModel model); /** * Called once per iteration (forward pass) for activations (usually for a {@link org.deeplearning4j.nn.multilayer.MultiLayerNetwork}), * only at training time * * @param model Model - * @param activations Layer activations (including input) + * @param activations ILayer activations (including input) */ - void onForwardPass(Model model, List activations); + void onForwardPass(IModel model, List activations); /** * Called once per iteration (forward pass) for activations (usually for a {@link org.deeplearning4j.nn.graph.ComputationGraph}), * only at training time * * @param model Model - * @param activations Layer activations (including input) + * @param activations ILayer activations (including input) */ - void onForwardPass(Model model, Map activations); + void onForwardPass(IModel model, Map activations); /** * Called once per iteration (backward pass) before the gradients are updated - * Gradients are available via {@link Model#gradient()}. + * Gradients are available via {@link IModel#gradient()}. * Note that gradients will likely be updated in-place - thus they should be copied or processed synchronously * in this method. *
- * For updates (gradients post learning rate/momentum/rmsprop etc) see {@link #onBackwardPass(Model)} + * For updates (gradients post learning rate/momentum/rmsprop etc) see {@link #onBackwardPass(IModel)} * * @param model Model */ - void onGradientCalculation(Model model); + void onGradientCalculation(IModel model); /** * Called once per iteration (backward pass) after gradients have been calculated, and updated - * Gradients are available via {@link Model#gradient()}. + * Gradients are available via {@link IModel#gradient()}. *
- * Unlike {@link #onGradientCalculation(Model)} the gradients at this point will be post-update, rather than + * Unlike {@link #onGradientCalculation(IModel)} the gradients at this point will be post-update, rather than * raw (pre-update) gradients at that method call. * * @param model Model */ - void onBackwardPass(Model model); + void onBackwardPass(IModel model); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CheckpointListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CheckpointListener.java index 4ebf2e050..120099da4 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CheckpointListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CheckpointListener.java @@ -23,8 +23,8 @@ package org.deeplearning4j.optimize.listeners; import com.google.common.io.Files; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.IOUtils; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.BaseTrainingListener; @@ -109,7 +109,7 @@ public class CheckpointListener extends BaseTrainingListener implements Serializ } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { int epochsDone = getEpoch(model) + 1; if(saveEveryNEpochs != null && epochsDone > 0 && epochsDone % saveEveryNEpochs == 0){ //Save: @@ -119,7 +119,7 @@ public class CheckpointListener extends BaseTrainingListener implements Serializ } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { if (startTime < 0) { startTime = System.currentTimeMillis(); startIter = iteration; @@ -164,7 +164,7 @@ public class CheckpointListener extends BaseTrainingListener implements Serializ } } - private void saveCheckpoint(Model model) { + private void saveCheckpoint(IModel model) { try{ saveCheckpointHelper(model); } catch (Exception e){ @@ -172,7 +172,7 @@ public class CheckpointListener extends BaseTrainingListener implements Serializ } } - private void saveCheckpointHelper(Model model) throws Exception { + private void saveCheckpointHelper(IModel model) throws Exception { if(!checkpointRecordFile.exists()){ checkpointRecordFile.createNewFile(); write(Checkpoint.getFileHeader() + "\n", checkpointRecordFile); @@ -243,27 +243,27 @@ public class CheckpointListener extends BaseTrainingListener implements Serializ return str; } - protected static int getIter(Model model) { + protected static int getIter(IModel model) { if (model instanceof MultiLayerNetwork) { - return ((MultiLayerNetwork) model).getLayerWiseConfigurations().getIterationCount(); + return ((MultiLayerNetwork) model).getNetConfiguration().getIterationCount(); } else if (model instanceof ComputationGraph) { - return ((ComputationGraph) model).getConfiguration().getIterationCount(); + return ((ComputationGraph) model).getComputationGraphConfiguration().getIterationCount(); } else { - return model.conf().getIterationCount(); + return model.getNetConfiguration().getIterationCount(); } } - protected static int getEpoch(Model model) { + protected static int getEpoch(IModel model) { if (model instanceof MultiLayerNetwork) { - return ((MultiLayerNetwork) model).getLayerWiseConfigurations().getEpochCount(); + return 
((MultiLayerNetwork) model).getNetConfiguration().getEpochCount(); } else if (model instanceof ComputationGraph) { - return ((ComputationGraph) model).getConfiguration().getEpochCount(); + return ((ComputationGraph) model).getComputationGraphConfiguration().getEpochCount(); } else { - return model.conf().getEpochCount(); + return model.getNetConfiguration().getEpochCount(); } } - protected static String getModelType(Model model){ + protected static String getModelType(IModel model){ if(model.getClass() == MultiLayerNetwork.class){ return "MultiLayerNetwork"; } else if(model.getClass() == ComputationGraph.class){ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresIterationListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresIterationListener.java index 51f798e26..32058d3db 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresIterationListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresIterationListener.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.listeners; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; import java.io.File; @@ -132,9 +132,9 @@ public class CollectScoresIterationListener extends BaseTrainingListener { } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { if (++iterationCount % frequency == 0) { - double score = model.score(); + double score = model.getScore(); scoreVsIter.reallocateGuard(); scoreVsIter.addScore(iteration, score); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresListener.java index 4f6d17b3c..a08b18814 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/CollectScoresListener.java @@ -25,7 +25,7 @@ import it.unimi.dsi.fastutil.ints.IntArrayList; import lombok.Data; import lombok.EqualsAndHashCode; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; @@ -53,9 +53,9 @@ public class CollectScoresListener extends BaseTrainingListener implements Seria } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { if(iteration % frequency == 0){ - double score = model.score(); + double score = model.getScore(); listIteration.add(iteration); listScore.add(score); if(logScore) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ComposableIterationListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ComposableIterationListener.java index 3b82fc6b2..4b67fcede 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ComposableIterationListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ComposableIterationListener.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.listeners; -import org.deeplearning4j.nn.api.Model; 
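As an illustration of the refactored listener contract above (callbacks now receive IModel and the score is read through getScore()), here is a minimal custom listener; the class name is invented and the sketch is not part of the patch.

import lombok.extern.slf4j.Slf4j;
import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.optimize.api.BaseTrainingListener;

@Slf4j
public class ScoreLoggingListenerSketch extends BaseTrainingListener {
    // BaseTrainingListener supplies no-op defaults for the remaining IModel callbacks.
    @Override
    public void iterationDone(IModel model, int iteration, int epoch) {
        log.info("epoch {} iteration {} score {}", epoch, iteration, model.getScore());
    }
}

Attaching such a listener is assumed to be unchanged by this patch, e.g. net.setListeners(new ScoreLoggingListenerSketch()).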
+import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; import org.deeplearning4j.optimize.api.TrainingListener; @@ -42,7 +42,7 @@ public class ComposableIterationListener extends BaseTrainingListener implements } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { for (TrainingListener listener : listeners) listener.iterationDone(model, iteration, epoch); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/EvaluativeListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/EvaluativeListener.java index a05d14a87..f98dd0aad 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/EvaluativeListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/EvaluativeListener.java @@ -24,8 +24,8 @@ import lombok.Getter; import lombok.NonNull; import lombok.Setter; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.exception.DL4JInvalidInputException; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.BaseTrainingListener; @@ -39,8 +39,6 @@ import org.nd4j.linalg.dataset.MultiDataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; -import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @Slf4j @@ -193,24 +191,24 @@ public class EvaluativeListener extends BaseTrainingListener { * @param iteration the iteration */ @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { if (invocationType == InvocationType.ITERATION_END) invokeListener(model); } @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { if (invocationType == InvocationType.EPOCH_START) invokeListener(model); } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { if (invocationType == InvocationType.EPOCH_END) invokeListener(model); } - protected void invokeListener(Model model) { + protected void invokeListener(IModel model) { if (iterationCount.get() == null) iterationCount.set(new AtomicLong(0)); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/FailureTestingListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/FailureTestingListener.java index c05626511..d6ac11b41 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/FailureTestingListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/FailureTestingListener.java @@ -25,7 +25,7 @@ import lombok.Data; import lombok.EqualsAndHashCode; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.TrainingListener; @@ -51,41 +51,41 @@ public class FailureTestingListener implements TrainingListener, Serializable { } @Override - public void iterationDone(Model model, int iteration, int 
epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { call(CallType.ITER_DONE, model); } @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { call(CallType.EPOCH_START, model); } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { call(CallType.EPOCH_END, model); } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { call(CallType.FORWARD_PASS, model); } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { call(CallType.FORWARD_PASS, model); } @Override - public void onGradientCalculation(Model model) { + public void onGradientCalculation(IModel model) { call(CallType.GRADIENT_CALC, model); } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { call(CallType.BACKWARD_PASS, model); } - protected void call(CallType callType, Model model){ + protected void call(CallType callType, IModel model){ if(!trigger.initialized()){ trigger.initialize(); } @@ -149,7 +149,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { * @param model Model * @return */ - public abstract boolean triggerFailure(CallType callType, int iteration, int epoch, Model model); + public abstract boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model); public boolean initialized(){ return initialized; @@ -170,7 +170,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { } @Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { boolean b = true; for(FailureTrigger ft : triggers) b &= ft.triggerFailure(callType, iteration, epoch, model); @@ -191,7 +191,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { } @Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { boolean b = false; for(FailureTrigger ft : triggers) b |= ft.triggerFailure(callType, iteration, epoch, model); @@ -213,7 +213,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { } @Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { return (this.callType == CallType.ANY || callType == this.callType) && rng.nextDouble() < probability; } @@ -237,7 +237,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { } @Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { return (System.currentTimeMillis() - initTime) > msSinceInit; } @@ -260,7 +260,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { @Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { return shouldFail; } @@ -284,7 +284,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { 
@Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { return shouldFail; } @@ -314,7 +314,7 @@ public class FailureTestingListener implements TrainingListener, Serializable { } @Override - public boolean triggerFailure(CallType callType, int iteration, int epoch, Model model) { + public boolean triggerFailure(CallType callType, int iteration, int epoch, IModel model) { return (isEpoch && epoch == count) || (!isEpoch && iteration == count); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/PerformanceListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/PerformanceListener.java index 68402f40e..4f287816e 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/PerformanceListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/PerformanceListener.java @@ -22,14 +22,12 @@ package org.deeplearning4j.optimize.listeners; import com.google.common.base.Preconditions; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.BaseTrainingListener; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.factory.Nd4j; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.ObjectInputStream; @@ -78,7 +76,7 @@ public class PerformanceListener extends BaseTrainingListener implements Seriali } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { // we update lastTime on every iteration // just to simplify things if (lastTime.get() == null) @@ -142,7 +140,7 @@ public class PerformanceListener extends BaseTrainingListener implements Seriali builder.append("batches/sec: ").append(String.format("%.3f", batchesPerSec.get())).append("; "); if (reportScore) - builder.append("score: ").append(model.score()).append(";"); + builder.append("score: ").append(model.getScore()).append(";"); if (reportGC){ if(gcBeans == null){ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreIterationListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreIterationListener.java index 6568d2f67..0d65332ad 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreIterationListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreIterationListener.java @@ -21,10 +21,8 @@ package org.deeplearning4j.optimize.listeners; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.Serializable; @@ -43,11 +41,11 @@ public class ScoreIterationListener extends BaseTrainingListener implements Seri public ScoreIterationListener() {} @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { if (printIterations <= 0) printIterations = 1; if (iteration % printIterations == 0) { - 
double score = model.score(); + double score = model.getScore(); log.info("Score at iteration {} is {}", iteration, score); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreToChartListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreToChartListener.java index 2fc2999d6..1370bbad7 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreToChartListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/ScoreToChartListener.java @@ -26,7 +26,7 @@ import lombok.extern.slf4j.Slf4j; import okhttp3.OkHttpClient; import okhttp3.Request; import okhttp3.Response; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; @Slf4j @@ -40,8 +40,8 @@ public class ScoreToChartListener extends BaseTrainingListener { } @Override - public void iterationDone(Model model, int iteration, int epoch) { - double score = model.score(); + public void iterationDone(IModel model, int iteration, int epoch) { + double score = model.getScore(); String nurl = url+"s="+score+"&n="+seriesName; OkHttpClient client = new OkHttpClient(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/SleepyTrainingListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/SleepyTrainingListener.java index 4c262a64c..834778001 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/SleepyTrainingListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/SleepyTrainingListener.java @@ -22,7 +22,7 @@ package org.deeplearning4j.optimize.listeners; import lombok.*; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; import org.nd4j.common.util.ThreadUtils; import org.nd4j.linalg.api.ndarray.INDArray; @@ -160,7 +160,7 @@ public class SleepyTrainingListener extends BaseTrainingListener implements Seri } @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { sleep(lastES.get(), timerES); if (lastES.get() == null) @@ -170,7 +170,7 @@ public class SleepyTrainingListener extends BaseTrainingListener implements Seri } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { sleep(lastEE.get(), timerEE); if (lastEE.get() == null) @@ -180,7 +180,7 @@ public class SleepyTrainingListener extends BaseTrainingListener implements Seri } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { sleep(lastFF.get(), timerFF); if (lastFF.get() == null) @@ -190,7 +190,7 @@ public class SleepyTrainingListener extends BaseTrainingListener implements Seri } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { sleep(lastFF.get(), timerFF); if (lastFF.get() == null) @@ -200,7 +200,7 @@ public class SleepyTrainingListener extends BaseTrainingListener implements Seri } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { sleep(lastIteration.get(), timerIteration); if (lastIteration.get() == null) @@ -210,7 +210,7 @@ public class SleepyTrainingListener extends 
BaseTrainingListener implements Seri } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { sleep(lastBP.get(), timerBP); if (lastBP.get() == null) @@ -220,7 +220,7 @@ public class SleepyTrainingListener extends BaseTrainingListener implements Seri } @Override - public void onGradientCalculation(Model model) { + public void onGradientCalculation(IModel model) { // } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/TimeIterationListener.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/TimeIterationListener.java index cc48c216b..8a947e4c0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/TimeIterationListener.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/TimeIterationListener.java @@ -21,10 +21,8 @@ package org.deeplearning4j.optimize.listeners; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.api.BaseTrainingListener; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.Serializable; import java.util.Date; @@ -46,7 +44,7 @@ public class TimeIterationListener extends BaseTrainingListener implements Seria } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { long currentIteration = iterationCounter.incrementAndGet(); long elapsed = System.currentTimeMillis() - start; long remaining = (iterationCount - currentIteration) * elapsed / currentIteration; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/EvaluationCallback.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/EvaluationCallback.java index 6cb756bb8..5f14ef3b0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/EvaluationCallback.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/EvaluationCallback.java @@ -20,11 +20,11 @@ package org.deeplearning4j.optimize.listeners.callbacks; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.listeners.EvaluativeListener; import org.nd4j.evaluation.IEvaluation; public interface EvaluationCallback { - void call(EvaluativeListener listener, Model model, long invocationsCount, IEvaluation[] evaluations); + void call(EvaluativeListener listener, IModel model, long invocationsCount, IEvaluation[] evaluations); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/ModelSavingCallback.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/ModelSavingCallback.java index df46f1fc6..cb0fe44a0 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/ModelSavingCallback.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/listeners/callbacks/ModelSavingCallback.java @@ -23,7 +23,7 @@ package org.deeplearning4j.optimize.listeners.callbacks; import lombok.NonNull; import org.apache.commons.io.FilenameUtils; import org.deeplearning4j.exception.DL4JInvalidConfigException; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.optimize.listeners.EvaluativeListener; import 
org.deeplearning4j.util.ModelSerializer; import org.nd4j.evaluation.IEvaluation; @@ -66,7 +66,7 @@ public class ModelSavingCallback implements EvaluationCallback { } @Override - public void call(EvaluativeListener listener, Model model, long invocationsCount, IEvaluation[] evaluations) { + public void call(EvaluativeListener listener, IModel model, long invocationsCount, IEvaluation[] evaluations) { String temp = template.replaceAll("%d", "" + invocationsCount); @@ -81,7 +81,7 @@ public class ModelSavingCallback implements EvaluationCallback { * @param model * @param filename */ - protected void save(Model model, String filename) { + protected void save(IModel model, String filename) { try { ModelSerializer.writeModel(model, filename, true); } catch (IOException e) { diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BackTrackLineSearch.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BackTrackLineSearch.java index 18e64c081..534807c1f 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BackTrackLineSearch.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BackTrackLineSearch.java @@ -20,9 +20,9 @@ package org.deeplearning4j.optimize.solvers; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.math3.util.FastMath; import org.deeplearning4j.exception.InvalidStepException; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.stepfunctions.NegativeGradientStepFunction; import org.deeplearning4j.optimize.api.ConvexOptimizer; import org.deeplearning4j.optimize.api.LineOptimizer; @@ -33,7 +33,6 @@ import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.api.ops.impl.scalar.comparison.ScalarSetValue; import org.nd4j.linalg.api.ops.impl.transforms.comparison.Eps; -import org.nd4j.linalg.api.shape.Shape; import org.nd4j.linalg.factory.Nd4j; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.slf4j.Logger; @@ -44,7 +43,7 @@ import static org.nd4j.linalg.ops.transforms.Transforms.abs; public class BackTrackLineSearch implements LineOptimizer { private static final Logger log = LoggerFactory.getLogger(BackTrackLineSearch.class); - private final Model layer; + private final IModel layer; private final StepFunction stepFunction; private final ConvexOptimizer optimizer; private int maxIterations; @@ -64,18 +63,18 @@ public class BackTrackLineSearch implements LineOptimizer { * @param stepFunction * @param optimizer */ - public BackTrackLineSearch(Model layer, StepFunction stepFunction, ConvexOptimizer optimizer) { + public BackTrackLineSearch(IModel layer, StepFunction stepFunction, ConvexOptimizer optimizer) { this.layer = layer; this.stepFunction = stepFunction; this.optimizer = optimizer; - this.maxIterations = layer.conf().getMaxNumLineSearchIterations(); + this.maxIterations = layer.getNetConfiguration().getMaxNumLineSearchIterations(); } /** * @param optimizable * @param optimizer */ - public BackTrackLineSearch(Model optimizable, ConvexOptimizer optimizer) { + public BackTrackLineSearch(IModel optimizable, ConvexOptimizer optimizer) { this(optimizable, new NegativeDefaultStepFunction(), optimizer); } @@ -118,7 +117,7 @@ public class BackTrackLineSearch implements LineOptimizer { public double setScoreFor(INDArray parameters, LayerWorkspaceMgr workspaceMgr) { layer.setParams(parameters); layer.computeGradientAndScore(workspaceMgr); - return layer.score(); + return 
layer.getScore(); } // returns fraction of step size if found a good step @@ -157,7 +156,7 @@ public class BackTrackLineSearch implements LineOptimizer { oldStep = 0.0; step2 = 0.0; - score = score2 = scoreAtStart = layer.score(); + score = score2 = scoreAtStart = layer.getScore(); double bestScore = score; double bestStepSize = 1.0; diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BaseOptimizer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BaseOptimizer.java index 3a8bfee10..0cd76263c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BaseOptimizer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/BaseOptimizer.java @@ -21,12 +21,11 @@ package org.deeplearning4j.optimize.solvers; import lombok.Getter; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.exception.InvalidStepException; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -64,7 +63,7 @@ public abstract class BaseOptimizer implements ConvexOptimizer { @Getter protected StepFunction stepFunction; protected Collection trainingListeners = new ArrayList<>(); - protected Model model; + protected IModel model; protected BackTrackLineSearch lineMaximizer; protected Updater updater; protected ComputationGraphUpdater computationGraphUpdater; @@ -90,7 +89,7 @@ public abstract class BaseOptimizer implements ConvexOptimizer { * @param model */ public BaseOptimizer(NeuralNetConfiguration conf, StepFunction stepFunction, - Collection trainingListeners, Model model) { + Collection trainingListeners, IModel model) { this.conf = conf; this.stepFunction = (stepFunction != null ? stepFunction : getDefaultStepFunctionForOptimizer(this.getClass())); this.trainingListeners = trainingListeners != null ? 
trainingListeners : new ArrayList(); @@ -289,7 +288,7 @@ public abstract class BaseOptimizer implements ConvexOptimizer { @Override - public void updateGradientAccordingToParams(Gradient gradient, Model model, int batchSize, LayerWorkspaceMgr workspaceMgr) { + public void updateGradientAccordingToParams(Gradient gradient, IModel model, int batchSize, LayerWorkspaceMgr workspaceMgr) { if (model instanceof ComputationGraph) { ComputationGraph graph = (ComputationGraph) model; if (computationGraphUpdater == null) { @@ -316,8 +315,8 @@ public abstract class BaseOptimizer implements ConvexOptimizer { */ @Override public void setupSearchState(Pair pair) { - INDArray gradient = pair.getFirst().gradient(conf.variables()); - INDArray params = model.params().dup(); //Need dup here: params returns an array that isn't a copy (hence changes to this are problematic for line search methods) + INDArray gradient = pair.getFirst().gradient(conf.netWideVariables()); + INDArray params = model.getModelParams().dup(); //Need dup here: params returns an array that isn't a copy (hence changes to this are problematic for line search methods) searchState.put(GRADIENT_KEY, gradient); searchState.put(SCORE_KEY, pair.getSecond()); searchState.put(PARAMS_KEY, params); @@ -332,39 +331,39 @@ public abstract class BaseOptimizer implements ConvexOptimizer { } } - public static int getIterationCount(Model model) { + public static int getIterationCount(IModel model) { if (model instanceof MultiLayerNetwork) { - return ((MultiLayerNetwork) model).getLayerWiseConfigurations().getIterationCount(); + return ((MultiLayerNetwork) model).getNetConfiguration().getIterationCount(); } else if (model instanceof ComputationGraph) { - return ((ComputationGraph) model).getConfiguration().getIterationCount(); + return ((ComputationGraph) model).getComputationGraphConfiguration().getIterationCount(); } else { - return model.conf().getIterationCount(); + return model.getNetConfiguration().getIterationCount(); } } - public static void incrementIterationCount(Model model, int incrementBy) { + public static void incrementIterationCount(IModel model, int incrementBy) { if (model instanceof MultiLayerNetwork) { - MultiLayerConfiguration conf = ((MultiLayerNetwork) model).getLayerWiseConfigurations(); + NeuralNetConfiguration conf = ((MultiLayerNetwork) model).getNetConfiguration(); conf.setIterationCount(conf.getIterationCount() + incrementBy); } else if (model instanceof ComputationGraph) { - ComputationGraphConfiguration conf = ((ComputationGraph) model).getConfiguration(); + ComputationGraphConfiguration conf = ((ComputationGraph) model).getComputationGraphConfiguration(); conf.setIterationCount(conf.getIterationCount() + incrementBy); } else { - model.conf().setIterationCount(model.conf().getIterationCount() + incrementBy); + model.getNetConfiguration().setIterationCount(model.getNetConfiguration().getIterationCount() + incrementBy); } } - public static int getEpochCount(Model model){ + public static int getEpochCount(IModel model){ if (model instanceof MultiLayerNetwork) { - return ((MultiLayerNetwork) model).getLayerWiseConfigurations().getEpochCount(); + return ((MultiLayerNetwork) model).getNetConfiguration().getEpochCount(); } else if (model instanceof ComputationGraph) { - return ((ComputationGraph) model).getConfiguration().getEpochCount(); + return ((ComputationGraph) model).getComputationGraphConfiguration().getEpochCount(); } else { - return model.conf().getEpochCount(); + return model.getNetConfiguration().getEpochCount(); } } - 
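A short sketch (class name invented, not part of the patch) of how the static counter helpers refactored above are used for either network type.

import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.optimize.solvers.BaseOptimizer;

public class TrainingCounterSketch {
    static void logAndAdvance(IModel model) {
        // Dispatches internally to getNetConfiguration() or getComputationGraphConfiguration().
        int iter = BaseOptimizer.getIterationCount(model);
        int epoch = BaseOptimizer.getEpochCount(model);
        System.out.println("iteration=" + iter + ", epoch=" + epoch);
        // Increments the iteration counter on the same configuration object.
        BaseOptimizer.incrementIterationCount(model, 1);
    }
}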
public static void applyConstraints(Model model){ + public static void applyConstraints(IModel model){ int iter = getIterationCount(model); int epoch = getEpochCount(model); model.applyConstraints(iter, epoch); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/ConjugateGradient.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/ConjugateGradient.java index b07ade04a..614075e20 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/ConjugateGradient.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/ConjugateGradient.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.solvers; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.optimize.api.StepFunction; import org.deeplearning4j.optimize.api.TrainingListener; @@ -38,7 +38,7 @@ public class ConjugateGradient extends BaseOptimizer { public ConjugateGradient(NeuralNetConfiguration conf, StepFunction stepFunction, - Collection trainingListeners, Model model) { + Collection trainingListeners, IModel model) { super(conf, stepFunction, trainingListeners, model); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LBFGS.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LBFGS.java index 5760ee337..80a94c6e6 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LBFGS.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LBFGS.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.solvers; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.optimize.api.StepFunction; @@ -42,7 +42,7 @@ public class LBFGS extends BaseOptimizer { private final int m = 4; public LBFGS(NeuralNetConfiguration conf, StepFunction stepFunction, - Collection trainingListeners, Model model) { + Collection trainingListeners, IModel model) { super(conf, stepFunction, trainingListeners, model); } @@ -71,7 +71,7 @@ public class LBFGS extends BaseOptimizer { @Override public void postStep(INDArray gradient) { INDArray previousParameters = (INDArray) searchState.get("oldparams"); - INDArray parameters = model.params(); + INDArray parameters = model.getModelParams(); INDArray previousGradient = (INDArray) searchState.get(GRADIENT_KEY); LinkedList rho = (LinkedList) searchState.get("rho"); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LineGradientDescent.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LineGradientDescent.java index 2afc53453..78ebf3231 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LineGradientDescent.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/LineGradientDescent.java @@ -20,7 +20,7 @@ package org.deeplearning4j.optimize.solvers; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.optimize.api.StepFunction; import org.deeplearning4j.optimize.api.TrainingListener; @@ -33,7 +33,7 @@ public class LineGradientDescent extends BaseOptimizer { private static final long serialVersionUID = 
6336124657542062284L; public LineGradientDescent(NeuralNetConfiguration conf, StepFunction stepFunction, - Collection trainingListeners, Model model) { + Collection trainingListeners, IModel model) { super(conf, stepFunction, trainingListeners, model); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/StochasticGradientDescent.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/StochasticGradientDescent.java index fbee9c2a3..e0de12fe9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/StochasticGradientDescent.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/StochasticGradientDescent.java @@ -21,7 +21,7 @@ package org.deeplearning4j.optimize.solvers; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -41,7 +41,7 @@ public class StochasticGradientDescent extends BaseOptimizer { public StochasticGradientDescent(NeuralNetConfiguration conf, StepFunction stepFunction, - Collection trainingListeners, Model model) { + Collection trainingListeners, IModel model) { super(conf, stepFunction, trainingListeners, model); } @@ -54,7 +54,7 @@ public class StochasticGradientDescent extends BaseOptimizer { log.info("Applying external updates before FF..."); // we'll just fire off params update process - accumulator.applyUpdate(stepFunction, model.params(), Nd4j.createUninitialized(model.params().shape(), model.params().ordering()), false); + accumulator.applyUpdate(stepFunction, model.getModelParams(), Nd4j.createUninitialized(model.getModelParams().shape(), model.getModelParams().ordering()), false); } } @@ -62,7 +62,7 @@ public class StochasticGradientDescent extends BaseOptimizer { Gradient gradient = pair.getFirst(); - INDArray params = model.params(); + INDArray params = model.getModelParams(); // if optimizer has GradientsAccumulator defined - go for it if (accumulator != null) { @@ -87,7 +87,7 @@ public class StochasticGradientDescent extends BaseOptimizer { // if there's no update available - just go on then } else { - // if accumulator isn't used - we just to for direct updates application + // if accumulator isn't used - we just go for direct updates application stepFunction.step(params, gradient.gradient()); } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/accumulation/EncodedGradientsAccumulator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/accumulation/EncodedGradientsAccumulator.java index 490acc178..16e8a97e7 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/accumulation/EncodedGradientsAccumulator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/optimize/solvers/accumulation/EncodedGradientsAccumulator.java @@ -24,8 +24,8 @@ import lombok.Getter; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import lombok.val; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.exception.DL4JInvalidConfigException; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.optimize.api.StepFunction; import org.deeplearning4j.optimize.solvers.accumulation.encoding.ResidualPostProcessor; import org.deeplearning4j.optimize.solvers.accumulation.encoding.ThresholdAlgorithm; @@ -171,8 +171,8 @@ public 
class EncodedGradientsAccumulator implements GradientsAccumulator, Regist } - public static long getOptimalBufferSize(Model model, int numWorkers, int queueSize) { - return getOptimalBufferSize(model.params().length(), numWorkers, queueSize); + public static long getOptimalBufferSize(IModel model, int numWorkers, int queueSize) { + return getOptimalBufferSize(model.getModelParams().length(), numWorkers, queueSize); } @Override diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/Convolution1DUtils.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/Convolution1DUtils.java index 32c40bdfc..cd3bd3f2c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/Convolution1DUtils.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/Convolution1DUtils.java @@ -27,7 +27,6 @@ import org.deeplearning4j.nn.conf.CNN2DFormat; import org.deeplearning4j.nn.conf.ConvolutionMode; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.layers.*; -import org.deeplearning4j.nn.conf.layers.convolutional.Cropping1D; import org.deeplearning4j.nn.conf.layers.recurrent.SimpleRnn; import org.nd4j.common.base.Preconditions; import org.nd4j.linalg.api.ndarray.INDArray; @@ -62,7 +61,7 @@ public class Convolution1DUtils { * @return true if the input layer has an rnn format * false otherwise */ - public static boolean hasRnnDataFormat(Layer layer) { + public static boolean hasRnnDataFormat(LayerConfiguration layer) { return layer instanceof Convolution1D || layer instanceof Convolution1DLayer || layer instanceof Subsampling1DLayer || @@ -78,8 +77,8 @@ public class Convolution1DUtils { * @param layer the layer to get the format for * @return the format for the layer */ - public static RNNFormat getRnnFormatFromLayer(Layer layer) { - Preconditions.checkState(hasRnnDataFormat(layer),"Layer of type " + layer.getClass().getName() + " and name " + layer.getLayerName() + " does not have an RNNFormat"); + public static RNNFormat getRnnFormatFromLayer(LayerConfiguration layer) { + Preconditions.checkState(hasRnnDataFormat(layer),"ILayer of type " + layer.getClass().getName() + " and name " + layer.getLayerName() + " does not have an RNNFormat"); if(layer instanceof SimpleRnn) { SimpleRnn simpleRnn = (SimpleRnn) layer; return simpleRnn.getRnnDataFormat(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ConvolutionUtils.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ConvolutionUtils.java index 616f1c620..e7adaa86a 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ConvolutionUtils.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ConvolutionUtils.java @@ -52,7 +52,7 @@ public class ConvolutionUtils { public static final String NCHW_NHWC_ERROR_MSG = "Note: Convolution layers can be configured for either NCHW (channels first)" + " or NHWC (channels last) format for input images and activations.\n" + "Layers can be configured using .dataFormat(CNN2DFormat.NCHW/NHWC) when constructing the layer, or for the entire net using" + - " .setInputType(InputType.convolutional(height, width, depth, CNN2DForman.NCHW/NHWC)).\n" + + " .inputType(InputType.convolutional(height, width, depth, CNN2DForman.NCHW/NHWC)).\n" + "ImageRecordReader and NativeImageLoader can also be configured to load image data in either NCHW or NHWC format which must match the network"; @@ -176,7 +176,7 @@ public class ConvolutionUtils { * @param layer the layer to check * @return true if 
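EncodedGradientsAccumulator keeps both getOptimalBufferSize overloads; the model-based one now takes IModel and derives the length from getModelParams(). A usage sketch with arbitrary worker and queue counts:

import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.optimize.solvers.accumulation.EncodedGradientsAccumulator;

class BufferSizeSketch {
  static long bufferFor(IModel model) {
    // Equivalent to getOptimalBufferSize(model.getModelParams().length(), 4, 2)
    return EncodedGradientsAccumulator.getOptimalBufferSize(model, 4, 2);
  }
}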
the layer is one of the above types, false otherwise */ - public static boolean layerHasConvolutionLayout(Layer layer) { + public static boolean layerHasConvolutionLayout(LayerConfiguration layer) { return layer instanceof ConvolutionLayer || layer instanceof SubsamplingLayer || layer instanceof SpaceToBatchLayer || @@ -191,15 +191,15 @@ public class ConvolutionUtils { /** * Get the format for a given layer. - * {@link #layerHasConvolutionLayout(Layer)} - * should return true on the given {@link Layer} + * {@link #layerHasConvolutionLayout(LayerConfiguration)} + * should return true on the given {@link LayerConfiguration} * type or an {@link IllegalArgumentException} * will be thrown * @param layer the input layer * @return the {@link CNN2DFormat} for the given * layer */ - public static CNN2DFormat getFormatForLayer(Layer layer) { + public static CNN2DFormat getFormatForLayer(LayerConfiguration layer) { if(layer instanceof Convolution1DLayer) { Convolution1DLayer convolution1DLayer = (Convolution1DLayer) layer; return convolution1DLayer.getCnn2dDataFormat(); @@ -520,9 +520,9 @@ public class ConvolutionUtils { * @param conf the configuration to get height and width from * @return the configuration to get height and width from */ - public static int[] getHeightAndWidth(NeuralNetConfiguration conf) { + public static int[] getHeightAndWidth(LayerConfiguration conf) { return getHeightAndWidth( - ((org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer()).getKernelSize()); + ((org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf).getKernelSize()); } @@ -531,8 +531,8 @@ public class ConvolutionUtils { * the number of kernels from * @return the number of kernels/filters to apply */ - public static long numFeatureMap(NeuralNetConfiguration conf) { - return ((org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf.getLayer()).getNOut(); + public static long numFeatureMap(LayerConfiguration conf) { + return ((org.deeplearning4j.nn.conf.layers.ConvolutionLayer) conf).getNOut(); } /** diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/CrashReportingUtil.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/CrashReportingUtil.java index ac28ced80..b2e10ece5 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/CrashReportingUtil.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/CrashReportingUtil.java @@ -41,12 +41,12 @@ import java.util.Set; import lombok.Getter; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.exception.ExceptionUtils; import org.bytedeco.javacpp.Pointer; import org.deeplearning4j.common.config.DL4JSystemProperties; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.BackpropType; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -136,7 +136,7 @@ public class CrashReportingUtil { * @param net Net to generate the crash dump for. May not be null * @param e Throwable/exception. 
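The ConvolutionUtils helpers now accept a LayerConfiguration directly instead of unwrapping it from a NeuralNetConfiguration. A sketch assuming ConvolutionLayer remains a LayerConfiguration subtype, as the instanceof checks above imply:

import java.util.Arrays;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.deeplearning4j.util.ConvolutionUtils;

class ConvUtilsSketch {
  public static void main(String[] args) {
    LayerConfiguration conv = new ConvolutionLayer.Builder(5, 5).nIn(1).nOut(20).build();
    int[] heightWidth = ConvolutionUtils.getHeightAndWidth(conv); // kernel height/width: [5, 5]
    long featureMaps = ConvolutionUtils.numFeatureMap(conv);      // nOut: 20
    System.out.println(Arrays.toString(heightWidth) + " feature maps: " + featureMaps);
  }
}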
Stack trace will be included in the network output */ - public static void writeMemoryCrashDump(@NonNull Model net, @NonNull Throwable e){ + public static void writeMemoryCrashDump(@NonNull IModel net, @NonNull Throwable e){ if(!crashDumpsEnabled){ return; } @@ -189,7 +189,7 @@ public class CrashReportingUtil { * @param net Net to generate the report for * @return Report as a String */ - public static String generateMemoryStatus(Model net, int minibatch, InputType... inputTypes){ + public static String generateMemoryStatus(IModel net, int minibatch, InputType... inputTypes){ MultiLayerNetwork mln = null; ComputationGraph cg = null; boolean isMLN; @@ -204,7 +204,7 @@ public class CrashReportingUtil { StringBuilder sb = genericMemoryStatus(); int bytesPerElement; - switch (isMLN ? mln.params().dataType() : cg.params().dataType()){ + switch (isMLN ? mln.getModelParams().dataType() : cg.getModelParams().dataType()){ case DOUBLE: bytesPerElement = 8; break; @@ -260,7 +260,7 @@ public class CrashReportingUtil { } long sumMem = 0; - long nParams = net.params().length(); + long nParams = net.getModelParams().length(); sb.append("\n----- Network Information -----\n") .append(f("Network # Parameters", nParams)) .append(fBytes("Parameter Memory", bytesPerElement * nParams)); @@ -310,22 +310,22 @@ public class CrashReportingUtil { //Workspaces, backprop type, layer info, activation info, helper info if(isMLN) { - sb.append(f("Backprop Type", mln.getLayerWiseConfigurations().getBackpropType())); - if(mln.getLayerWiseConfigurations().getBackpropType() == BackpropType.TruncatedBPTT){ - sb.append(f("TBPTT Length", mln.getLayerWiseConfigurations().getTbpttFwdLength() + "/" + mln.getLayerWiseConfigurations().getTbpttBackLength())); + sb.append(f("Backprop Type", mln.getNetConfiguration().getBackpropType())); + if(mln.getNetConfiguration().getBackpropType() == BackpropType.TruncatedBPTT){ + sb.append(f("TBPTT Length", mln.getNetConfiguration().getTbpttFwdLength() + "/" + mln.getNetConfiguration().getTbpttBackLength())); } - sb.append(f("Workspace Mode: Training", mln.getLayerWiseConfigurations().getTrainingWorkspaceMode())); - sb.append(f("Workspace Mode: Inference", mln.getLayerWiseConfigurations().getInferenceWorkspaceMode())); + sb.append(f("Workspace Mode: Training", mln.getNetConfiguration().getTrainingWorkspaceMode())); + sb.append(f("Workspace Mode: Inference", mln.getNetConfiguration().getInferenceWorkspaceMode())); appendLayerInformation(sb, mln.getLayers(), bytesPerElement); appendHelperInformation(sb, mln.getLayers()); appendActivationShapes(mln, (inputTypes == null || inputTypes.length == 0 ? 
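writeMemoryCrashDump and generateMemoryStatus now accept any IModel, so the same crash-reporting hook covers MultiLayerNetwork and ComputationGraph behind the interface. A sketch of wrapping a training step; the Runnable stand-in is illustrative:

import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.util.CrashReportingUtil;

class CrashDumpSketch {
  static void runWithCrashDump(IModel model, Runnable trainingStep) {
    try {
      trainingStep.run();
    } catch (Throwable t) {
      CrashReportingUtil.writeMemoryCrashDump(model, t); // write the memory/crash report, then rethrow
      throw t;
    }
  }
}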
null : inputTypes[0]), minibatch, sb, bytesPerElement); } else { - sb.append(f("Backprop Type", cg.getConfiguration().getBackpropType())); - if(cg.getConfiguration().getBackpropType() == BackpropType.TruncatedBPTT){ - sb.append(f("TBPTT Length", cg.getConfiguration().getTbpttFwdLength() + "/" + cg.getConfiguration().getTbpttBackLength())); + sb.append(f("Backprop Type", cg.getComputationGraphConfiguration().getBackpropType())); + if(cg.getComputationGraphConfiguration().getBackpropType() == BackpropType.TruncatedBPTT){ + sb.append(f("TBPTT Length", cg.getComputationGraphConfiguration().getTbpttFwdLength() + "/" + cg.getComputationGraphConfiguration().getTbpttBackLength())); } - sb.append(f("Workspace Mode: Training", cg.getConfiguration().getTrainingWorkspaceMode())); - sb.append(f("Workspace Mode: Inference", cg.getConfiguration().getInferenceWorkspaceMode())); + sb.append(f("Workspace Mode: Training", cg.getComputationGraphConfiguration().getTrainingWorkspaceMode())); + sb.append(f("Workspace Mode: Inference", cg.getComputationGraphConfiguration().getInferenceWorkspaceMode())); appendLayerInformation(sb, cg.getLayers(), bytesPerElement); appendHelperInformation(sb, cg.getLayers()); appendActivationShapes(cg, sb, bytesPerElement); @@ -334,9 +334,9 @@ public class CrashReportingUtil { //Listener info: Collection listeners; if(isMLN){ - listeners = mln.getListeners(); + listeners = mln.getTrainingListeners(); } else { - listeners = cg.getListeners(); + listeners = cg.getTrainingListeners(); } sb.append("\n----- Network Training Listeners -----\n"); @@ -461,29 +461,29 @@ public class CrashReportingUtil { List l = new ArrayList<>(layerClasses.keySet()); Collections.sort(l); sb.append(f("Number of Layers", layers.length)); - sb.append("Layer Counts\n"); + sb.append("ILayer Counts\n"); for(String s : l){ sb.append(" ").append(f(s, layerClasses.get(s))); } - sb.append("Layer Parameter Breakdown\n"); + sb.append("ILayer Parameter Breakdown\n"); String format = " %-3s %-20s %-20s %-20s %-20s"; - sb.append(String.format(format, "Idx", "Name", "Layer Type", "Layer # Parameters", "Layer Parameter Memory")).append("\n"); + sb.append(String.format(format, "Idx", "Name", "ILayer Type", "ILayer # Parameters", "ILayer Parameter Memory")).append("\n"); for(Layer layer : layers){ long numParams = layer.numParams(); - sb.append(String.format(format, layer.getIndex(), layer.conf().getLayer().getLayerName(), + sb.append(String.format(format, layer.getIndex(), layer.getLayerConfiguration().getLayerName(), layer.getClass().getSimpleName(), numParams, fBytes(numParams * bytesPerElement))).append("\n"); } } private static void appendHelperInformation(StringBuilder sb, org.deeplearning4j.nn.api.Layer[] layers){ - sb.append("\n----- Layer Helpers - Memory Use -----\n"); + sb.append("\n----- ILayer Helpers - Memory Use -----\n"); int helperCount = 0; long helperWithMemCount = 0L; long totalHelperMem = 0L; - //Layer index, layer name, layer class, helper class, total memory, breakdown + //ILayer index, layer name, layer class, helper class, total memory, breakdown String format = "%-3s %-20s %-25s %-30s %-12s %s"; boolean header = false; for(Layer l : layers){ @@ -503,13 +503,13 @@ public class CrashReportingUtil { } int idx = l.getIndex(); - String layerName = l.conf().getLayer().getLayerName(); + String layerName = l.getLayerConfiguration().getLayerName(); if(layerName == null) layerName = String.valueOf(idx); if(!header){ - sb.append(String.format(format, "#", "Layer Name", "Layer Class", "Helper Class", "Total 
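The configuration getters are renamed per network type: MultiLayerNetwork.getLayerWiseConfigurations() becomes getNetConfiguration(), and ComputationGraph.getConfiguration() becomes getComputationGraphConfiguration(). A small sketch of both accessors:

import org.deeplearning4j.nn.conf.BackpropType;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;

class ConfigAccessSketch {
  static BackpropType backpropTypeOf(MultiLayerNetwork mln) {
    return mln.getNetConfiguration().getBackpropType();
  }

  static BackpropType backpropTypeOf(ComputationGraph cg) {
    return cg.getComputationGraphConfiguration().getBackpropType();
  }
}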
Memory", "Memory Breakdown")) + sb.append(String.format(format, "#", "ILayer Name", "ILayer Class", "Helper Class", "Total Memory", "Memory Breakdown")) .append("\n"); header = true; } @@ -549,9 +549,9 @@ public class CrashReportingUtil { sb.append(f("Current Minibatch Size", minibatch)); sb.append(f("Input Shape", Arrays.toString(inputShape))); - List inputTypes = net.getLayerWiseConfigurations().getLayerActivationTypes(inputType); + List inputTypes = net.getNetConfiguration().getLayerActivationTypes(inputType); String format = "%-3s %-20s %-20s %-42s %-20s %-12s %-12s"; - sb.append(String.format(format, "Idx", "Name", "Layer Type", "Activations Type", "Activations Shape", + sb.append(String.format(format, "Idx", "Name", "ILayer Type", "Activations Type", "Activations Shape", "# Elements", "Memory")).append("\n"); org.deeplearning4j.nn.api.Layer[] layers = net.getLayers(); long totalActivationBytes = 0; @@ -567,7 +567,7 @@ public class CrashReportingUtil { bytes = 0; } totalActivationBytes += bytes; - sb.append(String.format(format, i, layers[i].conf().getLayer().getLayerName(), layers[i].getClass().getSimpleName(), + sb.append(String.format(format, i, layers[i].getLayerConfiguration().getLayerName(), layers[i].getClass().getSimpleName(), inputTypes.get(i), Arrays.toString(shape), (numElements < 0 ? "" : String.valueOf(numElements)), fBytes(bytes))).append("\n"); last = bytes; } @@ -598,11 +598,11 @@ public class CrashReportingUtil { for( int i=0; i inputTypes = net.getConfiguration().getLayerActivationTypes(inputType); + Map inputTypes = net.getComputationGraphConfiguration().getLayerActivationTypes(inputType); GraphIndices indices = net.calculateIndices(); String format = "%-3s %-20s %-20s %-42s %-20s %-12s %-12s"; - sb.append(String.format(format, "Idx", "Name", "Layer Type", "Activations Type", "Activations Shape", + sb.append(String.format(format, "Idx", "Name", "ILayer Type", "Activations Type", "Activations Shape", "# Elements", "Memory")).append("\n"); org.deeplearning4j.nn.api.Layer[] layers = net.getLayers(); long totalActivationBytes = 0; @@ -633,7 +633,7 @@ public class CrashReportingUtil { sb.append(String.format(format, i, layerName, className, it, Arrays.toString(shape), (numElements < 0 ? 
"" : String.valueOf(numElements)), fBytes(bytes))).append("\n"); - if(!net.getConfiguration().getNetworkOutputs().contains(layerName)){ + if(!net.getComputationGraphConfiguration().getNetworkOutputs().contains(layerName)){ totalExOutput += bytes; } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/DL4JModelValidator.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/DL4JModelValidator.java index 6413a5eb4..739d3482c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/DL4JModelValidator.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/DL4JModelValidator.java @@ -22,9 +22,9 @@ package org.deeplearning4j.util; import lombok.NonNull; import org.apache.commons.io.IOUtils; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.common.validation.Nd4jCommonValidator; @@ -47,7 +47,7 @@ public class DL4JModelValidator { /** * Validate whether the file represents a valid MultiLayerNetwork saved previously with {@link MultiLayerNetwork#save(File)} - * or {@link ModelSerializer#writeModel(Model, File, boolean)}, to be read with {@link MultiLayerNetwork#load(File, boolean)} + * or {@link ModelSerializer#writeModel(IModel, File, boolean)}, to be read with {@link MultiLayerNetwork#load(File, boolean)} * * @param f File that should represent an saved MultiLayerNetwork * @return Result of validation @@ -80,14 +80,14 @@ public class DL4JModelValidator { } try{ - MultiLayerConfiguration.fromJson(config); + NeuralNetConfiguration.fromJson(config); } catch (Throwable t){ return ValidationResult.builder() .formatType("MultiLayerNetwork") .formatClass(MultiLayerNetwork.class) .valid(false) .path(Nd4jCommonValidator.getPath(f)) - .issues(Collections.singletonList("Zip file JSON model configuration does not appear to represent a valid MultiLayerConfiguration")) + .issues(Collections.singletonList("Zip file JSON model configuration does not appear to represent a valid NeuralNetConfiguration")) .exception(t) .build(); } @@ -104,7 +104,7 @@ public class DL4JModelValidator { /** * Validate whether the file represents a valid ComputationGraph saved previously with {@link ComputationGraph#save(File)} - * or {@link ModelSerializer#writeModel(Model, File, boolean)}, to be read with {@link ComputationGraph#load(File, boolean)} + * or {@link ModelSerializer#writeModel(IModel, File, boolean)}, to be read with {@link ComputationGraph#load(File, boolean)} * * @param f File that should represent an saved MultiLayerNetwork * @return Result of validation diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ModelSerializer.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ModelSerializer.java index ae7e2e2df..8649bcd19 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ModelSerializer.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/ModelSerializer.java @@ -20,6 +20,7 @@ package org.deeplearning4j.util; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.input.CloseShieldInputStream; import org.deeplearning4j.common.util.DL4JFileUtils; import com.google.common.io.Files; @@ -28,10 +29,9 @@ import 
lombok.extern.slf4j.Slf4j; import org.apache.commons.io.IOUtils; import org.apache.commons.io.output.CloseShieldOutputStream; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.common.base.Preconditions; @@ -74,7 +74,7 @@ public class ModelSerializer { * @param saveUpdater whether to save the updater or not * @throws IOException */ - public static void writeModel(@NonNull Model model, @NonNull File file, boolean saveUpdater) throws IOException { + public static void writeModel(@NonNull IModel model, @NonNull File file, boolean saveUpdater) throws IOException { writeModel(model,file,saveUpdater,null); } @@ -88,7 +88,7 @@ public class ModelSerializer { * @param dataNormalization the normalizer to save (optional) * @throws IOException */ - public static void writeModel(@NonNull Model model, @NonNull File file, boolean saveUpdater,DataNormalization dataNormalization) throws IOException { + public static void writeModel(@NonNull IModel model, @NonNull File file, boolean saveUpdater,DataNormalization dataNormalization) throws IOException { try (BufferedOutputStream stream = new BufferedOutputStream(new FileOutputStream(file))) { writeModel(model, stream, saveUpdater,dataNormalization); } @@ -103,7 +103,7 @@ public class ModelSerializer { * or not * @throws IOException */ - public static void writeModel(@NonNull Model model, @NonNull String path, boolean saveUpdater) throws IOException { + public static void writeModel(@NonNull IModel model, @NonNull String path, boolean saveUpdater) throws IOException { try (BufferedOutputStream stream = new BufferedOutputStream(new FileOutputStream(path))) { writeModel(model, stream, saveUpdater); } @@ -116,7 +116,7 @@ public class ModelSerializer { * @param saveUpdater whether to save the updater for the model or not * @throws IOException */ - public static void writeModel(@NonNull Model model, @NonNull OutputStream stream, boolean saveUpdater) + public static void writeModel(@NonNull IModel model, @NonNull OutputStream stream, boolean saveUpdater) throws IOException { writeModel(model,stream,saveUpdater,null); } @@ -132,16 +132,16 @@ public class ModelSerializer { * @param dataNormalization the normalizer ot save (may be null) * @throws IOException */ - public static void writeModel(@NonNull Model model, @NonNull OutputStream stream, boolean saveUpdater,DataNormalization dataNormalization) + public static void writeModel(@NonNull IModel model, @NonNull OutputStream stream, boolean saveUpdater,DataNormalization dataNormalization) throws IOException { ZipOutputStream zipfile = new ZipOutputStream(new CloseShieldOutputStream(stream)); // Save configuration as JSON String json = ""; if (model instanceof MultiLayerNetwork) { - json = ((MultiLayerNetwork) model).getLayerWiseConfigurations().toJson(); + json = ((MultiLayerNetwork) model).getNetConfiguration().toJson(); } else if (model instanceof ComputationGraph) { - json = ((ComputationGraph) model).getConfiguration().toJson(); + json = ((ComputationGraph) model).getComputationGraphConfiguration().toJson(); } ZipEntry config = new ZipEntry(CONFIGURATION_JSON); @@ -152,10 +152,10 @@ public class ModelSerializer { ZipEntry coefficients = 
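Every writeModel overload now takes IModel; the variant with a DataNormalization argument still lets the normalizer be bundled into the same zip (it may be null). Sketch:

import java.io.File;
import java.io.IOException;
import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.util.ModelSerializer;
import org.nd4j.linalg.dataset.api.preprocessor.DataNormalization;

class WriteModelSketch {
  // 'true' keeps the updater state; normalizer is optional and may be null.
  static void saveWithNormalizer(IModel model, File target, DataNormalization normalizer) throws IOException {
    ModelSerializer.writeModel(model, target, true, normalizer);
  }
}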
new ZipEntry(COEFFICIENTS_BIN); zipfile.putNextEntry(coefficients); DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(zipfile)); - INDArray params = model.params(); + INDArray params = model.getModelParams(); if(params != null) { try { - Nd4j.write(model.params(), dos); + Nd4j.write(model.getModelParams(), dos); } finally { dos.flush(); } @@ -318,20 +318,20 @@ public class ModelSerializer { if (gotConfig && gotCoefficients) { - MultiLayerConfiguration confFromJson; + NeuralNetConfiguration confFromJson; try{ - confFromJson = MultiLayerConfiguration.fromJson(json); + confFromJson = NeuralNetConfiguration.fromJson(json); } catch (Exception e){ ComputationGraphConfiguration cg; try{ cg = ComputationGraphConfiguration.fromJson(json); } catch (Exception e2){ //Invalid, and not a compgraph - throw new RuntimeException("Error deserializing JSON MultiLayerConfiguration. Saved model JSON is" + - " not a valid MultiLayerConfiguration", e); + throw new RuntimeException("Error deserializing JSON NeuralNetConfiguration. Saved model JSON is" + + " not a valid NeuralNetConfiguration", e); } if(cg.getNetworkInputs() != null && cg.getVertices() != null) { - throw new RuntimeException("Error deserializing JSON MultiLayerConfiguration. Saved model appears to be " + + throw new RuntimeException("Error deserializing JSON NeuralNetConfiguration. Saved model appears to be " + "a ComputationGraph - use ModelSerializer.restoreComputationGraph instead"); } else { throw e; @@ -554,7 +554,7 @@ public class ModelSerializer { throw e; } try{ - MultiLayerConfiguration.fromJson(json); + NeuralNetConfiguration.fromJson(json); } catch (Exception e2){ //Invalid, and not a compgraph throw new RuntimeException("Error deserializing JSON ComputationGraphConfiguration. Saved model JSON is" + @@ -652,7 +652,7 @@ public class ModelSerializer { * @param model * @return */ - public static Task taskByModel(Model model) { + public static Task taskByModel(IModel model) { Task task = new Task(); try { task.setArchitectureType(Task.ArchitectureType.RECURRENT); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/NetworkUtils.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/NetworkUtils.java index 7ed0a4bcb..f19dd8a47 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/NetworkUtils.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/NetworkUtils.java @@ -21,14 +21,13 @@ package org.deeplearning4j.util; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; -import org.deeplearning4j.nn.api.Trainable; +import net.brutex.ai.dnn.api.IModel; +import org.deeplearning4j.nn.api.ITrainableLayer; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.InputPreProcessor; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.BaseLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.vertex.GraphVertex; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -61,21 +60,21 @@ public class NetworkUtils { // by definition the identical for a MLN and "single stack" computation graph. This also has to hold // for the updater state... 
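On the restore path, MultiLayerConfiguration.fromJson is replaced by NeuralNetConfiguration.fromJson, with the same fallback check for ComputationGraph JSON. A JSON round trip using only the renamed methods:

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;

class ConfJsonSketch {
  static NeuralNetConfiguration copyConfiguration(MultiLayerNetwork net) {
    String json = net.getNetConfiguration().toJson();
    return NeuralNetConfiguration.fromJson(json); // previously MultiLayerConfiguration.fromJson(json)
  }
}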
- ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder() - .dataType(net.getLayerWiseConfigurations().getDataType()) + ComputationGraphConfiguration.GraphBuilder b = NeuralNetConfiguration.builder() + .dataType(net.getNetConfiguration().getDataType()) .graphBuilder(); - MultiLayerConfiguration origConf = net.getLayerWiseConfigurations().clone(); + NeuralNetConfiguration origConf = net.getNetConfiguration().clone(); int layerIdx = 0; String lastLayer = "in"; b.addInputs("in"); - for (NeuralNetConfiguration c : origConf.getConfs()) { + for (NeuralNetConfiguration c : origConf.getNetConfigurations()) { String currLayer = String.valueOf(layerIdx); InputPreProcessor preproc = origConf.getInputPreProcess(layerIdx); - b.addLayer(currLayer, c.getLayer(), preproc, lastLayer); + b.addLayer(currLayer, c.getFlattenedLayerConfigurations().get(layerIdx), preproc, lastLayer); lastLayer = currLayer; layerIdx++; @@ -87,7 +86,7 @@ public class NetworkUtils { ComputationGraph cg = new ComputationGraph(conf); cg.init(); - cg.setParams(net.params()); + cg.setParams(net.getModelParams()); //Also copy across updater state: INDArray updaterState = net.getUpdater().getStateViewArray(); @@ -123,9 +122,9 @@ public class NetworkUtils { private static void setLearningRate(MultiLayerNetwork net, int layerNumber, double newLr, ISchedule newLrSchedule, boolean refreshUpdater) { - Layer l = net.getLayer(layerNumber).conf().getLayer(); - if (l instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) l; + LayerConfiguration l = net.getLayer(layerNumber).getLayerConfiguration(); + if (l instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) l; IUpdater u = bl.getIUpdater(); if (u != null && u.hasLearningRate()) { if (newLrSchedule != null) { @@ -155,8 +154,8 @@ public class NetworkUtils { /** * Set the learning rate schedule for all layers in the network to the specified schedule. * This schedule will replace any/all existing schedules, and also any fixed learning rate values.
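NetworkUtils now reaches per-layer settings through Layer.getLayerConfiguration() and the renamed BaseLayerConfiguration. The same lookup, extracted as a sketch that returns the layer's updater (null when the layer has no trainable parameters):

import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.learning.config.IUpdater;

class PerLayerUpdaterSketch {
  static IUpdater updaterOfLayer(MultiLayerNetwork net, int layerNumber) {
    LayerConfiguration l = net.getLayer(layerNumber).getLayerConfiguration(); // was net.getLayer(n).conf().getLayer()
    return (l instanceof BaseLayerConfiguration) ? ((BaseLayerConfiguration) l).getIUpdater() : null;
  }
}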
- * Note that the iteration/epoch counts will not be reset. Use {@link MultiLayerConfiguration#setIterationCount(int)} - * and {@link MultiLayerConfiguration#setEpochCount(int)} if this is required + * Note that the iteration/epoch counts will not be reset. Use {@link NeuralNetConfiguration#setIterationCount(int)} + * and {@link NeuralNetConfiguration#setEpochCount(int)} if this is required * * @param newLrSchedule New learning rate schedule for all layers */ @@ -184,8 +183,8 @@ public class NetworkUtils { * Note also that {@link #setLearningRate(MultiLayerNetwork, ISchedule)} should also be used in preference, when all layers need * to be set to a new LR schedule.
* This schedule will replace any/all existing schedules, and also any fixed learning rate values.
- * Note also that the iteration/epoch counts will not be reset. Use {@link MultiLayerConfiguration#setIterationCount(int)} - * and {@link MultiLayerConfiguration#setEpochCount(int)} if this is required + * Note also that the iteration/epoch counts will not be reset. Use {@link NeuralNetConfiguration#setIterationCount(int)} + * and {@link NeuralNetConfiguration#setEpochCount(int)} if this is required * * @param layerNumber Number of the layer to set the LR schedule for * @param lrSchedule New learning rate for a single layer @@ -199,15 +198,15 @@ public class NetworkUtils { * Note: If the layer has no learning rate (no parameters, or an updater without a learning rate) then null is returned * * @param net Network - * @param layerNumber Layer number to get the learning rate for + * @param layerNumber ILayer number to get the learning rate for * @return Learning rate for the specified layer, or null */ public static Double getLearningRate(MultiLayerNetwork net, int layerNumber) { - Layer l = net.getLayer(layerNumber).conf().getLayer(); + LayerConfiguration l = net.getLayer(layerNumber).getLayerConfiguration(); int iter = net.getIterationCount(); int epoch = net.getEpochCount(); - if (l instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) l; + if (l instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) l; IUpdater u = bl.getIUpdater(); if (u != null && u.hasLearningRate()) { double d = u.getLearningRate(iter, epoch); @@ -238,16 +237,16 @@ public class NetworkUtils { private static void setLearningRate(ComputationGraph net, double newLr, ISchedule lrSchedule) { org.deeplearning4j.nn.api.Layer[] layers = net.getLayers(); for (int i = 0; i < layers.length; i++) { - setLearningRate(net, layers[i].conf().getLayer().getLayerName(), newLr, lrSchedule, false); + setLearningRate(net, layers[i].getLayerConfiguration().getLayerName(), newLr, lrSchedule, false); } refreshUpdater(net); } private static void setLearningRate(ComputationGraph net, String layerName, double newLr, ISchedule newLrSchedule, boolean refreshUpdater) { - Layer l = net.getLayer(layerName).conf().getLayer(); - if (l instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) l; + LayerConfiguration l = net.getLayer(layerName).getLayerConfiguration(); + if (l instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) l; IUpdater u = bl.getIUpdater(); if (u != null && u.hasLearningRate()) { if (newLrSchedule != null) { @@ -321,15 +320,15 @@ public class NetworkUtils { * Note: If the layer has no learning rate (no parameters, or an updater without a learning rate) then null is returned * * @param net Network - * @param layerName Layer name to get the learning rate for + * @param layerName ILayer name to get the learning rate for * @return Learning rate for the specified layer, or null */ public static Double getLearningRate(ComputationGraph net, String layerName) { - Layer l = net.getLayer(layerName).conf().getLayer(); - int iter = net.getConfiguration().getIterationCount(); - int epoch = net.getConfiguration().getEpochCount(); - if (l instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) l; + LayerConfiguration l = net.getLayer(layerName).getLayerConfiguration(); + int iter = net.getComputationGraphConfiguration().getIterationCount(); + int epoch = net.getComputationGraphConfiguration().getEpochCount(); + if (l instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) l; IUpdater u = bl.getIUpdater(); if (u != null && u.hasLearningRate()) { 
double d = u.getLearningRate(iter, epoch); @@ -353,7 +352,7 @@ public class NetworkUtils { * @see org.deeplearning4j.nn.graph.ComputationGraph#outputSingle(INDArray...) * @see org.deeplearning4j.nn.multilayer.MultiLayerNetwork#output(INDArray) */ - public static INDArray output(Model model, INDArray input) { + public static INDArray output(IModel model, INDArray input) { if (model instanceof MultiLayerNetwork) { final MultiLayerNetwork multiLayerNetwork = (MultiLayerNetwork) model; @@ -500,7 +499,7 @@ public class NetworkUtils { } - private static int getId(Trainable trainable){ + private static int getId(ITrainableLayer trainable){ if(trainable instanceof GraphVertex){ GraphVertex gv = (GraphVertex)trainable; return gv.getVertexIndex(); diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/OutputLayerUtil.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/OutputLayerUtil.java index 08a3d086a..1829fbd40 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/OutputLayerUtil.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/OutputLayerUtil.java @@ -20,6 +20,7 @@ package org.deeplearning4j.util; +import lombok.NonNull; import org.deeplearning4j.exception.DL4JInvalidConfigException; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.objdetect.Yolo2OutputLayer; @@ -58,7 +59,7 @@ public class OutputLayerUtil { OUTSIDE_ZERO_ONE_RANGE.add(ActivationThresholdedReLU.class); } - private static final String COMMON_MSG = "\nThis configuration validation check can be disabled for MultiLayerConfiguration" + + private static final String COMMON_MSG = "\nThis configuration validation check can be disabled for NeuralNetConfiguration" + " and ComputationGraphConfiguration using validateOutputLayerConfig(false), however this is not recommended."; @@ -68,9 +69,9 @@ public class OutputLayerUtil { * * If the specified layer is not an output layer, this is a no-op * @param layerName Name of the layer - * @param layer Layer + * @param layer ILayer */ - public static void validateOutputLayer(String layerName, Layer layer){ + public static void validateOutputLayer(String layerName, LayerConfiguration layer){ IActivation activation; ILossFunction loss; long nOut; @@ -148,7 +149,7 @@ public class OutputLayerUtil { return lf instanceof LossMCXENT || lf instanceof LossBinaryXENT; } - public static boolean activationExceedsZeroOneRange(IActivation activation, boolean isLossLayer){ + public static boolean activationExceedsZeroOneRange(@NonNull IActivation activation, boolean isLossLayer){ if(OUTSIDE_ZERO_ONE_RANGE.contains(activation.getClass())){ //Note: we're intentionally excluding identity here, for situations like dense(softmax) -> loss(identity) @@ -166,7 +167,7 @@ public class OutputLayerUtil { * @param outputLayer Output layer * @param classifierEval Class for the classifier evaluation */ - public static void validateOutputLayerForClassifierEvaluation(Layer outputLayer, Class classifierEval){ + public static void validateOutputLayerForClassifierEvaluation(LayerConfiguration outputLayer, Class classifierEval){ if(outputLayer instanceof Yolo2OutputLayer){ throw new IllegalStateException("Classifier evaluation using " + classifierEval.getSimpleName() + " class cannot be applied for object" + " detection evaluation using Yolo2OutputLayer: " + classifierEval.getSimpleName() + " class is for classifier evaluation only."); @@ -174,15 +175,15 @@ public class OutputLayerUtil { //Check that the activation function 
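NetworkUtils.output(...) keeps its dispatching behaviour but is typed against IModel. A usage sketch; the input shape is an arbitrary placeholder:

import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.util.NetworkUtils;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

class OutputDispatchSketch {
  static INDArray predict(IModel model) {
    INDArray input = Nd4j.rand(1, 6); // placeholder minibatch of one example with six features
    // Internally routes to MultiLayerNetwork.output(...) or ComputationGraph.outputSingle(...).
    return NetworkUtils.output(model, input);
  }
}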
provides probabilities. This can't catch everything, but should catch a few // of the common mistakes users make - if(outputLayer instanceof BaseLayer){ - BaseLayer bl = (BaseLayer)outputLayer; + if(outputLayer instanceof BaseLayerConfiguration){ + BaseLayerConfiguration bl = (BaseLayerConfiguration)outputLayer; boolean isOutputLayer = outputLayer instanceof OutputLayer || outputLayer instanceof RnnOutputLayer || outputLayer instanceof CenterLossOutputLayer; if(activationExceedsZeroOneRange(bl.getActivationFn(), !isOutputLayer)){ throw new IllegalStateException("Classifier evaluation using " + classifierEval.getSimpleName() + " class cannot be applied to output" + " layers with activation functions that are not probabilities (in range 0 to 1). Output layer type: " + outputLayer.getClass().getSimpleName() + " has activation function " + bl.getActivationFn().getClass().getSimpleName() + - ". This check can be disabled using MultiLayerNetwork.getLayerWiseConfigurations().setValidateOutputLayerConfig(false)" + + ". This check can be disabled using MultiLayerNetwork.getConfiguration().setValidateOutputLayerConfig(false)" + " or ComputationGraph.getConfiguration().setValidateOutputLayerConfig(false)"); } } diff --git a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/TimeSeriesUtils.java b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/TimeSeriesUtils.java index df4583cd8..4723211b9 100644 --- a/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/TimeSeriesUtils.java +++ b/cavis-dnn/cavis-dnn-nn/src/main/java/org/deeplearning4j/util/TimeSeriesUtils.java @@ -23,7 +23,7 @@ package org.deeplearning4j.util; import lombok.val; import org.deeplearning4j.nn.conf.RNNFormat; import org.deeplearning4j.nn.conf.layers.BaseRecurrentLayer; -import org.deeplearning4j.nn.conf.layers.Layer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.layers.recurrent.Bidirectional; import org.deeplearning4j.nn.conf.layers.recurrent.LastTimeStep; import org.deeplearning4j.nn.conf.layers.recurrent.TimeDistributed; @@ -440,9 +440,9 @@ public class TimeSeriesUtils { /** * Get the {@link RNNFormat} from the RNN layer, accounting for the presence of wrapper layers like Bidirectional, * LastTimeStep, etc - * @param layer Layer to get the RNNFormat from + * @param layer ILayer to get the RNNFormat from */ - public static RNNFormat getFormatFromRnnLayer(Layer layer){ + public static RNNFormat getFormatFromRnnLayer(LayerConfiguration layer){ if(layer instanceof BaseRecurrentLayer){ return ((BaseRecurrentLayer) layer).getRnnDataFormat(); } else if(layer instanceof MaskZeroLayer){ diff --git a/cavis-dnn/cavis-dnn-nn/src/main/resources/simplelogger.properties b/cavis-dnn/cavis-dnn-nn/src/main/resources/simplelogger.properties new file mode 100644 index 000000000..51c081db4 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/main/resources/simplelogger.properties @@ -0,0 +1,25 @@ +# +# +# ****************************************************************************** +# * +# * This program and the accompanying materials are made available under the +# * terms of the Apache License, Version 2.0 which is available at +# * https://www.apache.org/licenses/LICENSE-2.0. +# * +# * See the NOTICE file distributed with this work for additional +# * information regarding copyright ownership. 
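TimeSeriesUtils.getFormatFromRnnLayer also moves to LayerConfiguration, still unwrapping Bidirectional, LastTimeStep and similar wrappers. A sketch assuming LSTM remains a BaseRecurrentLayer subtype in the refactored hierarchy:

import org.deeplearning4j.nn.conf.RNNFormat;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.LayerConfiguration;
import org.deeplearning4j.util.TimeSeriesUtils;

class RnnFormatSketch {
  public static void main(String[] args) {
    LayerConfiguration lstm = new LSTM.Builder().nIn(8).nOut(16).build();
    RNNFormat format = TimeSeriesUtils.getFormatFromRnnLayer(lstm);
    System.out.println(format); // NCW unless the layer was configured otherwise
  }
}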
+# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# * License for the specific language governing permissions and limitations +# * under the License. +# * +# * SPDX-License-Identifier: Apache-2.0 +# ***************************************************************************** +# +# + +org.slf4j.simpleLogger.defaultLogLevel = debug + +org.slf4j.simpleLogger.log.org.deeplearning4j.optimize.listeners = info +org.slf4j.simplelogger.log.org.nd4j.linalg.dataset = info \ No newline at end of file diff --git a/cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/api/dnnTest.java b/cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/api/dnnTest.java new file mode 100644 index 000000000..7b5176670 --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/api/dnnTest.java @@ -0,0 +1,142 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.api; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.util.Iterator; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.RandomUtils; +import org.deeplearning4j.datasets.iterator.FloatsDataSetIterator; +import org.deeplearning4j.nn.conf.GradientNormalization; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.ActivationLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.OutputLayer; +import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; +import org.deeplearning4j.nn.weights.WeightInitXavier; +import org.deeplearning4j.optimize.listeners.ScoreToChartListener; +import org.junit.jupiter.api.Test; +import org.nd4j.common.primitives.Pair; +import org.nd4j.linalg.activations.Activation; +import org.nd4j.linalg.activations.impl.ActivationLReLU; +import org.nd4j.linalg.activations.impl.ActivationSigmoid; +import org.nd4j.linalg.learning.config.Adam; +import org.nd4j.linalg.lossfunctions.LossFunctions; + + +class dnnTest { + + @Test + void testFFLayer() { + int numFeatures = 6; + int batchSize = 5; + int numRows = 100; + AtomicInteger cnt = new AtomicInteger(0); + FloatsDataSetIterator iterator = new FloatsDataSetIterator(floatIterable(numRows, numFeatures), batchSize); + + assertTrue(iterator.hasNext()); + + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().build(); + + /** + * NeuralNetConfiguration confxx = NeuralNetConfiguration.builder() .seed(42) .updater(UPDATER) + * .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) + * 
.gradientNormalizationThreshold(GRADIENT_THRESHOLD) .weightInit(WeightInit.XAVIER) + * .activation(Activation.IDENTITY) .list(genLayers()) .inputType(InputType.convolutional(X_DIM, + * Y_DIM, CHANNELS)) // .inputPreProcessor("CNN1", new FeedForwardToCnnPreProcessor(Y_DIM, + * X_DIM, CHANNELS)) .build(); + */ + + /** + * new + * DenseLayer.Builder().nIn(INPUT).nOut(X_DIM*Y_DIM*CHANNELS).weightInit(WeightInit.NORMAL).build(), + * new ActivationLayer.Builder(new ActivationLReLU(0.2)).build(), new + * DenseLayer.Builder().nIn(X_DIM*Y_DIM*CHANNELS).nOut(X_DIM*Y_DIM).build(), new + * ActivationLayer.Builder(new ActivationLReLU(0.2)).build(), new + * DenseLayer.Builder().nIn(X_DIM*Y_DIM).nOut(X_DIM*Y_DIM).build(), new + * ActivationLayer.Builder(new ActivationLReLU(0.2)).build(), new + * DenseLayer.Builder().nIn(X_DIM*Y_DIM).nOut(X_DIM*Y_DIM*CHANNELS).activation(Activation.TANH) + */ + NeuralNetConfiguration network = + NN.net() + .seed(42) + .updater(Adam.builder().learningRate(0.0002).beta1(0.5).build()) + .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) + .gradientNormalizationThreshold(100) + .weightInit(new WeightInitXavier()) + .activation(new ActivationSigmoid()) + // .inputType(InputType.convolutional(28, 28, 1)) + .layer(new DenseLayer.Builder().nIn(6).nOut(20).build()) + .layer(new ActivationLayer.Builder(new ActivationLReLU(0.2)).build()) + .layer(new DenseLayer.Builder().nIn(20).nOut(40).build()) + .layer(new ActivationLayer.Builder(new ActivationLReLU(0.2)).build()) + .layer(new DenseLayer.Builder().nIn(40).nOut(12).build()) + .layer(new ActivationLayer.Builder(new ActivationLReLU(0.2)).build()) + .layer(new DenseLayer.Builder().nIn(12).nOut(8).build()) + .layer(new ActivationLayer.Builder(new ActivationLReLU(0.2)).build()) + .layer(new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS).activation(Activation.SIGMOID).nOut(6).build()) + .build(); + + MultiLayerNetwork net = new MultiLayerNetwork(network); + net.addTrainingListeners(new ScoreToChartListener("dnnTest")); + FloatsDataSetIterator dset = new FloatsDataSetIterator(floatIterable(numRows, numFeatures), batchSize); + + for (int i = 0; i < 2000000; i++) { + net.fit(dset); + System.out.println("Score: " + net.getScore()); + } + } + + protected static Iterable> floatIterable(final int totalRows, final int numColumns) { + return new Iterable>() { + @Override + public Iterator> iterator() { + return new Iterator>() { + private final AtomicInteger cnt = new AtomicInteger(0); + + @Override + public boolean hasNext() { + return cnt.incrementAndGet() <= totalRows; + } + + @Override + public Pair next() { + float[] features = new float[numColumns]; + float[] labels = new float[numColumns]; + for (int i = 0; i < numColumns; i++) { + features[i] = RandomUtils.nextFloat(0, 3); + labels[i] = (float) features[i] + 1; + } + return Pair.makePair(features, labels); + } + + @Override + public void remove() { + // no-op + } + }; + } + }; + } + +} \ No newline at end of file diff --git a/cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/conf/layer/FFLayerTest.java b/cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/conf/layer/FFLayerTest.java new file mode 100644 index 000000000..8430ec35d --- /dev/null +++ b/cavis-dnn/cavis-dnn-nn/src/test/java/net/brutex/ai/dnn/conf/layer/FFLayerTest.java @@ -0,0 +1,36 @@ +/* + * + * ****************************************************************************** + * * + * * This program and the accompanying materials are made available under the + * * terms of the Apache 
License, Version 2.0 which is available at + * * https://www.apache.org/licenses/LICENSE-2.0. + * * + * * See the NOTICE file distributed with this work for additional + * * information regarding copyright ownership. + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * * License for the specific language governing permissions and limitations + * * under the License. + * * + * * SPDX-License-Identifier: Apache-2.0 + * ***************************************************************************** + * + */ + +package net.brutex.ai.dnn.conf.layer; + +import org.junit.jupiter.api.Test; + +class FFLayerTest { + + @Test + void instantiate() { + + } + + @Test + void getOutputType() { + } +} \ No newline at end of file diff --git a/cavis-dnn/cavis-dnn-nn/src/test/java/org/deeplearning4j/nn/layers/HelperUtilsTest.java b/cavis-dnn/cavis-dnn-nn/src/test/java/org/deeplearning4j/nn/layers/HelperUtilsTest.java index bd05f187f..a3d21fb0c 100644 --- a/cavis-dnn/cavis-dnn-nn/src/test/java/org/deeplearning4j/nn/layers/HelperUtilsTest.java +++ b/cavis-dnn/cavis-dnn-nn/src/test/java/org/deeplearning4j/nn/layers/HelperUtilsTest.java @@ -34,7 +34,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; /** */ -@DisplayName("Activation Layer Test") +@DisplayName("Activation ILayer Test") public class HelperUtilsTest extends BaseDL4JTest { @Override diff --git a/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainer.java b/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainer.java index ce14ae0b6..6808d4145 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainer.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainer.java @@ -25,7 +25,7 @@ import lombok.Builder; import lombok.NoArgsConstructor; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.parallelism.ParallelWrapper; @@ -58,7 +58,7 @@ public class ParameterServerTrainer extends DefaultTrainer { log.info("Sending parameters"); //send the updated params - parameterServerClient.pushNDArray(getModel().params()); + parameterServerClient.pushNDArray(getModel().getModelParams()); } @Override @@ -77,28 +77,28 @@ public class ParameterServerTrainer extends DefaultTrainer { log.info("About to send params in"); //send the updated params - parameterServerClient.pushNDArray(getModel().params()); + parameterServerClient.pushNDArray(getModel().getModelParams()); log.info("Sent params"); } @Override - public Model getModel() { + public IModel getModel() { return super.getModel(); } @Override - public void updateModel(@NonNull Model model) { + public void updateModel(@NonNull IModel model) { super.updateModel(model); } public static class ParameterServerTrainerBuilder extends DefaultTrainerBuilder { @Override - public ParameterServerTrainerBuilder originalModel(Model originalModel) { + public ParameterServerTrainerBuilder 
originalModel(IModel originalModel) { return (ParameterServerTrainerBuilder) super.originalModel(originalModel); } @Override - public ParameterServerTrainerBuilder replicatedModel(Model replicatedModel) { + public ParameterServerTrainerBuilder replicatedModel(IModel replicatedModel) { return (ParameterServerTrainerBuilder) super.replicatedModel(replicatedModel); } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainerContext.java b/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainerContext.java index 47d04d303..89f8d71a9 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainerContext.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/main/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerTrainerContext.java @@ -21,7 +21,7 @@ package org.deeplearning4j.parallelism.parameterserver; import io.aeron.driver.MediaDriver; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.parallelism.ParallelWrapper; @@ -47,7 +47,7 @@ public class ParameterServerTrainerContext implements TrainerContext { * @param args the arguments to initialize with (maybe null) */ @Override - public void init(Model model, Object... args) { + public void init(IModel model, Object... args) { mediaDriverContext = new MediaDriver.Context(); mediaDriver = MediaDriver.launchEmbedded(mediaDriverContext); parameterServerNode = new ParameterServerNode(mediaDriver, statusServerPort, numWorkers); @@ -73,7 +73,7 @@ public class ParameterServerTrainerContext implements TrainerContext { * @return the created training instance */ @Override - public Trainer create(String uuid, int threadId, Model model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, + public Trainer create(String uuid, int threadId, IModel model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, WorkspaceMode mode, int averagingFrequency) { return ParameterServerTrainer.builder().originalModel(model).parameterServerClient(ParameterServerClient .builder().aeron(parameterServerNode.getAeron()) @@ -86,12 +86,12 @@ public class ParameterServerTrainerContext implements TrainerContext { } @Override - public void finalizeRound(Model originalModel, Model... models) { + public void finalizeRound(IModel originalModel, IModel... models) { // no-op } @Override - public void finalizeTraining(Model originalModel, Model... models) { + public void finalizeTraining(IModel originalModel, IModel... 
models) { // no-op } } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/test/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerParallelWrapperTest.java b/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/test/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerParallelWrapperTest.java index d92cdf753..cfeaf0821 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/test/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerParallelWrapperTest.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper-parameterserver/src/test/java/org/deeplearning4j/parallelism/parameterserver/ParameterServerParallelWrapperTest.java @@ -23,8 +23,8 @@ package org.deeplearning4j.parallelism.parameterserver; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -56,7 +56,7 @@ public class ParameterServerParallelWrapperTest extends BaseDL4JTest { DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, 12345); log.info("Build model...."); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)).list() @@ -73,9 +73,9 @@ public class ParameterServerParallelWrapperTest extends BaseDL4JTest { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)); + .inputType(InputType.convolutionalFlat(28, 28, 1)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/EarlyStoppingParallelTrainer.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/EarlyStoppingParallelTrainer.java index e1f8b9273..25a364b36 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/EarlyStoppingParallelTrainer.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/EarlyStoppingParallelTrainer.java @@ -22,6 +22,7 @@ package org.deeplearning4j.parallelism; import com.google.common.util.concurrent.AtomicDouble; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.earlystopping.EarlyStoppingConfiguration; import org.deeplearning4j.earlystopping.EarlyStoppingResult; import org.deeplearning4j.earlystopping.listener.EarlyStoppingListener; @@ -29,7 +30,6 @@ import org.deeplearning4j.earlystopping.scorecalc.ScoreCalculator; import org.deeplearning4j.earlystopping.termination.EpochTerminationCondition; import org.deeplearning4j.earlystopping.termination.IterationTerminationCondition; import 
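The migrated test above shows the builder change: MultiLayerConfiguration.Builder with setInputType(...) is replaced by NeuralNetConfiguration.builder() with inputType(...), and build() returns a NeuralNetConfiguration that MultiLayerNetwork consumes directly. A trimmed sketch of the same pattern with placeholder layer sizes:

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Nesterovs;
import org.nd4j.linalg.lossfunctions.LossFunctions;

class BuilderMigrationSketch {
  static MultiLayerNetwork buildMnistStyleNet() {
    NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
        .seed(123)
        .weightInit(WeightInit.XAVIER)
        .updater(new Nesterovs(0.01, 0.9))
        .list()
        .layer(0, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
            .nOut(10).activation(Activation.SOFTMAX).build())
        .inputType(InputType.convolutionalFlat(28, 28, 1)) // was: setInputType(...)
        .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
    return net;
  }
}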
org.deeplearning4j.earlystopping.trainer.IEarlyStoppingTrainer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.BaseTrainingListener; @@ -45,7 +45,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @Slf4j -public class EarlyStoppingParallelTrainer implements IEarlyStoppingTrainer { +public class EarlyStoppingParallelTrainer implements IEarlyStoppingTrainer { protected T model; @@ -91,16 +91,16 @@ public class EarlyStoppingParallelTrainer implements IEarlyStop // adjust UI listeners AveragingTrainingListener trainerListener = new AveragingTrainingListener(this); if (model instanceof MultiLayerNetwork) { - Collection listeners = ((MultiLayerNetwork) model).getListeners(); + Collection listeners = ((MultiLayerNetwork) model).getTrainingListeners(); Collection newListeners = new LinkedList<>(listeners); newListeners.add(trainerListener); - model.setListeners(newListeners); + model.addTrainingListeners(newListeners.toArray(new TrainingListener[]{})); } else if (model instanceof ComputationGraph) { - Collection listeners = ((ComputationGraph) model).getListeners(); + Collection listeners = ((ComputationGraph) model).getTrainingListeners(); Collection newListeners = new LinkedList<>(listeners); newListeners.add(trainerListener); - model.setListeners(newListeners); + model.addTrainingListeners(newListeners.toArray(new TrainingListener[]{})); } this.wrapper = new ParallelWrapper.Builder<>(model).workers(workers).prefetchBuffer(prefetchBuffer) @@ -314,7 +314,7 @@ public class EarlyStoppingParallelTrainer implements IEarlyStop * with each averaging step, and thus averaging is considered analogous to an iteration. 
* @param */ - private class AveragingTrainingListener extends BaseTrainingListener { + private class AveragingTrainingListener extends BaseTrainingListener { private final Logger log = LoggerFactory.getLogger(AveragingTrainingListener.class); private final IterationTerminationCondition terminationReason = null; private final EarlyStoppingParallelTrainer trainer; @@ -325,9 +325,9 @@ public class EarlyStoppingParallelTrainer implements IEarlyStop } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { //Check per-iteration termination conditions - double latestScore = model.score(); + double latestScore = model.getScore(); trainer.setLatestScore(latestScore); for (IterationTerminationCondition c : esConfig.getIterationTerminationConditions()) { if (c.terminate(latestScore)) { diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/InplaceParallelInference.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/InplaceParallelInference.java index 20dcd51d9..571002280 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/InplaceParallelInference.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/InplaceParallelInference.java @@ -23,13 +23,12 @@ package org.deeplearning4j.parallelism; import lombok.*; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.ModelAdapter; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; -import org.deeplearning4j.parallelism.inference.InferenceMode; import org.deeplearning4j.parallelism.inference.LoadBalanceMode; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.exception.ND4JIllegalStateException; @@ -70,14 +69,14 @@ public class InplaceParallelInference extends ParallelInference { } @Override - public synchronized void updateModel(@NonNull Model model) { + public synchronized void updateModel(@NonNull IModel model) { for (val h:holders) h.updateModel(model); } @Override - protected synchronized Model[] getCurrentModelsFromWorkers() { - val models = new Model[holders.size()]; + protected synchronized IModel[] getCurrentModelsFromWorkers() { + val models = new IModel[holders.size()]; int cnt = 0; for (val h:holders) { models[cnt++] = h.sourceModel; @@ -102,7 +101,7 @@ public class InplaceParallelInference extends ParallelInference { */ public T output(@NonNull ModelAdapter adapter, INDArray[] input, INDArray[] inputMasks, INDArray[] labelsMasks) { val holder = selector.getModelForThisThread(); - Model model = null; + IModel model = null; boolean acquired = false; try { model = holder.acquireModel(); @@ -159,9 +158,9 @@ public class InplaceParallelInference extends ParallelInference { @AllArgsConstructor @lombok.Builder protected static class ModelHolder { - protected Model sourceModel; + protected IModel sourceModel; @lombok.Builder.Default protected int workers = 4; - @lombok.Builder.Default protected List replicas = new ArrayList<>(); + @lombok.Builder.Default protected List replicas = new ArrayList<>(); @lombok.Builder.Default protected boolean rootDevice = true; 
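The listener hunks above all apply one pattern: training callbacks now receive an IModel instead of a Model, the score is read through getScore() instead of score(), and listeners are attached via addTrainingListeners(...) rather than setListeners(...). A minimal sketch of a listener written against that renamed API (the ScoreLoggingListener name is invented here for illustration; the signatures are the ones introduced by this patch):

import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.optimize.api.BaseTrainingListener;

public class ScoreLoggingListener extends BaseTrainingListener {
    @Override
    public void iterationDone(IModel model, int iteration, int epoch) {
        // Model#score() is replaced by IModel#getScore() in this refactoring
        System.out.printf("iteration %d (epoch %d): score = %f%n",
                iteration, epoch, model.getScore());
    }
}

// Attached with the varargs form used elsewhere in this patch, e.g.:
// network.addTrainingListeners(new ScoreLoggingListener());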
@lombok.Builder.Default protected LoadBalanceMode loadBalanceMode = LoadBalanceMode.ROUND_ROBIN; protected int targetDeviceId; @@ -170,7 +169,7 @@ public class InplaceParallelInference extends ParallelInference { protected final ReentrantReadWriteLock modelLock = new ReentrantReadWriteLock(); // this queue is used in FIFO mode - protected final BlockingQueue queue = new LinkedBlockingQueue<>(); + protected final BlockingQueue queue = new LinkedBlockingQueue<>(); @lombok.Builder.Default protected transient boolean isCG = false; @lombok.Builder.Default protected transient boolean isMLN = false; @@ -186,7 +185,7 @@ public class InplaceParallelInference extends ParallelInference { isMLN = sourceModel instanceof MultiLayerNetwork; // we clone params only if we're not on the same device - val params = rootDevice ? sourceModel.params() : sourceModel.params().unsafeDuplication(true); + val params = rootDevice ? sourceModel.getModelParams() : sourceModel.getModelParams().unsafeDuplication(true); // and moving it to specified device (only if NOT root if (!rootDevice) @@ -195,7 +194,7 @@ public class InplaceParallelInference extends ParallelInference { for (int e = 0; e < workers; e++) { if (sourceModel instanceof ComputationGraph) { // building configuration with shared parameters - val model = new ComputationGraph(ComputationGraphConfiguration.fromJson(((ComputationGraph) sourceModel).getConfiguration().toJson())); + val model = new ComputationGraph(ComputationGraphConfiguration.fromJson(((ComputationGraph) sourceModel).getComputationGraphConfiguration().toJson())); model.init(params, false); Nd4j.getExecutioner().commit(); @@ -205,7 +204,7 @@ public class InplaceParallelInference extends ParallelInference { if (loadBalanceMode == LoadBalanceMode.FIFO) queue.add(model); } else if (sourceModel instanceof MultiLayerNetwork) { - val model = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(((MultiLayerNetwork) sourceModel).getLayerWiseConfigurations().toJson())); + val model = new MultiLayerNetwork(NeuralNetConfiguration.fromJson(((MultiLayerNetwork) sourceModel).getNetConfiguration().toJson())); model.init(params, false); Nd4j.getExecutioner().commit(); @@ -218,7 +217,7 @@ public class InplaceParallelInference extends ParallelInference { } - protected Model acquireModel() throws InterruptedException { + protected IModel acquireModel() throws InterruptedException { try { modelLock.readLock().lock(); @@ -236,7 +235,7 @@ public class InplaceParallelInference extends ParallelInference { } } - protected void releaseModel(Model model) { + protected void releaseModel(IModel model) { try { modelLock.readLock().lock(); @@ -291,7 +290,7 @@ public class InplaceParallelInference extends ParallelInference { } } - protected void updateModel(@NonNull Model model) { + protected void updateModel(@NonNull IModel model) { try { modelLock.writeLock().lock(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelInference.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelInference.java index 52a28606e..242a9f731 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelInference.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelInference.java @@ -23,10 +23,10 @@ package org.deeplearning4j.parallelism; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import lombok.val; -import org.deeplearning4j.nn.api.Model; +import 
net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.ModelAdapter; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.parallelism.inference.InferenceMode; @@ -52,7 +52,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; @Slf4j public class ParallelInference { - protected Model model; + protected IModel model; protected long nanos; protected int workers; protected int batchLimit; @@ -86,7 +86,7 @@ public class ParallelInference { * * @param model */ - public void updateModel(@NonNull Model model) { + public void updateModel(@NonNull IModel model) { if (zoo != null) { for (val w: zoo) w.updateModel(model); @@ -102,11 +102,11 @@ public class ParallelInference { * * @return */ - protected Model[] getCurrentModelsFromWorkers() { + protected IModel[] getCurrentModelsFromWorkers() { if (zoo == null) - return new Model[0]; + return new IModel[0]; - val models = new Model[zoo.length]; + val models = new IModel[zoo.length]; int cnt = 0; for (val w:zoo) { models[cnt++] = w.replicatedModel; @@ -284,14 +284,14 @@ public class ParallelInference { public static class Builder { - private final Model model; + private final IModel model; private int workers = DEFAULT_NUM_WORKERS; private int batchLimit = DEFAULT_BATCH_LIMIT; private InferenceMode inferenceMode = DEFAULT_INFERENCE_MODE; private int queueLimit = DEFAULT_QUEUE_LIMIT; protected LoadBalanceMode loadBalanceMode = LoadBalanceMode.FIFO; - public Builder(@NonNull Model model) { + public Builder(@NonNull IModel model) { this.model = model; } @@ -416,15 +416,15 @@ public class ParallelInference { private final BlockingQueue inputQueue; private final AtomicBoolean shouldWork = new AtomicBoolean(true); private final AtomicBoolean isStopped = new AtomicBoolean(false); - private Model protoModel; - private Model replicatedModel; + private IModel protoModel; + private IModel replicatedModel; private final AtomicLong counter = new AtomicLong(0); private final boolean rootDevice; private final int deviceId; private final ReentrantReadWriteLock modelLock = new ReentrantReadWriteLock(); - private InferenceWorker(int id, @NonNull Model model, @NonNull BlockingQueue inputQueue, boolean rootDevice, int deviceId) { + private InferenceWorker(int id, @NonNull IModel model, @NonNull BlockingQueue inputQueue, boolean rootDevice, int deviceId) { this.inputQueue = inputQueue; this.protoModel = model; this.rootDevice = rootDevice; @@ -439,7 +439,7 @@ public class ParallelInference { return counter.get(); } - protected void updateModel(@NonNull Model model) { + protected void updateModel(@NonNull IModel model) { try { modelLock.writeLock().lock(); this.protoModel = model; @@ -458,11 +458,11 @@ public class ParallelInference { if (protoModel instanceof ComputationGraph) { if (!rootDevice) { this.replicatedModel = new ComputationGraph(ComputationGraphConfiguration - .fromJson(((ComputationGraph) protoModel).getConfiguration().toJson())); + .fromJson(((ComputationGraph) protoModel).getComputationGraphConfiguration().toJson())); this.replicatedModel.init(); synchronized (locker) { - this.replicatedModel.setParams(protoModel.params().unsafeDuplication(true)); + this.replicatedModel.setParams(protoModel.getModelParams().unsafeDuplication(true)); Nd4j.getExecutioner().commit(); } @@ -471,12 
+471,12 @@ public class ParallelInference { } } else if (protoModel instanceof MultiLayerNetwork) { if (!rootDevice) { - this.replicatedModel = new MultiLayerNetwork(MultiLayerConfiguration.fromJson( - ((MultiLayerNetwork) protoModel).getLayerWiseConfigurations().toJson())); + this.replicatedModel = new MultiLayerNetwork(NeuralNetConfiguration.fromJson( + ((MultiLayerNetwork) protoModel).getNetConfiguration().toJson())); this.replicatedModel.init(); synchronized (locker) { - this.replicatedModel.setParams(protoModel.params().unsafeDuplication(true)); + this.replicatedModel.setParams(protoModel.getModelParams().unsafeDuplication(true)); Nd4j.getExecutioner().commit(); } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelWrapper.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelWrapper.java index 8da3b5262..921b9b49e 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelWrapper.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/ParallelWrapper.java @@ -22,6 +22,7 @@ package org.deeplearning4j.parallelism; import lombok.*; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; import org.deeplearning4j.optimize.solvers.accumulation.EncodingHandler; @@ -32,7 +33,6 @@ import org.deeplearning4j.datasets.iterator.DummyBlockDataSetIterator; import org.deeplearning4j.datasets.iterator.DummyBlockMultiDataSetIterator; import org.deeplearning4j.datasets.iterator.callbacks.InterleavedDataSetCallback; import org.deeplearning4j.exception.DL4JInvalidConfigException; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -49,7 +49,6 @@ import org.deeplearning4j.parallelism.factory.DefaultTrainerContext; import org.deeplearning4j.parallelism.factory.SymmetricTrainerContext; import org.deeplearning4j.parallelism.factory.TrainerContext; import org.deeplearning4j.parallelism.trainer.Trainer; -import org.nd4j.common.base.Preconditions; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; @@ -93,7 +92,7 @@ public class ParallelWrapper implements AutoCloseable { protected AtomicBoolean exceptionEncountered; protected Throwable exception; protected final String uuid = java.util.UUID.randomUUID().toString(); - protected Model model; + protected IModel model; protected int workers = 2; protected int prefetchSize = 2; protected int averagingFrequency = 1; @@ -131,7 +130,7 @@ public class ParallelWrapper implements AutoCloseable { } }; - protected ParallelWrapper(Model model, int workers, int prefetchSize) { + protected ParallelWrapper(IModel model, int workers, int prefetchSize) { this.model = model; this.workers = workers; this.prefetchSize = prefetchSize; @@ -346,8 +345,8 @@ public class ParallelWrapper implements AutoCloseable { List params = new ArrayList<>(); for (int cnt = 0; cnt < workers && cnt < locker.get(); cnt++) { - params.add(zoo[cnt].getModel().params()); - score += zoo[cnt].getModel().score(); + params.add(zoo[cnt].getModel().getModelParams()); + score += zoo[cnt].getModel().getScore(); } Nd4j.averageAndPropagate(null, 
params); @@ -669,7 +668,7 @@ public class ParallelWrapper implements AutoCloseable { } } - public static class Builder { + public static class Builder { protected TrainingMode trainingMode = TrainingMode.AVERAGING; protected T model; protected int workers = Nd4j.getAffinityManager().getNumberOfDevices(); @@ -957,11 +956,11 @@ public class ParallelWrapper implements AutoCloseable { List modelListeners = null; if (model instanceof MultiLayerNetwork) { - modelListeners = new ArrayList<>(((MultiLayerNetwork) model).getListeners()); - model.setListeners(Collections.emptyList()); + modelListeners = new ArrayList<>(((MultiLayerNetwork) model).getTrainingListeners()); + model.addTrainingListeners(new TrainingListener[]{}); } else if (model instanceof ComputationGraph) { - modelListeners = new ArrayList<>(((ComputationGraph) model).getListeners()); - model.setListeners(Collections.emptyList()); + modelListeners = new ArrayList<>(((ComputationGraph) model).getTrainingListeners()); + model.addTrainingListeners(new TrainingListener[]{}); } if (modelListeners != null && !modelListeners.isEmpty()) { diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContext.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContext.java index 4aea543eb..dc9fa3982 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContext.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContext.java @@ -20,7 +20,7 @@ package org.deeplearning4j.parallelism.factory; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.parallelism.ParallelWrapper; @@ -35,7 +35,7 @@ public class DefaultTrainerContext implements TrainerContext { * @param args the arguments to initialize with (maybe null) */ @Override - public void init(Model model, Object... args) { + public void init(IModel model, Object... args) { } @@ -53,7 +53,7 @@ public class DefaultTrainerContext implements TrainerContext { * @return the created training instance */ @Override - public Trainer create(String uuid, int threadId, Model model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, + public Trainer create(String uuid, int threadId, IModel model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, WorkspaceMode mode, int averagingFrequency) { DefaultTrainer trainer = DefaultTrainer.builder().originalModel(model).replicatedModel(model).threadId(threadId) @@ -68,14 +68,14 @@ public class DefaultTrainerContext implements TrainerContext { } @Override - public void finalizeRound(Model originalModel, Model... models) { + public void finalizeRound(IModel originalModel, IModel... models) { // apply averaging // TODO: move averaging here } @Override - public void finalizeTraining(Model originalModel, Model... models) { + public void finalizeTraining(IModel originalModel, IModel... 
models) { finalizeRound(originalModel, models); } } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContext.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContext.java index 3febe09c0..6bafcd4cd 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContext.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContext.java @@ -21,11 +21,10 @@ package org.deeplearning4j.parallelism.factory; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.parallelism.ParallelWrapper; -import org.deeplearning4j.parallelism.trainer.DefaultTrainer; import org.deeplearning4j.parallelism.trainer.SymmetricTrainer; import org.deeplearning4j.parallelism.trainer.Trainer; @@ -38,7 +37,7 @@ public class SymmetricTrainerContext implements TrainerContext { * @param args the arguments to initialize with (maybe null) */ @Override - public void init(Model model, Object... args) { + public void init(IModel model, Object... args) { } @@ -56,7 +55,7 @@ public class SymmetricTrainerContext implements TrainerContext { * @return the created training instance */ @Override - public Trainer create(String uuid, int threadId, Model model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, + public Trainer create(String uuid, int threadId, IModel model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, WorkspaceMode mode, int averagingFrequency) { SymmetricTrainer trainer = new SymmetricTrainer(model, uuid, threadId, mode, wrapper, useMDS); @@ -68,13 +67,13 @@ public class SymmetricTrainerContext implements TrainerContext { } @Override - public void finalizeRound(Model originalModel, Model... models) { + public void finalizeRound(IModel originalModel, IModel... models) { // no-op } @Override - public void finalizeTraining(Model originalModel, Model... models) { + public void finalizeTraining(IModel originalModel, IModel... models) { // we CAN avarage here, but for now we'll just push first model params to original model - originalModel.setParams(models[0].params()); + originalModel.setParams(models[0].getModelParams()); } } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/TrainerContext.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/TrainerContext.java index cc1bd53f7..57cdd76fd 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/TrainerContext.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/factory/TrainerContext.java @@ -20,7 +20,7 @@ package org.deeplearning4j.parallelism.factory; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.parallelism.ParallelWrapper; import org.deeplearning4j.parallelism.trainer.Trainer; @@ -33,7 +33,7 @@ public interface TrainerContext { * @param model * @param args the arguments to initialize with (maybe null) */ - void init(Model model, Object... args); + void init(IModel model, Object... 
args); /** * Create a {@link Trainer} @@ -47,7 +47,7 @@ public interface TrainerContext { * for coordination with the {@link ParallelWrapper} 's {@link org.deeplearning4j.optimize.api.TrainingListener} * @return the created training instance */ - Trainer create(String uuid, int threadId, Model model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, + Trainer create(String uuid, int threadId, IModel model, int rootDevice, boolean useMDS, ParallelWrapper wrapper, WorkspaceMode workspaceMode, int averagingFrequency); @@ -57,7 +57,7 @@ public interface TrainerContext { * @param originalModel * @param models */ - void finalizeRound(Model originalModel, Model... models); + void finalizeRound(IModel originalModel, IModel... models); /** * This method is called @@ -65,5 +65,5 @@ public interface TrainerContext { * @param originalModel * @param models */ - void finalizeTraining(Model originalModel, Model... models); + void finalizeTraining(IModel originalModel, IModel... models); } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/main/ParallelWrapperMain.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/main/ParallelWrapperMain.java index c0f6c9785..26e76ed61 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/main/ParallelWrapperMain.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/main/ParallelWrapperMain.java @@ -25,10 +25,10 @@ import com.beust.jcommander.Parameter; import com.beust.jcommander.ParameterException; import lombok.Data; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.config.DL4JClassLoading; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.impl.RemoteUIStatsStorageRouter; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.parallelism.ParallelWrapper; import org.deeplearning4j.core.util.ModelGuesser; @@ -101,7 +101,7 @@ public class ParallelWrapperMain { public void run() throws Exception { - Model model = ModelGuesser.loadModelGuess(modelPath); + IModel model = ModelGuesser.loadModelGuess(modelPath); // ParallelWrapper will take care of load balancing between GPUs. wrapper = new ParallelWrapper.Builder(model) // DataSets prefetching options. 
Set this value with respect to number of actual devices diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/DefaultTrainer.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/DefaultTrainer.java index a1909795a..522b3548a 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/DefaultTrainer.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/DefaultTrainer.java @@ -22,12 +22,12 @@ package org.deeplearning4j.parallelism.trainer; import lombok.*; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -56,7 +56,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; @AllArgsConstructor public class DefaultTrainer extends Thread implements Trainer { - protected Model replicatedModel; + protected IModel replicatedModel; // TODO: make queue size configurable @Builder.Default @@ -89,7 +89,7 @@ public class DefaultTrainer extends Thread implements Trainer { protected WorkspaceMode workspaceMode; protected int averagingFrequency; protected int threadId; - protected Model originalModel; + protected IModel originalModel; protected final ReentrantReadWriteLock modelLock = new ReentrantReadWriteLock(); @@ -135,19 +135,19 @@ public class DefaultTrainer extends Thread implements Trainer { } @Override - public Model getModel() { + public IModel getModel() { return replicatedModel; } @Override - public void updateModel(@NonNull Model model) { + public void updateModel(@NonNull IModel model) { this.shouldUpdate.set(true); try { modelLock.writeLock().lock(); if (replicatedModel instanceof MultiLayerNetwork) { - replicatedModel.setParams(model.params().unsafeDuplication(true)); + replicatedModel.setParams(model.getModelParams().unsafeDuplication(true)); Updater updater = ((MultiLayerNetwork) model).getUpdater(); INDArray view = updater.getStateViewArray(); @@ -161,7 +161,7 @@ public class DefaultTrainer extends Thread implements Trainer { updater.setStateViewArray((MultiLayerNetwork) replicatedModel, viewD, false); } } else if (replicatedModel instanceof ComputationGraph) { - replicatedModel.setParams(model.params().unsafeDuplication(true)); + replicatedModel.setParams(model.getModelParams().unsafeDuplication(true)); ComputationGraphUpdater updater = ((ComputationGraph) model).getUpdater(); INDArray view = updater.getStateViewArray(); @@ -278,7 +278,7 @@ public class DefaultTrainer extends Thread implements Trainer { } configureListeners(uuid, oldListeners, replicatedListeners); - this.replicatedModel.setListeners(replicatedListeners); + this.replicatedModel.addTrainingListeners(replicatedListeners.toArray(new TrainingListener[]{})); } @Override @@ -295,8 +295,8 @@ public class DefaultTrainer extends Thread implements Trainer { // however, we don't need clone or anything here if (originalModel instanceof MultiLayerNetwork) { if (!onRootModel) { - 
MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson( - ((MultiLayerNetwork) originalModel).getLayerWiseConfigurations().toJson()); + NeuralNetConfiguration conf = NeuralNetConfiguration.fromJson( + ((MultiLayerNetwork) originalModel).getNetConfiguration().toJson()); conf.setTrainingWorkspaceMode(workspaceMode); this.replicatedModel = new MultiLayerNetwork(conf); @@ -305,7 +305,7 @@ public class DefaultTrainer extends Thread implements Trainer { // we replicate original model params & updater state, just in case it's pre-trained model try { modelLock.writeLock().lock(); - replicatedModel.setParams(originalModel.params().unsafeDuplication(true)); + replicatedModel.setParams(originalModel.getModelParams().unsafeDuplication(true)); Updater updaterReplica = ((MultiLayerNetwork) replicatedModel).getUpdater(); Updater updaterOrigina = ((MultiLayerNetwork) originalModel).getUpdater(); @@ -323,13 +323,13 @@ public class DefaultTrainer extends Thread implements Trainer { if (!((MultiLayerNetwork) replicatedModel).isInitCalled()) this.replicatedModel.init(); - ((MultiLayerNetwork) replicatedModel).getLayerWiseConfigurations() + ((MultiLayerNetwork) replicatedModel).getNetConfiguration() .setTrainingWorkspaceMode(workspaceMode); } } else if (originalModel instanceof ComputationGraph) { if (!onRootModel) { ComputationGraphConfiguration conf = ComputationGraphConfiguration - .fromJson(((ComputationGraph) originalModel).getConfiguration().toJson()); + .fromJson(((ComputationGraph) originalModel).getComputationGraphConfiguration().toJson()); conf.setTrainingWorkspaceMode(workspaceMode); this.replicatedModel = new ComputationGraph(conf); @@ -338,7 +338,7 @@ public class DefaultTrainer extends Thread implements Trainer { // we replicate original model params & updater state, just in case it's pre-trained model try { modelLock.writeLock().lock(); - replicatedModel.setParams(originalModel.params().unsafeDuplication(true)); + replicatedModel.setParams(originalModel.getModelParams().unsafeDuplication(true)); ComputationGraphUpdater updaterReplica = ((ComputationGraph) replicatedModel).getUpdater(); ComputationGraphUpdater updaterOrigina = ((ComputationGraph) originalModel).getUpdater(); @@ -354,7 +354,7 @@ public class DefaultTrainer extends Thread implements Trainer { } else { this.replicatedModel = originalModel; this.replicatedModel.init(); - ((ComputationGraph) replicatedModel).getConfiguration().setTrainingWorkspaceMode(workspaceMode); + ((ComputationGraph) replicatedModel).getComputationGraphConfiguration().setTrainingWorkspaceMode(workspaceMode); } } @@ -389,7 +389,7 @@ public class DefaultTrainer extends Thread implements Trainer { Nd4j.getExecutioner().commit(); // we ensure memory is updated on host side - Nd4j.getAffinityManager().ensureLocation(replicatedModel.params(), + Nd4j.getAffinityManager().ensureLocation(replicatedModel.getModelParams(), AffinityManager.Location.HOST); if (replicatedModel instanceof MultiLayerNetwork) { @@ -427,7 +427,7 @@ public class DefaultTrainer extends Thread implements Trainer { Nd4j.getExecutioner().commit(); // we ensure memory is updated on host side - Nd4j.getAffinityManager().ensureLocation(replicatedModel.params(), + Nd4j.getAffinityManager().ensureLocation(replicatedModel.getModelParams(), AffinityManager.Location.HOST); ComputationGraphUpdater updaterReplica = ((ComputationGraph) replicatedModel).getUpdater(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/SymmetricTrainer.java 
b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/SymmetricTrainer.java index 96e02cad8..a3a3c57db 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/SymmetricTrainer.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/SymmetricTrainer.java @@ -22,7 +22,7 @@ package org.deeplearning4j.parallelism.trainer; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -34,7 +34,7 @@ import org.deeplearning4j.parallelism.ParallelWrapper; public class SymmetricTrainer extends DefaultTrainer implements CommunicativeTrainer { protected GradientsAccumulator accumulator; - public SymmetricTrainer(@NonNull Model originalModel, String uuid, int threadIdx, @NonNull WorkspaceMode mode, + public SymmetricTrainer(@NonNull IModel originalModel, String uuid, int threadIdx, @NonNull WorkspaceMode mode, @NonNull ParallelWrapper wrapper, boolean useMDS) { super(); this.uuid = uuid + "_thread_" + threadIdx; diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/Trainer.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/Trainer.java index 51e9be570..bdc773b0f 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/Trainer.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/main/java/org/deeplearning4j/parallelism/trainer/Trainer.java @@ -21,7 +21,7 @@ package org.deeplearning4j.parallelism.trainer; import lombok.NonNull; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.MultiDataSet; @@ -54,17 +54,17 @@ public interface Trainer extends Runnable { /** * THe current model for the trainer - * @return the current {@link Model} + * @return the current {@link IModel} * for the worker */ - Model getModel(); + IModel getModel(); /** - * Update the current {@link Model} + * Update the current {@link IModel} * for the worker * @param model the new model for this worker */ - void updateModel(@NonNull Model model); + void updateModel(@NonNull IModel model); boolean isRunning(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/InplaceParallelInferenceTest.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/InplaceParallelInferenceTest.java index ecb28ef9b..d952ebf4a 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/InplaceParallelInferenceTest.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/InplaceParallelInferenceTest.java @@ -40,7 +40,7 @@ public class InplaceParallelInferenceTest extends BaseDL4JTest { public void testUpdateModel() { int nIn = 5; - val conf = new NeuralNetConfiguration.Builder() + val conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).activation(Activation.SOFTMAX).build(), "in") @@ -65,10 +65,10 @@ public class InplaceParallelInferenceTest extends BaseDL4JTest { for (val m : models) { assertNotNull(m); 
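The test updates here and in the files that follow all apply the same configuration-builder migration: new NeuralNetConfiguration.Builder()...setInputType(...) producing a MultiLayerConfiguration becomes NeuralNetConfiguration.builder()...inputType(...) producing a NeuralNetConfiguration. A condensed sketch of the new form, mirroring the MNIST-style configurations changed in this patch (the concrete layer sizes are illustrative):

NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
        .seed(12345)
        .l2(0.0005)
        .weightInit(WeightInit.XAVIER)
        .updater(new Nesterovs(0.01, 0.9))
        .list()
        .layer(0, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build())
        .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .nOut(10).activation(Activation.SOFTMAX).build())
        .inputType(InputType.convolutionalFlat(28, 28, 1)) // was .setInputType(...)
        .build(); // yields a NeuralNetConfiguration (previously MultiLayerConfiguration)
MultiLayerNetwork model = new MultiLayerNetwork(conf);
model.init();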
- assertEquals(net.params(), m.params()); + assertEquals(net.getModelParams(), m.getModelParams()); } - val conf2 = new NeuralNetConfiguration.Builder() + val conf2 = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).activation(Activation.SOFTMAX).build(), "in") @@ -80,7 +80,7 @@ public class InplaceParallelInferenceTest extends BaseDL4JTest { val net2 = new ComputationGraph(conf2); net2.init(); - assertNotEquals(net.params(), net2.params()); + assertNotEquals(net.getModelParams(), net2.getModelParams()); pi.updateModel(net2); @@ -90,7 +90,7 @@ public class InplaceParallelInferenceTest extends BaseDL4JTest { for (val m : models2) { assertNotNull(m); - assertEquals(net2.params(), m.params()); + assertEquals(net2.getModelParams(), m.getModelParams()); } } finally { pi.shutdown(); @@ -101,7 +101,7 @@ public class InplaceParallelInferenceTest extends BaseDL4JTest { public void testOutput_RoundRobin_1() throws Exception { int nIn = 5; - val conf = new NeuralNetConfiguration.Builder() + val conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).activation(Activation.SOFTMAX).build(), "in") @@ -134,7 +134,7 @@ public class InplaceParallelInferenceTest extends BaseDL4JTest { public void testOutput_FIFO_1() throws Exception { int nIn = 5; - val conf = new NeuralNetConfiguration.Builder() + val conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).activation(Activation.SOFTMAX).build(), "in") diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelInferenceTest.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelInferenceTest.java index 3919bfbc7..cdf908911 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelInferenceTest.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelInferenceTest.java @@ -23,12 +23,11 @@ package org.deeplearning4j.parallelism; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import lombok.val; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.exception.DL4JInvalidInputException; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; import org.deeplearning4j.nn.conf.ConvolutionMode; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -414,7 +413,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { int nIn = 10; int[] tsLengths = {3,5,7,10,50,100}; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .list() @@ -459,7 +458,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { int nIn = 10; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .list() @@ -527,7 +526,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { {1,nIn,40,45}, }; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + 
NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .list() @@ -575,7 +574,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { int nIn = 3; int[] defaultShape = new int[]{1, nIn, 16, 16}; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .convolutionMode(ConvolutionMode.Same) @@ -625,7 +624,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { int nIn = 10; int wrongNIn = 5; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .list() @@ -689,7 +688,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { int nIn = 10; int tsLength = 16; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .seed(12345) .list() @@ -757,7 +756,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { public void testModelUpdate_1() throws Exception { int nIn = 5; - val conf = new NeuralNetConfiguration.Builder() + val conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).activation(Activation.SOFTMAX).build(), "in") @@ -782,16 +781,16 @@ public class ParallelInferenceTest extends BaseDL4JTest { assertNotEquals(0, output.length); } - Model[] modelsBefore = inf.getCurrentModelsFromWorkers(); + IModel[] modelsBefore = inf.getCurrentModelsFromWorkers(); assertEquals(4, modelsBefore.length); boolean passed = false; int cnt0 = 0; - for (Model m : modelsBefore) { + for (IModel m : modelsBefore) { // model can be null for some of the workers yet, due to race condition if (m != null) { Thread.sleep(500); - assertEquals( net.params(), m.params(), "Failed at model [" + cnt0 + "]"); + assertEquals( net.getModelParams(), m.getModelParams(), "Failed at model [" + cnt0 + "]"); passed = true; } cnt0++; @@ -799,7 +798,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { assertTrue(passed); - val conf2 = new NeuralNetConfiguration.Builder() + val conf2 = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).build(), "in") @@ -819,7 +818,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { cnt0 = 0; for (val m:modelsAfter) { assertNotNull( m, "Failed at model [" + cnt0 + "]"); - assertEquals( net2.params(), m.params(), "Failed at model [" + cnt0++ + "]"); + assertEquals( net2.getModelParams(), m.getModelParams(), "Failed at model [" + cnt0++ + "]"); } inf.shutdown(); @@ -830,7 +829,7 @@ public class ParallelInferenceTest extends BaseDL4JTest { int nIn = 5; - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .layer("out0", new OutputLayer.Builder().nIn(nIn).nOut(4).activation(Activation.SOFTMAX).build(), "in") diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelWrapperTest.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelWrapperTest.java index 458b9dab1..471cafbfd 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelWrapperTest.java +++ 
b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/ParallelWrapperTest.java @@ -25,7 +25,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.EarlyTerminationDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.eval.Evaluation; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -37,8 +36,6 @@ import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.optimize.listeners.ScoreIterationListener; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; -import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.learning.config.Nesterovs; import org.nd4j.linalg.lossfunctions.LossFunctions; @@ -72,7 +69,7 @@ public class ParallelWrapperTest extends BaseDL4JTest { log.info("F: {}; L: {};", t0.getFeatures().shape(), t0.getLabels().shape()); log.info("Build model...."); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) //.learningRateDecayPolicy(LearningRatePolicy.Inverse).lrPolicyDecayRate(0.001).lrPolicyPower(0.75) .weightInit(WeightInit.XAVIER) @@ -90,9 +87,9 @@ public class ParallelWrapperTest extends BaseDL4JTest { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, nChannels)); + .inputType(InputType.convolutionalFlat(28, 28, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); @@ -114,7 +111,7 @@ public class ParallelWrapperTest extends BaseDL4JTest { .build(); log.info("Train model...."); - model.setListeners(new ScoreIterationListener(100)); + model.addTrainingListeners(new ScoreIterationListener(100)); long timeX = System.currentTimeMillis(); // optionally you might want to use MultipleEpochsIterator instead of manually iterating/resetting over your iterator diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestListeners.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestListeners.java index eb3ccfef8..4389d8f68 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestListeners.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestListeners.java @@ -25,9 +25,8 @@ import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; import org.deeplearning4j.datasets.iterator.ExistingDataSetIterator; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import 
org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; @@ -56,11 +55,11 @@ public class TestListeners extends BaseDL4JTest { public void testListeners() { TestListener.clearCounts(); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list().layer(0, + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list().layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10) .activation(Activation.TANH).build()); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); @@ -71,7 +70,7 @@ public class TestListeners extends BaseDL4JTest { public void testListenersGraph() { TestListener.clearCounts(); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder() .addInputs("in").addLayer("0", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10) .activation(Activation.TANH).build(), @@ -88,16 +87,16 @@ public class TestListeners extends BaseDL4JTest { public void testListenersViaModel() { TestListener.clearCounts(); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().list().layer(0, + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().list().layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10) .activation(Activation.TANH).build()); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); StatsStorage ss = new InMemoryStatsStorage(); - model.setListeners(new TestListener(), new StatsListener(ss)); + model.addTrainingListeners(new TestListener(), new StatsListener(ss)); testListenersForModel(model, null); @@ -109,7 +108,7 @@ public class TestListeners extends BaseDL4JTest { public void testListenersViaModelGraph() { TestListener.clearCounts(); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder() .addInputs("in").addLayer("0", new OutputLayer.Builder(LossFunctions.LossFunction.MSE).nIn(10).nOut(10) .activation(Activation.TANH).build(), @@ -120,7 +119,7 @@ public class TestListeners extends BaseDL4JTest { model.init(); StatsStorage ss = new InMemoryStatsStorage(); - model.setListeners(new TestListener(), new StatsListener(ss)); + model.addTrainingListeners(new TestListener(), new StatsListener(ss)); testListenersForModel(model, null); @@ -128,7 +127,7 @@ public class TestListeners extends BaseDL4JTest { assertEquals(2, ss.listWorkerIDsForSession(ss.listSessionIDs().get(0)).size()); } - private static void testListenersForModel(Model model, List listeners) { + private static void testListenersForModel(IModel model, List listeners) { int nWorkers = 2; ParallelWrapper wrapper = new ParallelWrapper.Builder(model).workers(nWorkers).averagingFrequency(1) @@ -176,26 +175,26 @@ public class TestListeners extends BaseDL4JTest { } @Override - public void onEpochStart(Model model) {} + public void onEpochStart(IModel model) {} @Override - public void onEpochEnd(Model model) {} + public void onEpochEnd(IModel model) {} @Override - public void 
onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { forwardPassCount.incrementAndGet(); } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { forwardPassCount.incrementAndGet(); } @Override - public void onGradientCalculation(Model model) {} + public void onGradientCalculation(IModel model) {} @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { backwardPassCount.getAndIncrement(); } @@ -233,7 +232,7 @@ public class TestListeners extends BaseDL4JTest { } @Override - public void iterationDone(Model model, int iteration, int epoch) {} + public void iterationDone(IModel model, int iteration, int epoch) {} } } diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStopping.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStopping.java index 2eaf2e850..a003d99fb 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStopping.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStopping.java @@ -32,7 +32,6 @@ import org.deeplearning4j.earlystopping.termination.MaxScoreIterationTermination import org.deeplearning4j.earlystopping.termination.MaxTimeIterationTerminationCondition; import org.deeplearning4j.earlystopping.trainer.IEarlyStoppingTrainer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -56,7 +55,7 @@ public class TestParallelEarlyStopping extends BaseDL4JTest { // be properly designed // @Test // public void testEarlyStoppingIris(){ - // MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + // NeuralNetConfiguration conf = NeuralNetConfiguration.builder() // .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) // .updater(Updater.SGD) // .weightInit(WeightInit.XAVIER) @@ -101,14 +100,14 @@ public class TestParallelEarlyStopping extends BaseDL4JTest { @Test public void testEarlyStoppingEveryNEpoch() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .lossFunction(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(50, 600); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -133,7 +132,7 @@ public class TestParallelEarlyStopping extends BaseDL4JTest { //Test poor tuning (high LR): should terminate on MaxScoreIterationTerminationCondition Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new 
Sgd(1.0)) //Intentionally huge LR .weightInit(WeightInit.XAVIER).list() @@ -141,7 +140,7 @@ public class TestParallelEarlyStopping extends BaseDL4JTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(1)); + net.addTrainingListeners(new ScoreIterationListener(1)); DataSetIterator irisIter = new IrisDataSetIterator(10, 150); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStoppingUI.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStoppingUI.java index 7bea67ef6..66a9b76c4 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStoppingUI.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/TestParallelEarlyStoppingUI.java @@ -31,7 +31,6 @@ import org.deeplearning4j.earlystopping.scorecalc.DataSetLossCalculator; import org.deeplearning4j.earlystopping.termination.MaxEpochsTerminationCondition; import org.deeplearning4j.earlystopping.trainer.IEarlyStoppingTrainer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -56,7 +55,7 @@ public class TestParallelEarlyStoppingUI extends BaseDL4JTest { public void testParallelStatsListenerCompatibility() throws Exception { UIServer uiServer = UIServer.getInstance(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) @@ -68,7 +67,7 @@ public class TestParallelEarlyStoppingUI extends BaseDL4JTest { // it's important that the UI can report results from parallel training // there's potential for StatsListener to fail if certain properties aren't set in the model StatsStorage statsStorage = new InMemoryStatsStorage(); - net.setListeners(new StatsListener(statsStorage)); + net.addTrainingListeners(new StatsListener(statsStorage)); uiServer.attach(statsStorage); DataSetIterator irisIter = new IrisDataSetIterator(50, 500); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContextTest.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContextTest.java index 3a85b4b34..306338b11 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContextTest.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/DefaultTrainerContextTest.java @@ -21,8 +21,8 @@ package org.deeplearning4j.parallelism.factory; import lombok.val; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.inputs.InputType; import 
org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -33,7 +33,6 @@ import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.parallelism.ParallelWrapper; -import org.deeplearning4j.parallelism.trainer.SymmetricTrainer; import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.learning.config.Nesterovs; @@ -50,11 +49,11 @@ public class DefaultTrainerContextTest extends BaseDL4JTest { @Test public void testEqualUuid1() { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) //.learningRateDecayPolicy(LearningRatePolicy.Inverse).lrPolicyDecayRate(0.001).lrPolicyPower(0.75) .weightInit(WeightInit.XAVIER) - .updater(new Nesterovs(0.01, 0.9)).list() + .updater(new Nesterovs(0.01, 0.9)) .layer(0, new ConvolutionLayer.Builder(5, 5) //nIn and nOut specify depth. nIn here is the nChannels and nOut is the number of filters to be applied .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build()) @@ -68,9 +67,9 @@ public class DefaultTrainerContextTest extends BaseDL4JTest { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, nChannels)); + .inputType(InputType.convolutionalFlat(28, 28, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContextTest.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContextTest.java index ec82896df..b61f820f7 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContextTest.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/factory/SymmetricTrainerContextTest.java @@ -21,8 +21,8 @@ package org.deeplearning4j.parallelism.factory; import lombok.val; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.WorkspaceMode; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -50,7 +50,7 @@ public class SymmetricTrainerContextTest extends BaseDL4JTest { @Test public void testEqualUuid1() { - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) //.learningRateDecayPolicy(LearningRatePolicy.Inverse).lrPolicyDecayRate(0.001).lrPolicyPower(0.75) .weightInit(WeightInit.XAVIER) @@ -68,9 +68,9 @@ public class SymmetricTrainerContextTest extends BaseDL4JTest { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) 
.nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, nChannels)); + .inputType(InputType.convolutionalFlat(28, 28, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); diff --git a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/main/ParallelWrapperMainTest.java b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/main/ParallelWrapperMainTest.java index 315788855..da27c4c63 100644 --- a/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/main/ParallelWrapperMainTest.java +++ b/cavis-dnn/cavis-dnn-parallelwrapper/src/test/java/org/deeplearning4j/parallelism/main/ParallelWrapperMainTest.java @@ -22,8 +22,8 @@ package org.deeplearning4j.parallelism.main; import lombok.extern.slf4j.Slf4j; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration.NeuralNetConfigurationBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; import org.deeplearning4j.nn.conf.layers.DenseLayer; @@ -66,7 +66,7 @@ public class ParallelWrapperMainTest extends BaseDL4JTest { DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, 12345); log.info("Build model...."); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) .updater(new Nesterovs(0.01, 0.9)).list() @@ -83,9 +83,9 @@ public class ParallelWrapperMainTest extends BaseDL4JTest { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, nChannels)); + .inputType(InputType.convolutionalFlat(28, 28, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); File tempModel = new File(testDir, "tmpmodel.zip"); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/TrainingHook.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/TrainingHook.java index 8ea9738db..2adec7f64 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/TrainingHook.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/TrainingHook.java @@ -20,7 +20,7 @@ package org.deeplearning4j.spark.api; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.MultiDataSet; @@ -33,7 +33,7 @@ public interface TrainingHook extends Serializable { * that was used for the update * @param model themodel that was update */ - void preUpdate(DataSet minibatch, Model model); + void preUpdate(DataSet minibatch, IModel model); /** * A hook method for post update @@ -41,7 +41,7 @@ 
public interface TrainingHook extends Serializable { * that was usd for the update * @param model the model that was updated */ - void postUpdate(DataSet minibatch, Model model); + void postUpdate(DataSet minibatch, IModel model); /** * A hook method for pre update. @@ -49,7 +49,7 @@ public interface TrainingHook extends Serializable { * that was used for the update * @param model the model that was update */ - void preUpdate(MultiDataSet minibatch, Model model); + void preUpdate(MultiDataSet minibatch, IModel model); /** * A hook method for post update @@ -57,6 +57,6 @@ public interface TrainingHook extends Serializable { * that was usd for the update * @param model the model that was updated */ - void postUpdate(MultiDataSet minibatch, Model model); + void postUpdate(MultiDataSet minibatch, IModel model); } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/worker/NetBroadcastTuple.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/worker/NetBroadcastTuple.java index 9fa317026..f0d69c039 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/worker/NetBroadcastTuple.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/api/worker/NetBroadcastTuple.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.api.worker; import lombok.Data; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.nd4j.linalg.api.ndarray.INDArray; import java.io.Serializable; @@ -31,13 +31,13 @@ import java.util.concurrent.atomic.AtomicInteger; @Data public class NetBroadcastTuple implements Serializable { - private final MultiLayerConfiguration configuration; + private final NeuralNetConfiguration configuration; private final ComputationGraphConfiguration graphConfiguration; private final INDArray parameters; private final INDArray updaterState; private final AtomicInteger counter; - public NetBroadcastTuple(MultiLayerConfiguration configuration, INDArray parameters, INDArray updaterState) { + public NetBroadcastTuple(NeuralNetConfiguration configuration, INDArray parameters, INDArray updaterState) { this(configuration, null, parameters, updaterState); } @@ -47,12 +47,12 @@ public class NetBroadcastTuple implements Serializable { } - public NetBroadcastTuple(MultiLayerConfiguration configuration, ComputationGraphConfiguration graphConfiguration, + public NetBroadcastTuple(NeuralNetConfiguration configuration, ComputationGraphConfiguration graphConfiguration, INDArray parameters, INDArray updaterState) { this(configuration, graphConfiguration, parameters, updaterState, new AtomicInteger(0)); } - public NetBroadcastTuple(MultiLayerConfiguration configuration, ComputationGraphConfiguration graphConfiguration, + public NetBroadcastTuple(NeuralNetConfiguration configuration, ComputationGraphConfiguration graphConfiguration, INDArray parameters, INDArray updaterState, AtomicInteger counter) { this.configuration = configuration; this.graphConfiguration = graphConfiguration; diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/earlystopping/BaseSparkEarlyStoppingTrainer.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/earlystopping/BaseSparkEarlyStoppingTrainer.java index 5ed1848b7..8d799c1b2 100644 --- 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/earlystopping/BaseSparkEarlyStoppingTrainer.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/earlystopping/BaseSparkEarlyStoppingTrainer.java @@ -29,7 +29,7 @@ import org.deeplearning4j.earlystopping.scorecalc.ScoreCalculator; import org.deeplearning4j.earlystopping.termination.EpochTerminationCondition; import org.deeplearning4j.earlystopping.termination.IterationTerminationCondition; import org.deeplearning4j.earlystopping.trainer.IEarlyStoppingTrainer; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.MultiDataSet; import org.slf4j.Logger; @@ -39,7 +39,7 @@ import java.io.IOException; import java.util.LinkedHashMap; import java.util.Map; -public abstract class BaseSparkEarlyStoppingTrainer implements IEarlyStoppingTrainer { +public abstract class BaseSparkEarlyStoppingTrainer implements IEarlyStoppingTrainer { private static final Logger log = LoggerFactory.getLogger(BaseSparkEarlyStoppingTrainer.class); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeReconstructionProbWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeReconstructionProbWithKeyFunction.java index ed302a351..e1bfc277f 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeReconstructionProbWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeReconstructionProbWithKeyFunction.java @@ -31,7 +31,7 @@ public abstract class BaseVaeReconstructionProbWithKeyFunction extends BaseVa /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param useLogProbability If true: use log probability. False: use raw probability. 
* @param batchSize Batch size to use when scoring * @param numSamples Number of samples to use when calling {@link VariationalAutoencoder#reconstructionLogProbability(INDArray, int)} diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java index 4140b8a53..cfcc93b78 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/common/score/BaseVaeScoreWithKeyFunction.java @@ -45,7 +45,7 @@ public abstract class BaseVaeScoreWithKeyFunction implements PairFlatMapFunct /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use when scoring */ public BaseVaeScoreWithKeyFunction(Broadcast params, Broadcast jsonConfig, int batchSize) { diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java index a38322234..beb8d7972 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/evaluation/EvaluationRunner.java @@ -22,12 +22,12 @@ package org.deeplearning4j.spark.impl.evaluation; import lombok.*; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.spark.broadcast.Broadcast; import org.deeplearning4j.datasets.iterator.IteratorDataSetIterator; import org.deeplearning4j.datasets.iterator.IteratorMultiDataSetIterator; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.common.base.Preconditions; @@ -124,14 +124,14 @@ public class EvaluationRunner { EvaluationFuture f = new EvaluationFuture(); f.setResult(evals); try { - Model m; + IModel m; if (isCG) { ComputationGraphConfiguration conf = ComputationGraphConfiguration.fromJson(json.getValue()); ComputationGraph cg = new ComputationGraph(conf); cg.init(deviceLocalParams.get(), false); m = cg; } else { - MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(json.getValue()); + NeuralNetConfiguration conf = NeuralNetConfiguration.fromJson(json.getValue()); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(deviceLocalParams.get(), false); m = net; @@ -176,7 +176,7 @@ public class EvaluationRunner { return f; } - private static void doEval(Model m, IEvaluation[] e, Iterator ds, Iterator mds, int evalBatchSize){ + private static void doEval(IModel m, IEvaluation[] e, Iterator ds, Iterator mds, int evalBatchSize){ if(m instanceof MultiLayerNetwork){ MultiLayerNetwork mln = (MultiLayerNetwork)m; if(ds != null){ diff --git 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/SparkComputationGraph.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/SparkComputationGraph.java index 67b120ddf..4849ee142 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/SparkComputationGraph.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/SparkComputationGraph.java @@ -102,7 +102,7 @@ public class SparkComputationGraph extends SparkListenable { TrainingMaster trainingMaster) { sc = javaSparkContext; this.trainingMaster = trainingMaster; - this.conf = network.getConfiguration().clone(); + this.conf = network.getComputationGraphConfiguration().clone(); this.network = network; this.network.init(); @@ -370,7 +370,7 @@ public class SparkComputationGraph extends SparkListenable { */ public double calculateScore(JavaRDD data, boolean average, int minibatchSize) { JavaRDD> rdd = data.mapPartitions(new ScoreFlatMapFunctionCGDataSet(conf.toJson(), - sc.broadcast(network.params()), minibatchSize)); + sc.broadcast(network.getModelParams()), minibatchSize)); //Reduce to a single tuple, with example count + sum of scores Tuple2 countAndSumScores = rdd.reduce(new LongDoubleReduceFunction()); @@ -405,7 +405,7 @@ public class SparkComputationGraph extends SparkListenable { */ public double calculateScoreMultiDataSet(JavaRDD data, boolean average, int minibatchSize) { JavaRDD> rdd = data.mapPartitions(new ScoreFlatMapFunctionCGMultiDataSet(conf.toJson(), - sc.broadcast(network.params()), minibatchSize)); + sc.broadcast(network.getModelParams()), minibatchSize)); //Reduce to a single tuple, with example count + sum of scores Tuple2 countAndSumScores = rdd.reduce(new LongDoubleReduceFunction()); if (average) { @@ -476,7 +476,7 @@ public class SparkComputationGraph extends SparkListenable { */ public JavaDoubleRDD scoreExamplesMultiDataSet(JavaRDD data, boolean includeRegularizationTerms, int batchSize) { - return data.mapPartitionsToDouble(new ScoreExamplesFunction(sc.broadcast(network.params()), + return data.mapPartitionsToDouble(new ScoreExamplesFunction(sc.broadcast(network.getModelParams()), sc.broadcast(conf.toJson()), includeRegularizationTerms, batchSize)); } @@ -527,7 +527,7 @@ public class SparkComputationGraph extends SparkListenable { * @return Network output given the input, by key */ public JavaPairRDD feedForwardWithKey(JavaPairRDD featuresData, int batchSize) { - return featuresData.mapPartitionsToPair(new GraphFeedForwardWithKeyFunction(sc.broadcast(network.params()), + return featuresData.mapPartitionsToPair(new GraphFeedForwardWithKeyFunction(sc.broadcast(network.getModelParams()), sc.broadcast(conf.toJson()), batchSize)); } @@ -554,7 +554,7 @@ public class SparkComputationGraph extends SparkListenable { */ public JavaPairRDD scoreExamplesMultiDataSet(JavaPairRDD data, boolean includeRegularizationTerms, int batchSize) { - return data.mapPartitionsToPair(new ScoreExamplesWithKeyFunction(sc.broadcast(network.params()), + return data.mapPartitionsToPair(new ScoreExamplesWithKeyFunction(sc.broadcast(network.getModelParams()), sc.broadcast(conf.toJson()), includeRegularizationTerms, batchSize)); } @@ -632,7 +632,7 @@ public class SparkComputationGraph extends SparkListenable { * @return {@link RegressionEvaluation} instance with regression performance */ public T evaluateRegression(JavaRDD data, int minibatchSize) { - 
val nOut = ((FeedForwardLayer) network.getOutputLayer(0).conf().getLayer()).getNOut(); + val nOut = ((FeedForwardLayer) network.getOutputLayer(0).getLayerConfiguration()).getNOut(); return (T)doEvaluation(data, new org.deeplearning4j.eval.RegressionEvaluation(nOut), minibatchSize); } @@ -820,7 +820,7 @@ public class SparkComputationGraph extends SparkListenable { */ public T[] doEvaluation(JavaRDD data, int evalNumWorkers, int evalBatchSize, T... emptyEvaluations) { IEvaluateFlatMapFunction evalFn = new IEvaluateFlatMapFunction<>(true, sc.broadcast(conf.toJson()), - SparkUtils.asByteArrayBroadcast(sc, network.params()), evalNumWorkers, evalBatchSize, emptyEvaluations); + SparkUtils.asByteArrayBroadcast(sc, network.getModelParams()), evalNumWorkers, evalBatchSize, emptyEvaluations); JavaRDD evaluations = data.mapPartitions(evalFn); return evaluations.treeAggregate(null, new IEvaluateAggregateFunction(), new IEvaluateAggregateFunction()); @@ -844,7 +844,7 @@ public class SparkComputationGraph extends SparkListenable { public T[] doEvaluationMDS(JavaRDD data, int evalNumWorkers, int evalBatchSize, T... emptyEvaluations) { Preconditions.checkArgument(evalNumWorkers > 0, "Invalid number of evaulation workers: require at least 1 - got %s", evalNumWorkers); IEvaluateMDSFlatMapFunction evalFn = new IEvaluateMDSFlatMapFunction<>(sc.broadcast(conf.toJson()), - SparkUtils.asByteArrayBroadcast(sc, network.params()), evalNumWorkers, evalBatchSize, emptyEvaluations); + SparkUtils.asByteArrayBroadcast(sc, network.getModelParams()), evalNumWorkers, evalBatchSize, emptyEvaluations); JavaRDD evaluations = data.mapPartitions(evalFn); return evaluations.treeAggregate(null, new IEvaluateAggregateFunction(), new IEvaluateAggregateFunction()); @@ -906,7 +906,7 @@ public class SparkComputationGraph extends SparkListenable { protected IEvaluation[] doEvaluation(JavaRDD data, int evalNumWorkers, int evalBatchSize, DataSetLoader loader, MultiDataSetLoader mdsLoader, IEvaluation... 
emptyEvaluations){ IEvaluateMDSPathsFlatMapFunction evalFn = new IEvaluateMDSPathsFlatMapFunction(sc.broadcast(conf.toJson()), - SparkUtils.asByteArrayBroadcast(sc, network.params()), evalNumWorkers, evalBatchSize, loader, mdsLoader, + SparkUtils.asByteArrayBroadcast(sc, network.getModelParams()), evalNumWorkers, evalBatchSize, loader, mdsLoader, BroadcastHadoopConfigHolder.get(sc), emptyEvaluations); Preconditions.checkArgument(evalNumWorkers > 0, "Invalid number of evaulation workers: require at least 1 - got %s", evalNumWorkers); JavaRDD evaluations = data.mapPartitions(evalFn); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java index 3fa3312d7..f6794f27d 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionErrorWithKeyFunction.java @@ -33,7 +33,7 @@ public class CGVaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWith /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use when scoring */ public CGVaeReconstructionErrorWithKeyFunction(Broadcast params, Broadcast jsonConfig, @@ -56,7 +56,7 @@ public class CGVaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWith if (!(l instanceof VariationalAutoencoder)) { throw new RuntimeException( "Cannot use CGVaeReconstructionErrorWithKeyFunction on network that doesn't have a VAE " - + "layer as layer 0. Layer type: " + l.getClass()); + + "layer as layer 0. ILayer type: " + l.getClass()); } return (VariationalAutoencoder) l; } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java index a71912367..b5413e0dc 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/graph/scoring/CGVaeReconstructionProbWithKeyFunction.java @@ -33,7 +33,7 @@ public class CGVaeReconstructionProbWithKeyFunction extends BaseVaeReconstruc /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param useLogProbability If true: use log probability. False: use raw probability. * @param batchSize Batch size to use when scoring * @param numSamples Number of samples to use when calling {@link VariationalAutoencoder#reconstructionLogProbability(INDArray, int)} @@ -58,7 +58,7 @@ public class CGVaeReconstructionProbWithKeyFunction extends BaseVaeReconstruc if (!(l instanceof VariationalAutoencoder)) { throw new RuntimeException( "Cannot use CGVaeReconstructionProbWithKeyFunction on network that doesn't have a VAE " - + "layer as layer 0. 
Layer type: " + l.getClass()); + + "layer as layer 0. ILayer type: " + l.getClass()); } return (VariationalAutoencoder) l; } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java index d8e1c1437..890c62c3d 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/SparkDl4jMultiLayer.java @@ -35,7 +35,7 @@ import org.datavec.spark.util.BroadcastHadoopConfigHolder; import org.deeplearning4j.core.loader.DataSetLoader; import org.deeplearning4j.core.loader.MultiDataSetLoader; import org.deeplearning4j.core.loader.impl.SerializedDataSetLoader; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.spark.api.TrainingMaster; @@ -80,7 +80,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { public static final int DEFAULT_ROC_THRESHOLD_STEPS = 32; public static final int DEFAULT_EVAL_WORKERS = 4; private final transient JavaSparkContext sc; - private final MultiLayerConfiguration conf; + private final NeuralNetConfiguration conf; private MultiLayerNetwork network; private double lastScore; private int defaultEvaluationWorkers = DEFAULT_EVAL_WORKERS; @@ -104,7 +104,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @param sparkContext the spark context to use * @param conf the configuration of the network */ - public SparkDl4jMultiLayer(SparkContext sparkContext, MultiLayerConfiguration conf, + public SparkDl4jMultiLayer(SparkContext sparkContext, NeuralNetConfiguration conf, TrainingMaster trainingMaster) { this(new JavaSparkContext(sparkContext), initNetwork(conf), trainingMaster); } @@ -115,14 +115,14 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @param sc the spark context to use * @param conf the configuration of the network */ - public SparkDl4jMultiLayer(JavaSparkContext sc, MultiLayerConfiguration conf, TrainingMaster trainingMaster) { + public SparkDl4jMultiLayer(JavaSparkContext sc, NeuralNetConfiguration conf, TrainingMaster trainingMaster) { this(sc.sc(), conf, trainingMaster); } public SparkDl4jMultiLayer(JavaSparkContext javaSparkContext, MultiLayerNetwork network, TrainingMaster trainingMaster) { sc = javaSparkContext; - this.conf = network.getLayerWiseConfigurations().clone(); + this.conf = network.getNetConfiguration().clone(); this.network = network; if (!network.isInitCalled()) network.init(); @@ -132,7 +132,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { SparkUtils.checkKryoConfiguration(javaSparkContext, log); } - private static MultiLayerNetwork initNetwork(MultiLayerConfiguration conf) { + private static MultiLayerNetwork initNetwork(NeuralNetConfiguration conf) { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); return net; @@ -315,8 +315,8 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @return the multi layer network that was fitDataSet */ public MultiLayerNetwork fitLabeledPoint(JavaRDD rdd) { - int nLayers = network.getLayerWiseConfigurations().getConfs().size(); - 
FeedForwardLayer ffl = (FeedForwardLayer) network.getLayerWiseConfigurations().getConf(nLayers - 1).getLayer(); + int nLayers = network.getNetConfiguration().getFlattenedLayerConfigurations().size(); + FeedForwardLayer ffl = (FeedForwardLayer) network.getNetConfiguration().getFlattenedLayerConfigurations().get(nLayers - 1); JavaRDD ds = MLLibUtil.fromLabeledPoint(sc, rdd, ffl.getNOut()); return fit(ds); } @@ -430,7 +430,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @see MultiLayerNetwork#scoreExamples(DataSet, boolean) */ public JavaDoubleRDD scoreExamples(JavaRDD data, boolean includeRegularizationTerms, int batchSize) { - return data.mapPartitionsToDouble(new ScoreExamplesFunction(sc.broadcast(network.params()), + return data.mapPartitionsToDouble(new ScoreExamplesFunction(sc.broadcast(network.getModelParams()), sc.broadcast(conf.toJson()), includeRegularizationTerms, batchSize)); } @@ -466,7 +466,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { */ public JavaPairRDD scoreExamples(JavaPairRDD data, boolean includeRegularizationTerms, int batchSize) { - return data.mapPartitionsToPair(new ScoreExamplesWithKeyFunction(sc.broadcast(network.params()), + return data.mapPartitionsToPair(new ScoreExamplesWithKeyFunction(sc.broadcast(network.getModelParams()), sc.broadcast(conf.toJson()), includeRegularizationTerms, batchSize)); } @@ -494,7 +494,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { */ public JavaPairRDD feedForwardWithMaskAndKey(JavaPairRDD> featuresDataAndMask, int batchSize) { return featuresDataAndMask - .mapPartitionsToPair(new FeedForwardWithKeyFunction(sc.broadcast(network.params()), + .mapPartitionsToPair(new FeedForwardWithKeyFunction(sc.broadcast(network.getModelParams()), sc.broadcast(conf.toJson()), batchSize)); } @@ -577,7 +577,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { * @return {@link RegressionEvaluation} instance with regression performance */ public T evaluateRegression(JavaRDD data, int minibatchSize) { - long nOut = ((FeedForwardLayer) network.getOutputLayer().conf().getLayer()).getNOut(); + long nOut = ((FeedForwardLayer) network.getOutputLayer().getLayerConfiguration()).getNOut(); return (T)doEvaluation(data, new org.deeplearning4j.eval.RegressionEvaluation(nOut), minibatchSize); } @@ -708,7 +708,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { */ public T[] doEvaluation(JavaRDD data, int evalNumWorkers, int evalBatchSize, T... emptyEvaluations) { IEvaluateFlatMapFunction evalFn = new IEvaluateFlatMapFunction<>(false, sc.broadcast(conf.toJson()), - SparkUtils.asByteArrayBroadcast(sc, network.params()), evalNumWorkers, evalBatchSize, emptyEvaluations); + SparkUtils.asByteArrayBroadcast(sc, network.getModelParams()), evalNumWorkers, evalBatchSize, emptyEvaluations); JavaRDD evaluations = data.mapPartitions(evalFn); return evaluations.treeAggregate(null, new IEvaluateAggregateFunction(), new IEvaluationReduceFunction()); } @@ -771,7 +771,7 @@ public class SparkDl4jMultiLayer extends SparkListenable { protected IEvaluation[] doEvaluation(JavaRDD data, int evalNumWorkers, int evalBatchSize, DataSetLoader loader, MultiDataSetLoader mdsLoader, IEvaluation... 
emptyEvaluations){ Configuration config = sc.hadoopConfiguration(); IEvaluateMDSPathsFlatMapFunction evalFn = new IEvaluateMDSPathsFlatMapFunction(sc.broadcast(conf.toJson()), - SparkUtils.asByteArrayBroadcast(sc, network.params()), evalNumWorkers, evalBatchSize, loader, mdsLoader, + SparkUtils.asByteArrayBroadcast(sc, network.getModelParams()), evalNumWorkers, evalBatchSize, loader, mdsLoader, BroadcastHadoopConfigHolder.get(sc), emptyEvaluations); Preconditions.checkArgument(evalNumWorkers > 0, "Invalid number of evaulation workers: require at least 1 - got %s", evalNumWorkers); JavaRDD evaluations = data.mapPartitions(evalFn); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java index 510f2e4d4..c064c81d0 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/FeedForwardWithKeyFunction.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.api.java.function.PairFlatMapFunction; import org.apache.spark.broadcast.Broadcast; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.api.DataSetUtil; @@ -49,7 +49,7 @@ public class FeedForwardWithKeyFunction /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use for forward pass (use > 1 for efficiency) */ public FeedForwardWithKeyFunction(Broadcast params, Broadcast jsonConfig, int batchSize) { @@ -65,7 +65,7 @@ public class FeedForwardWithKeyFunction return Collections.emptyIterator(); } - MultiLayerNetwork network = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(jsonConfig.getValue())); + MultiLayerNetwork network = new MultiLayerNetwork(NeuralNetConfiguration.fromJson(jsonConfig.getValue())); network.init(); INDArray val = params.value().unsafeDuplication(); if (val.length() != network.numParams(false)) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java index 6c3878da5..b6a21d181 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/ScoreExamplesFunction.java @@ -21,9 +21,8 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.api.java.function.DoubleFlatMapFunction; -import org.apache.spark.api.java.function.FlatMapFunction; import org.apache.spark.broadcast.Broadcast; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import 
org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.dataset.DataSet; @@ -60,7 +59,7 @@ public class ScoreExamplesFunction implements DoubleFlatMapFunction implements PairFlatMapFunction implements PairFlatMapFunction, DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate - MultiLayerNetwork network = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(json)); + MultiLayerNetwork network = new MultiLayerNetwork(NeuralNetConfiguration.fromJson(json)); network.init(); INDArray val = params.value().unsafeDuplication(); //.value() object will be shared by all executors on each machine -> OK, as params are not modified by score function if (val.length() != network.numParams(false)) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java index e1c2f760d..d9901cbe0 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionErrorWithKeyFunction.java @@ -22,21 +22,18 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.broadcast.Broadcast; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.spark.impl.common.score.BaseVaeScoreWithKeyFunction; import org.nd4j.linalg.api.ndarray.INDArray; -import scala.Tuple2; - -import java.util.Iterator; public class VaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWithKeyFunction { /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param batchSize Batch size to use when scoring */ public VaeReconstructionErrorWithKeyFunction(Broadcast params, Broadcast jsonConfig, @@ -47,7 +44,7 @@ public class VaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWithKe @Override public VariationalAutoencoder getVaeLayer() { MultiLayerNetwork network = - new MultiLayerNetwork(MultiLayerConfiguration.fromJson(jsonConfig.getValue())); + new MultiLayerNetwork(NeuralNetConfiguration.fromJson(jsonConfig.getValue())); network.init(); INDArray val = params.value().unsafeDuplication(); if (val.length() != network.numParams(false)) @@ -59,7 +56,7 @@ public class VaeReconstructionErrorWithKeyFunction extends BaseVaeScoreWithKe if (!(l instanceof VariationalAutoencoder)) { throw new RuntimeException( "Cannot use VaeReconstructionErrorWithKeyFunction on network that doesn't have a VAE " - + "layer as layer 0. Layer type: " + l.getClass()); + + "layer as layer 0. 
ILayer type: " + l.getClass()); } return (VariationalAutoencoder) l; } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java index 12fbbbeb6..b7cdbd403 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/multilayer/scoring/VaeReconstructionProbWithKeyFunction.java @@ -22,7 +22,7 @@ package org.deeplearning4j.spark.impl.multilayer.scoring; import org.apache.spark.broadcast.Broadcast; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.layers.variational.VariationalAutoencoder; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.spark.impl.common.score.BaseVaeReconstructionProbWithKeyFunction; @@ -34,7 +34,7 @@ public class VaeReconstructionProbWithKeyFunction extends BaseVaeReconstructi /** * @param params MultiLayerNetwork parameters - * @param jsonConfig MultiLayerConfiguration, as json + * @param jsonConfig NeuralNetConfiguration, as json * @param useLogProbability If true: use log probability. False: use raw probability. * @param batchSize Batch size to use when scoring * @param numSamples Number of samples to use when calling {@link VariationalAutoencoder#reconstructionLogProbability(INDArray, int)} @@ -47,7 +47,7 @@ public class VaeReconstructionProbWithKeyFunction extends BaseVaeReconstructi @Override public VariationalAutoencoder getVaeLayer() { MultiLayerNetwork network = - new MultiLayerNetwork(MultiLayerConfiguration.fromJson(jsonConfig.getValue())); + new MultiLayerNetwork(NeuralNetConfiguration.fromJson(jsonConfig.getValue())); network.init(); INDArray val = params.value().unsafeDuplication(); if (val.length() != network.numParams(false)) @@ -59,7 +59,7 @@ public class VaeReconstructionProbWithKeyFunction extends BaseVaeReconstructi if (!(l instanceof VariationalAutoencoder)) { throw new RuntimeException( "Cannot use VaeReconstructionProbWithKeyFunction on network that doesn't have a VAE " - + "layer as layer 0. Layer type: " + l.getClass()); + + "layer as layer 0. 
ILayer type: " + l.getClass()); } return (VariationalAutoencoder) l; } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java index 3a2170bc3..38a7e5bd8 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingMaster.java @@ -41,7 +41,7 @@ import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.StatsStorageRouterProvider; import org.deeplearning4j.core.storage.StorageMetaData; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; +import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.TrainingListener; @@ -275,8 +275,8 @@ public class ParameterAveragingTrainingMaster @Override public ParameterAveragingTrainingWorker getWorkerInstance(SparkDl4jMultiLayer network) { - NetBroadcastTuple tuple = new NetBroadcastTuple(network.getNetwork().getLayerWiseConfigurations(), - network.getNetwork().params(), network.getNetwork().getUpdater().getStateViewArray()); + NetBroadcastTuple tuple = new NetBroadcastTuple(network.getNetwork().getNetConfiguration(), + network.getNetwork().getModelParams(), network.getNetwork().getUpdater().getStateViewArray()); if (collectTrainingStats) stats.logBroadcastStart(); @@ -292,8 +292,8 @@ public class ParameterAveragingTrainingMaster @Override public ParameterAveragingTrainingWorker getWorkerInstance(SparkComputationGraph graph) { - NetBroadcastTuple tuple = new NetBroadcastTuple(graph.getNetwork().getConfiguration(), - graph.getNetwork().params(), graph.getNetwork().getUpdater().getStateViewArray()); + NetBroadcastTuple tuple = new NetBroadcastTuple(graph.getNetwork().getComputationGraphConfiguration(), + graph.getNetwork().getModelParams(), graph.getNetwork().getUpdater().getStateViewArray()); if (collectTrainingStats) stats.logBroadcastStart(); @@ -727,11 +727,11 @@ public class ParameterAveragingTrainingMaster if (params != null) { //Params may be null for edge case (empty RDD) if (network != null) { - MultiLayerConfiguration conf = network.getNetwork().getLayerWiseConfigurations(); + NeuralNetConfiguration conf = network.getNetwork().getNetConfiguration(); int numUpdates = averagingFrequency; conf.setIterationCount(conf.getIterationCount() + numUpdates); } else { - ComputationGraphConfiguration conf = graph.getNetwork().getConfiguration(); + ComputationGraphConfiguration conf = graph.getNetwork().getComputationGraphConfiguration(); int numUpdates = averagingFrequency; conf.setIterationCount(conf.getIterationCount() + numUpdates); } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingWorker.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingWorker.java index 87374a584..3b3b9f9b3 100644 --- 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingWorker.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/main/java/org/deeplearning4j/spark/impl/paramavg/ParameterAveragingTrainingWorker.java @@ -21,13 +21,13 @@ package org.deeplearning4j.spark.impl.paramavg; import lombok.val; +import net.brutex.ai.dnn.api.IModel; import org.apache.spark.broadcast.Broadcast; import org.deeplearning4j.core.storage.Persistable; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.StatsStorageRouterProvider; import org.deeplearning4j.core.storage.StorageMetaData; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.graph.util.ComputationGraphUtil; @@ -159,7 +159,7 @@ public class ParameterAveragingTrainingWorker extends BaseTrainingWorker list = new ArrayList<>(trainingListeners.size()); for (TrainingListener l : trainingListeners) { @@ -172,9 +172,9 @@ public class ParameterAveragingTrainingWorker extends BaseTrainingWorker irisData = getIris(); @@ -123,7 +122,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(10.0)) //Intentionally huge LR .weightInit(WeightInit.XAVIER).list() @@ -131,7 +130,7 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { .lossFunction(LossFunctions.LossFunction.MSE).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -163,14 +162,14 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); @@ -209,14 +208,14 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); @@ 
-246,14 +245,14 @@ public class TestEarlyStoppingSpark extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).list() .layer(0, new OutputLayer.Builder().nIn(4).nOut(3) .lossFunction(LossFunctions.LossFunction.MCXENT).build()) .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java index 39618055e..1a196af4f 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestEarlyStoppingSparkCompGraph.java @@ -71,14 +71,14 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { //Spark tests don't run on windows return; } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3) .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); @@ -124,7 +124,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(2.0)) //Intentionally huge LR .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") @@ -132,7 +132,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { .lossFunction(LossFunctions.LossFunction.MSE).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); EarlyStoppingModelSaver saver = new InMemoryModelSaver<>(); @@ -165,7 +165,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-6)).weightInit(WeightInit.XAVIER).graphBuilder() .addInputs("in") @@ -173,7 +173,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") 
.setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); @@ -213,7 +213,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { return; } Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(0.0)).weightInit(WeightInit.XAVIER).graphBuilder() .addInputs("in") @@ -221,7 +221,7 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); @@ -253,14 +253,14 @@ public class TestEarlyStoppingSparkCompGraph extends BaseSparkTest { //Spark tests don't run on windows return; } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd()).weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(4).nOut(3) .lossFunction(LossFunctions.LossFunction.MCXENT).build(), "in") .setOutputs("0").build(); ComputationGraph net = new ComputationGraph(conf); - net.setListeners(new ScoreIterationListener(5)); + net.addTrainingListeners(new ScoreIterationListener(5)); JavaRDD irisData = getIris(); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestKryo.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestKryo.java index da5d7822a..7815303f0 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestKryo.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/TestKryo.java @@ -22,7 +22,6 @@ package org.deeplearning4j.spark; import org.apache.spark.serializer.SerializerInstance; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.distribution.UniformDistribution; import org.deeplearning4j.nn.conf.graph.*; @@ -68,16 +67,16 @@ public class TestKryo extends BaseSparkKryoTest { Map m = new HashMap<>(); m.put(0, 0.5); m.put(10, 0.1); - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder() - .updater(new Nadam(new MapSchedule(ScheduleType.ITERATION,m))).list().layer(0, new OutputLayer.Builder().nIn(10).nOut(10).build()) + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder() + .updater(new Nadam(new MapSchedule(ScheduleType.ITERATION,m))).layer(0, new OutputLayer.Builder().nIn(10).nOut(10).build()) .build(); testSerialization(mlc, si); - ComputationGraphConfiguration cgc = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration cgc = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder() .dist(new UniformDistribution(-1, 1)) - .updater(new Adam(new MapSchedule(ScheduleType.ITERATION,m))) + .updater(new 
Adam(new MapSchedule(ScheduleType.ITERATION,m)))) .graphBuilder() .addInputs("in").addLayer("out", new OutputLayer.Builder().nIn(10).nOut(10).build(), "in") .setOutputs("out").build(); @@ -86,7 +85,7 @@ public class TestKryo extends BaseSparkKryoTest { //Check main layers: - Layer[] layers = new Layer[] {new OutputLayer.Builder().nIn(10).nOut(10).build(), + LayerConfiguration[] layers = new LayerConfiguration[] {new OutputLayer.Builder().nIn(10).nOut(10).build(), new RnnOutputLayer.Builder().nIn(10).nOut(10).build(), new LossLayer.Builder().build(), new CenterLossOutputLayer.Builder().nIn(10).nOut(10).build(), new DenseLayer.Builder().nIn(10).nOut(10).build(), @@ -97,7 +96,7 @@ public class TestKryo extends BaseSparkKryoTest { new LSTM.Builder().nIn(10).nOut(10).build(), new DropoutLayer.Builder(0.5).build(), new BatchNormalization.Builder().build(), new LocalResponseNormalization.Builder().build()}; - for (Layer l : layers) { + for (LayerConfiguration l : layers) { testSerialization(l, si); } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java index 714c3ffb6..cc32d9723 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/datavec/TestPreProcessedData.java @@ -30,7 +30,6 @@ import org.datavec.api.records.reader.impl.csv.CSVRecordReader; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.Updater; import org.deeplearning4j.spark.BaseSparkTest; @@ -84,7 +83,7 @@ public class TestPreProcessedData extends BaseSparkTest { iter.next().save(f2); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.RMSPROP) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(3) .activation(Activation.TANH).build()) @@ -134,7 +133,7 @@ public class TestPreProcessedData extends BaseSparkTest { iter.next().save(f2); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.RMSPROP) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(3) @@ -188,7 +187,7 @@ public class TestPreProcessedData extends BaseSparkTest { mds.save(f2); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(Updater.RMSPROP) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new 
org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(3) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java index ec2195081..402ecb46a 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/TestKryoWarning.java @@ -23,7 +23,6 @@ package org.deeplearning4j.spark.impl; import org.apache.spark.SparkConf; import org.apache.spark.api.java.JavaSparkContext; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.spark.api.TrainingMaster; @@ -40,7 +39,7 @@ public class TestKryoWarning { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().list() .layer(0, new OutputLayer.Builder().nIn(10).nOut(10).build()) .build(); @@ -57,7 +56,7 @@ public class TestKryoWarning { try { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("0", new OutputLayer.Builder().nIn(10).nOut(10).build(), "in").setOutputs("0") .build(); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java index b3c96333d..d8b0ddb0a 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/TestCustomLayer.java @@ -22,7 +22,6 @@ package org.deeplearning4j.spark.impl.customlayer; import com.sun.jna.Platform; import org.apache.spark.api.java.JavaRDD; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -51,8 +50,8 @@ public class TestCustomLayer extends BaseSparkTest { } //Basic test - checks whether exceptions etc are thrown with custom layers + spark //Custom layers are tested more extensively in dl4j core - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().updater(new Sgd(0.1)).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new CustomLayer(3.14159)).layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayer.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayer.java index 189e1f529..97f6a2c89 100644 --- 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayer.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayer.java @@ -27,6 +27,7 @@ import org.deeplearning4j.nn.conf.InputPreProcessor; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.FeedForwardLayer; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.conf.memory.LayerMemoryReport; import org.deeplearning4j.nn.params.DefaultParamInitializer; import org.deeplearning4j.optimize.api.TrainingListener; @@ -53,13 +54,14 @@ public class CustomLayer extends FeedForwardLayer { public org.deeplearning4j.nn.api.Layer instantiate(NeuralNetConfiguration conf, Collection trainingListeners, int layerIndex, INDArray layerParamsView, boolean initializeParams, DataType networkDataType) { - CustomLayerImpl ret = new CustomLayerImpl(conf, networkDataType); - ret.setListeners(trainingListeners); + LayerConfiguration lconf = conf.getFlattenedLayerConfigurations().get(layerIndex); + CustomLayerImpl ret = new CustomLayerImpl(lconf, networkDataType); + ret.addTrainingListeners(trainingListeners); ret.setIndex(layerIndex); ret.setParamsViewArray(layerParamsView); - Map paramTable = initializer().init(conf, layerParamsView, initializeParams); + Map paramTable = initializer().init(this, layerParamsView, initializeParams); ret.setParamTable(paramTable); - ret.setConf(conf); + ret.setLayerConfiguration(lconf); return ret; } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayerImpl.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayerImpl.java index 55b32d1dc..610f4079c 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayerImpl.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/customlayer/layer/CustomLayerImpl.java @@ -21,11 +21,12 @@ package org.deeplearning4j.spark.impl.customlayer.layer; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.layers.BaseLayer; import org.nd4j.linalg.api.buffer.DataType; public class CustomLayerImpl extends BaseLayer { - public CustomLayerImpl(NeuralNetConfiguration conf, DataType dataType) { + public CustomLayerImpl(LayerConfiguration conf, DataType dataType) { super(conf, dataType); } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java index 579effe1a..20727ed03 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/graph/TestSparkComputationGraph.java @@ -40,7 +40,6 @@ import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.weights.WeightInit; -import 
org.deeplearning4j.optimize.api.TrainingListener; import org.deeplearning4j.optimize.listeners.ScoreIterationListener; import org.deeplearning4j.spark.BaseSparkTest; import org.deeplearning4j.spark.api.RDDTrainingApproach; @@ -77,7 +76,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { public static ComputationGraph getBasicNetIris2Class() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in") .addLayer("l0", new DenseLayer.Builder().nIn(4).nOut(10).build(), "in") .addLayer("l1", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) @@ -104,7 +103,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { while (iter.hasNext()) list.add(iter.next()); - ComputationGraphConfiguration config = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration config = NeuralNetConfiguration.builder() .updater(new Sgd(0.1)) .graphBuilder().addInputs("in") .addLayer("dense", new DenseLayer.Builder().nIn(4).nOut(2).build(), "in").addLayer("out", @@ -138,7 +137,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { @Test public void testDistributedScoring() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().l1(0.1).l2(0.1) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().l1(0.1).l2(0.1) .seed(123).updater(new Nesterovs(0.1, 0.9)).graphBuilder() .addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) @@ -217,7 +216,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { //@Ignore("AB 2019/05/23 - Failing on CI only - passing locally. 
Possible precision or threading issue") public void testSeedRepeatability() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(Updater.RMSPROP) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(Updater.RMSPROP.getIUpdaterWithDefaultConfig()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(4) @@ -272,9 +271,9 @@ public class TestSparkComputationGraph extends BaseSparkTest { sparkNet3.fit(rdd); - INDArray p1 = sparkNet1.getNetwork().params(); - INDArray p2 = sparkNet2.getNetwork().params(); - INDArray p3 = sparkNet3.getNetwork().params(); + INDArray p1 = sparkNet1.getNetwork().getModelParams(); + INDArray p2 = sparkNet2.getNetwork().getModelParams(); + INDArray p3 = sparkNet3.getNetwork().getModelParams(); sparkNet1.getTrainingMaster().deleteTempFiles(sc); sparkNet2.getTrainingMaster().deleteTempFiles(sc); @@ -414,7 +413,7 @@ public class TestSparkComputationGraph extends BaseSparkTest { JavaRDD rdd = sc.parallelize(l); // simple model - val modelConf = new NeuralNetConfiguration.Builder() + val modelConf = NeuralNetConfiguration.builder() .updater(new Adam(0.01)) .weightInit(WeightInit.XAVIER_UNIFORM) .biasInit(0) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java index 887696af3..688135888 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/misc/TestFrozenLayers.java @@ -23,6 +23,8 @@ package org.deeplearning4j.spark.impl.misc; import org.apache.spark.api.java.JavaRDD; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; +import org.deeplearning4j.nn.conf.layers.DenseLayer.Builder; +import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.layers.FrozenLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -45,6 +47,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction; import static org.junit.jupiter.api.Assertions.*; @@ -53,7 +56,7 @@ public class TestFrozenLayers extends BaseSparkTest { @Test public void testSparkFrozenLayers() { - NeuralNetConfiguration.Builder overallConf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + NeuralNetConfiguration.NeuralNetConfigurationBuilder overallConf = NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.TANH); FineTuneConfiguration finetune = new FineTuneConfiguration.Builder().updater(new Sgd(0.1)).build(); @@ -61,12 +64,12 @@ public class TestFrozenLayers extends BaseSparkTest { int nIn = 6; int nOut = 3; - MultiLayerNetwork origModel = new MultiLayerNetwork(overallConf.clone().list() - .layer(0, new DenseLayer.Builder().nIn(6).nOut(5).build()) - .layer(1, new DenseLayer.Builder().nIn(5).nOut(4).build()) - .layer(2, new DenseLayer.Builder().nIn(4).nOut(3).build()) - .layer(3, new 
org.deeplearning4j.nn.conf.layers.OutputLayer.Builder( - LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) + MultiLayerNetwork origModel = new MultiLayerNetwork((NeuralNetConfiguration) overallConf.clone() + .layer(0, new Builder().nIn(6).nOut(5).build()) + .layer(1, new Builder().nIn(5).nOut(4).build()) + .layer(2, new Builder().nIn(4).nOut(3).build()) + .layer(3, new OutputLayer.Builder( + LossFunction.MCXENT).activation(Activation.SOFTMAX).nIn(3).nOut(3) .build()) .build()); origModel.init(); @@ -74,7 +77,7 @@ public class TestFrozenLayers extends BaseSparkTest { MultiLayerNetwork withFrozen = new TransferLearning.Builder(origModel).fineTuneConfiguration(finetune) .setFeatureExtractor(1).build(); - Map m = withFrozen.paramTable(); + Map m = withFrozen.getParamTable(); Map pCopy = new HashMap<>(); for (Map.Entry entry : m.entrySet()) { pCopy.put(entry.getKey(), entry.getValue().dup()); @@ -110,7 +113,7 @@ public class TestFrozenLayers extends BaseSparkTest { MultiLayerNetwork fitted = sNet.getNetwork(); - Map fittedParams = fitted.paramTable(); + Map fittedParams = fitted.getParamTable(); for (Map.Entry entry : fittedParams.entrySet()) { INDArray orig = pCopy.get(entry.getKey()); @@ -118,7 +121,7 @@ public class TestFrozenLayers extends BaseSparkTest { boolean isFrozen = entry.getKey().startsWith("0_") || entry.getKey().startsWith("1_"); if (isFrozen) { - //Layer should be frozen -> no change + //ILayer should be frozen -> no change assertEquals(orig, now, entry.getKey()); } else { //Not frozen -> should be different @@ -136,7 +139,7 @@ public class TestFrozenLayers extends BaseSparkTest { int nIn = 6; int nOut = 3; - ComputationGraph origModel = new ComputationGraph(new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)) + ComputationGraph origModel = new ComputationGraph(NeuralNetConfiguration.builder().updater(new Sgd(0.1)) .activation(Activation.TANH).graphBuilder().addInputs("in") .addLayer("0", new DenseLayer.Builder().nIn(6).nOut(5).build(), "in") .addLayer("1", new DenseLayer.Builder().nIn(5).nOut(4).build(), "0") @@ -151,7 +154,7 @@ public class TestFrozenLayers extends BaseSparkTest { ComputationGraph withFrozen = new TransferLearning.GraphBuilder(origModel).fineTuneConfiguration(finetune) .setFeatureExtractor("1").build(); - Map m = withFrozen.paramTable(); + Map m = withFrozen.getParamTable(); Map pCopy = new HashMap<>(); for (Map.Entry entry : m.entrySet()) { pCopy.put(entry.getKey(), entry.getValue().dup()); @@ -187,7 +190,7 @@ public class TestFrozenLayers extends BaseSparkTest { ComputationGraph fitted = sNet.getNetwork(); - Map fittedParams = fitted.paramTable(); + Map fittedParams = fitted.getParamTable(); for (Map.Entry entry : fittedParams.entrySet()) { INDArray orig = pCopy.get(entry.getKey()); @@ -195,7 +198,7 @@ public class TestFrozenLayers extends BaseSparkTest { boolean isFrozen = entry.getKey().startsWith("0_") || entry.getKey().startsWith("1_"); if (isFrozen) { - //Layer should be frozen -> no change + //ILayer should be frozen -> no change assertEquals(orig, now, entry.getKey()); } else { //Not frozen -> should be different diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java index 8b5a8b46c..7a638199c 100644 --- 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestMiscFunctions.java @@ -23,7 +23,6 @@ package org.deeplearning4j.spark.impl.multilayer; import org.apache.spark.api.java.JavaPairRDD; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.conf.layers.variational.GaussianReconstructionDistribution; @@ -57,7 +56,7 @@ public class TestMiscFunctions extends BaseSparkTest { @Test public void testFeedForwardWithKey() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).build()) .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(3).nOut(3) .activation(Activation.SOFTMAX).build()) @@ -107,7 +106,7 @@ public class TestMiscFunctions extends BaseSparkTest { @Test public void testFeedForwardWithKeyInputMask() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .list() .layer( new LSTM.Builder().nIn(4).nOut(3).build()) .layer(new GlobalPoolingLayer(PoolingType.AVG)) @@ -162,7 +161,7 @@ public class TestMiscFunctions extends BaseSparkTest { @Test public void testFeedForwardWithKeyGraph() { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER) .graphBuilder().addInputs("in1", "in2") .addLayer("0", new DenseLayer.Builder().nIn(4).nOut(3).build(), "in1") .addLayer("1", new DenseLayer.Builder().nIn(4).nOut(3).build(), "in2").addLayer("2", @@ -220,7 +219,7 @@ public class TestMiscFunctions extends BaseSparkTest { int nIn = 10; - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder().list() .layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .reconstructionDistribution( new GaussianReconstructionDistribution(Activation.IDENTITY)) @@ -240,7 +239,7 @@ public class TestMiscFunctions extends BaseSparkTest { JavaPairRDD reconstr = rdd.mapPartitionsToPair(new VaeReconstructionProbWithKeyFunction( - sc.broadcast(net.params()), sc.broadcast(mlc.toJson()), true, 16, 128)); + sc.broadcast(net.getModelParams()), sc.broadcast(mlc.toJson()), true, 16, 128)); Map l = reconstr.collectAsMap(); @@ -259,7 +258,7 @@ public class TestMiscFunctions extends BaseSparkTest { int nIn = 10; - MultiLayerConfiguration mlc = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration mlc = NeuralNetConfiguration.builder() .list().layer(0, new org.deeplearning4j.nn.conf.layers.variational.VariationalAutoencoder.Builder() .reconstructionDistribution(new LossFunctionWrapper( @@ -283,7 +282,7 @@ public class TestMiscFunctions extends BaseSparkTest { JavaPairRDD reconstrErrors = rdd.mapPartitionsToPair(new VaeReconstructionErrorWithKeyFunction( - 
sc.broadcast(net.params()), sc.broadcast(mlc.toJson()), 16)); + sc.broadcast(net.getModelParams()), sc.broadcast(mlc.toJson()), 16)); Map l = reconstrErrors.collectAsMap(); diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java index d2c0d66bc..7de0dc285 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/multilayer/TestSparkDl4jMultiLayer.java @@ -25,7 +25,6 @@ import lombok.extern.slf4j.Slf4j; import org.apache.spark.api.java.JavaRDD; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -41,7 +40,6 @@ import org.nd4j.linalg.dataset.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.learning.config.Adam; -import org.nd4j.linalg.learning.config.Nesterovs; import org.nd4j.linalg.lossfunctions.LossFunctions; import java.util.ArrayList; @@ -96,7 +94,7 @@ public class TestSparkDl4jMultiLayer extends BaseSparkTest { //---------------------------------- //Create network configuration and conduct network training - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .dataType(DataType.FLOAT) .seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java index 050e6279c..7ba980f62 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestCompareParameterAveragingSparkVsSingleMachine.java @@ -26,7 +26,6 @@ import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -51,7 +50,6 @@ import org.nd4j.linalg.learning.config.Sgd; import org.nd4j.linalg.lossfunctions.LossFunctions; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import static org.junit.jupiter.api.Assertions.*; @@ -63,9 +61,9 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { } - private static MultiLayerConfiguration getConf(int seed, IUpdater updater) { + private static NeuralNetConfiguration getConf(int 
seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()).layer(1, new OutputLayer.Builder() @@ -74,9 +72,9 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { return conf; } - private static MultiLayerConfiguration getConfCNN(int seed, IUpdater updater) { + private static NeuralNetConfiguration getConfCNN(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).list() .layer(0, new ConvolutionLayer.Builder().nOut(3).kernelSize(2, 2).stride(1, 1).padding(0, 0) @@ -85,13 +83,13 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { .activation(Activation.TANH).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nOut(10) .build()) - .setInputType(InputType.convolutional(10, 10, 3)).build(); + .inputType(InputType.convolutional(10, 10, 3)).build(); return conf; } private static ComputationGraphConfiguration getGraphConf(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).graphBuilder() .addInputs("in") @@ -105,7 +103,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { private static ComputationGraphConfiguration getGraphConfCNN(int seed, IUpdater updater) { Nd4j.getRandom().setSeed(seed); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).updater(updater).seed(seed).graphBuilder() .addInputs("in") @@ -193,7 +191,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { MultiLayerNetwork net = new MultiLayerNetwork(getConf(12345, new RmsProp(0.5))); net.init(); - INDArray initialParams = net.params().dup(); + INDArray initialParams = net.getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { DataSet ds = getOneDataSet(miniBatchSize, seeds[i]); @@ -201,13 +199,13 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { net.setUpdater(null); net.fit(ds); } - INDArray finalParams = net.params().dup(); + INDArray finalParams = net.getModelParams().dup(); //Do training on Spark with one executor, for 3 separate minibatches TrainingMaster tm = getTrainingMaster(1, miniBatchSize, saveUpdater); SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, getConf(12345, new RmsProp(0.5)), tm); sparkNet.setCollectTrainingStats(true); - INDArray initialSparkParams = sparkNet.getNetwork().params().dup(); + INDArray initialSparkParams = sparkNet.getNetwork().getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { List list = getOneDataSetAsIndividalExamples(miniBatchSize, seeds[i]); @@ -216,7 +214,7 @@ public class 
TestCompareParameterAveragingSparkVsSingleMachine { sparkNet.fit(rdd); } - INDArray finalSparkParams = sparkNet.getNetwork().params().dup(); + INDArray finalSparkParams = sparkNet.getNetwork().getModelParams().dup(); assertEquals(initialParams, initialSparkParams); assertNotEquals(initialParams, finalParams); @@ -247,7 +245,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { ComputationGraph net = new ComputationGraph(getGraphConf(12345, new RmsProp(0.5))); net.init(); - INDArray initialParams = net.params().dup(); + INDArray initialParams = net.getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { DataSet ds = getOneDataSet(miniBatchSize, seeds[i]); @@ -255,14 +253,14 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { net.setUpdater(null); net.fit(ds); } - INDArray finalParams = net.params().dup(); + INDArray finalParams = net.getModelParams().dup(); //Do training on Spark with one executor, for 3 separate minibatches TrainingMaster tm = getTrainingMaster(1, miniBatchSize, saveUpdater); SparkComputationGraph sparkNet = new SparkComputationGraph(sc, getGraphConf(12345, new RmsProp(0.5)), tm); sparkNet.setCollectTrainingStats(true); - INDArray initialSparkParams = sparkNet.getNetwork().params().dup(); + INDArray initialSparkParams = sparkNet.getNetwork().getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { List list = getOneDataSetAsIndividalExamples(miniBatchSize, seeds[i]); @@ -271,7 +269,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { sparkNet.fit(rdd); } - INDArray finalSparkParams = sparkNet.getNetwork().params().dup(); + INDArray finalSparkParams = sparkNet.getNetwork().getModelParams().dup(); assertEquals(initialParams, initialSparkParams); assertNotEquals(initialParams, finalParams); @@ -306,7 +304,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { MultiLayerNetwork net = new MultiLayerNetwork(getConf(12345, new Sgd(0.5))); net.init(); - INDArray initialParams = net.params().dup(); + INDArray initialParams = net.getModelParams().dup(); // executioner.addToWatchdog(initialParams, "initialParams"); @@ -316,7 +314,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { net.setUpdater(null); net.fit(ds); } - INDArray finalParams = net.params().dup(); + INDArray finalParams = net.getModelParams().dup(); //Do training on Spark with one executor, for 3 separate minibatches // TrainingMaster tm = getTrainingMaster(1, miniBatchSizePerWorker, saveUpdater); @@ -327,7 +325,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { .rddTrainingApproach(RDDTrainingApproach.Export).build(); SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, getConf(12345, new Sgd(0.5)), tm); sparkNet.setCollectTrainingStats(true); - INDArray initialSparkParams = sparkNet.getNetwork().params().dup(); + INDArray initialSparkParams = sparkNet.getNetwork().getModelParams().dup(); // executioner.addToWatchdog(initialSparkParams, "initialSparkParams"); @@ -341,7 +339,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { // System.out.println(sparkNet.getSparkTrainingStats().statsAsString()); sparkNet.getSparkTrainingStats().statsAsString(); - INDArray finalSparkParams = sparkNet.getNetwork().params().dup(); + INDArray finalSparkParams = sparkNet.getNetwork().getModelParams().dup(); // System.out.println("Initial (Local) params: " + Arrays.toString(initialParams.data().asFloat())); // System.out.println("Initial (Spark) params: " @@ -355,7 +353,7 @@ public class 
TestCompareParameterAveragingSparkVsSingleMachine { double sparkScore = sparkNet.getScore(); assertTrue(sparkScore > 0.0); - assertEquals(net.score(), sparkScore, 1e-3); + assertEquals(net.getScore(), sparkScore, 1e-3); } finally { sc.stop(); } @@ -388,7 +386,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { MultiLayerNetwork net = new MultiLayerNetwork(getConfCNN(12345, new Sgd(0.5))); net.init(); - INDArray initialParams = net.params().dup(); + INDArray initialParams = net.getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { DataSet ds = getOneDataSetCNN(miniBatchSizePerWorker * nWorkers, seeds[i]); @@ -396,7 +394,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { net.setUpdater(null); net.fit(ds); } - INDArray finalParams = net.params().dup(); + INDArray finalParams = net.getModelParams().dup(); //Do training on Spark with one executor, for 3 separate minibatches ParameterAveragingTrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(1) @@ -405,7 +403,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { .rddTrainingApproach(RDDTrainingApproach.Export).build(); SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, getConfCNN(12345, new Sgd(0.5)), tm); sparkNet.setCollectTrainingStats(true); - INDArray initialSparkParams = sparkNet.getNetwork().params().dup(); + INDArray initialSparkParams = sparkNet.getNetwork().getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { List list = @@ -418,7 +416,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { // System.out.println(sparkNet.getSparkTrainingStats().statsAsString()); sparkNet.getSparkTrainingStats().statsAsString(); - INDArray finalSparkParams = sparkNet.getNetwork().params().dup(); + INDArray finalSparkParams = sparkNet.getNetwork().getModelParams().dup(); // System.out.println("Initial (Local) params: " + Arrays.toString(initialParams.data().asFloat())); // System.out.println("Initial (Spark) params: " @@ -431,7 +429,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { double sparkScore = sparkNet.getScore(); assertTrue(sparkScore > 0.0); - assertEquals(net.score(), sparkScore, 1e-3); + assertEquals(net.getScore(), sparkScore, 1e-3); } finally { sc.stop(); } @@ -466,7 +464,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { ComputationGraph net = new ComputationGraph(getGraphConf(12345, new Sgd(0.5))); net.init(); - INDArray initialParams = net.params().dup(); + INDArray initialParams = net.getModelParams().dup(); // executioner.addToWatchdog(initialParams, "initialParams"); for (int i = 0; i < seeds.length; i++) { @@ -475,14 +473,14 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { net.setUpdater(null); net.fit(ds); } - INDArray finalParams = net.params().dup(); + INDArray finalParams = net.getModelParams().dup(); // executioner.addToWatchdog(finalParams, "finalParams"); //Do training on Spark with one executor, for 3 separate minibatches TrainingMaster tm = getTrainingMaster(1, miniBatchSizePerWorker, saveUpdater); SparkComputationGraph sparkNet = new SparkComputationGraph(sc, getGraphConf(12345, new Sgd(0.5)), tm); sparkNet.setCollectTrainingStats(true); - INDArray initialSparkParams = sparkNet.getNetwork().params().dup(); + INDArray initialSparkParams = sparkNet.getNetwork().getModelParams().dup(); // executioner.addToWatchdog(initialSparkParams, "initialSparkParams"); @@ -496,7 +494,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { // 
System.out.println(sparkNet.getSparkTrainingStats().statsAsString()); sparkNet.getSparkTrainingStats().statsAsString(); - INDArray finalSparkParams = sparkNet.getNetwork().params().dup(); + INDArray finalSparkParams = sparkNet.getNetwork().getModelParams().dup(); // executioner.addToWatchdog(finalSparkParams, "finalSparkParams"); float[] fp = finalParams.data().asFloat(); @@ -514,7 +512,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { double sparkScore = sparkNet.getScore(); assertTrue(sparkScore > 0.0); - assertEquals(net.score(), sparkScore, 1e-3); + assertEquals(net.getScore(), sparkScore, 1e-3); } finally { sc.stop(); } @@ -547,7 +545,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { ComputationGraph net = new ComputationGraph(getGraphConfCNN(12345, new Sgd(0.5))); net.init(); - INDArray initialParams = net.params().dup(); + INDArray initialParams = net.getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { DataSet ds = getOneDataSetCNN(miniBatchSizePerWorker * nWorkers, seeds[i]); @@ -555,13 +553,13 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { net.setUpdater(null); net.fit(ds); } - INDArray finalParams = net.params().dup(); + INDArray finalParams = net.getModelParams().dup(); //Do training on Spark with one executor, for 3 separate minibatches TrainingMaster tm = getTrainingMaster(1, miniBatchSizePerWorker, saveUpdater); SparkComputationGraph sparkNet = new SparkComputationGraph(sc, getGraphConfCNN(12345, new Sgd(0.5)), tm); sparkNet.setCollectTrainingStats(true); - INDArray initialSparkParams = sparkNet.getNetwork().params().dup(); + INDArray initialSparkParams = sparkNet.getNetwork().getModelParams().dup(); for (int i = 0; i < seeds.length; i++) { List list = @@ -574,7 +572,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { // System.out.println(sparkNet.getSparkTrainingStats().statsAsString()); sparkNet.getSparkTrainingStats().statsAsString(); - INDArray finalSparkParams = sparkNet.getNetwork().params().dup(); + INDArray finalSparkParams = sparkNet.getNetwork().getModelParams().dup(); // System.out.println("Initial (Local) params: " + Arrays.toString(initialParams.data().asFloat())); // System.out.println("Initial (Spark) params: " + Arrays.toString(initialSparkParams.data().asFloat())); @@ -586,7 +584,7 @@ public class TestCompareParameterAveragingSparkVsSingleMachine { double sparkScore = sparkNet.getScore(); assertTrue(sparkScore > 0.0); - assertEquals(net.score(), sparkScore, 1e-3); + assertEquals(net.getScore(), sparkScore, 1e-3); } finally { sc.stop(); } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java index 48a30034a..e4a720a51 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/paramavg/TestSparkMultiLayerParameterAveraging.java @@ -37,9 +37,8 @@ import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; import org.deeplearning4j.nn.api.Layer; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; 
import org.deeplearning4j.nn.conf.NeuralNetConfiguration; -import org.deeplearning4j.nn.conf.layers.BaseLayer; +import org.deeplearning4j.nn.conf.layers.BaseLayerConfiguration; import org.deeplearning4j.nn.conf.layers.BatchNormalization; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -121,7 +120,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { .toJavaRDD().map(new TestFn()); DataSet d = new IrisDataSetIterator(150, 150).next(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(100).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) @@ -156,8 +155,8 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { .getAbsolutePath()) .toJavaRDD().map(new TestFn()); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(123) .updater(new Adam(1e-6)) .weightInit(WeightInit.XAVIER) .list() @@ -192,11 +191,11 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 5, 1, 0)); MultiLayerNetwork networkCopy = sparkNetCopy.fit(data); - INDArray expectedParams = networkCopy.params(); + INDArray expectedParams = networkCopy.getModelParams(); SparkDl4jMultiLayer sparkNet = getBasicNetwork(); MultiLayerNetwork network = sparkNet.fit(data); - INDArray actualParams = network.params(); + INDArray actualParams = network.getModelParams(); assertEquals(expectedParams.size(1), actualParams.size(1)); } @@ -211,14 +210,14 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { MultiLayerNetwork netCopy = sparkNet.getNetwork().clone(); netCopy.fit(data); - IUpdater expectedUpdater = ((BaseLayer) netCopy.conf().getLayer()).getIUpdater(); - double expectedLR = ((Nesterovs)((BaseLayer) netCopy.conf().getLayer()).getIUpdater()).getLearningRate(); - double expectedMomentum = ((Nesterovs)((BaseLayer) netCopy.conf().getLayer()).getIUpdater()).getMomentum(); + IUpdater expectedUpdater = ((BaseLayerConfiguration) netCopy.getLayerConfiguration()).getIUpdater(); + double expectedLR = ((Nesterovs)((BaseLayerConfiguration) netCopy.getLayerConfiguration()).getIUpdater()).getLearningRate(); + double expectedMomentum = ((Nesterovs)((BaseLayerConfiguration) netCopy.getLayerConfiguration()).getIUpdater()).getMomentum(); - IUpdater actualUpdater = ((BaseLayer) sparkNet.getNetwork().conf().getLayer()).getIUpdater(); + IUpdater actualUpdater = ((BaseLayerConfiguration) sparkNet.getNetwork().getLayerConfiguration()).getIUpdater(); sparkNet.fit(sparkData); - double actualLR = ((Nesterovs)((BaseLayer) sparkNet.getNetwork().conf().getLayer()).getIUpdater()).getLearningRate(); - double actualMomentum = ((Nesterovs)((BaseLayer) sparkNet.getNetwork().conf().getLayer()).getIUpdater()).getMomentum(); + double actualLR = ((Nesterovs)((BaseLayerConfiguration) sparkNet.getNetwork().getLayerConfiguration()).getIUpdater()).getLearningRate(); + double actualMomentum = ((Nesterovs)((BaseLayerConfiguration) sparkNet.getNetwork().getLayerConfiguration()).getIUpdater()).getMomentum(); assertEquals(expectedUpdater, actualUpdater); assertEquals(expectedLR, actualLR, 0.01); @@ -269,7 +268,7 @@ public class 
TestSparkMultiLayerParameterAveraging extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) .activation(Activation.TANH).build()) @@ -294,7 +293,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { @Test public void testDistributedScoring() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().l1(0.1).l2(0.1) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().l1(0.1).l2(0.1) .seed(123).updater(new Nesterovs(0.1, 0.9)).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(nIn).nOut(3) .activation(Activation.TANH).build()) @@ -383,7 +382,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { list.add(iter.next()); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -447,7 +446,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -475,11 +474,11 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { paths.add(path); } - INDArray paramsBefore = sparkNet.getNetwork().params().dup(); + INDArray paramsBefore = sparkNet.getNetwork().getModelParams().dup(); JavaRDD pathRdd = sc.parallelize(paths); sparkNet.fitPaths(pathRdd); - INDArray paramsAfter = sparkNet.getNetwork().params().dup(); + INDArray paramsAfter = sparkNet.getNetwork().getModelParams().dup(); assertNotEquals(paramsBefore, paramsAfter); SparkTrainingStats stats = sparkNet.getSparkTrainingStats(); @@ -517,7 +516,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -546,11 +545,11 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { paths.add(path); } - INDArray paramsBefore = sparkNet.getNetwork().params().dup(); + INDArray paramsBefore = sparkNet.getNetwork().getModelParams().dup(); JavaRDD pathRdd = sc.parallelize(paths); sparkNet.fitPaths(pathRdd); - INDArray paramsAfter = sparkNet.getNetwork().params().dup(); + INDArray paramsAfter = sparkNet.getNetwork().getModelParams().dup(); assertNotEquals(paramsBefore, paramsAfter); Thread.sleep(200); @@ -605,7 +604,7 @@ public class 
TestSparkMultiLayerParameterAveraging extends BaseSparkTest { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) @@ -636,11 +635,11 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { paths.add(path); } - INDArray paramsBefore = sparkNet.getNetwork().params().dup(); + INDArray paramsBefore = sparkNet.getNetwork().getModelParams().dup(); JavaRDD pathRdd = sc.parallelize(paths); sparkNet.fitPaths(pathRdd); - INDArray paramsAfter = sparkNet.getNetwork().params().dup(); + INDArray paramsAfter = sparkNet.getNetwork().getModelParams().dup(); assertNotEquals(paramsBefore, paramsAfter); SparkTrainingStats stats = sparkNet.getSparkTrainingStats(); @@ -658,11 +657,11 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { paths.add(path); } - paramsBefore = sparkNet.getNetwork().params().dup(); + paramsBefore = sparkNet.getNetwork().getModelParams().dup(); pathRdd = sc.parallelize(paths); sparkNet.fitPathsMultiDataSet(pathRdd); - paramsAfter = sparkNet.getNetwork().params().dup(); + paramsAfter = sparkNet.getNetwork().getModelParams().dup(); assertNotEquals(paramsBefore, paramsAfter); stats = sparkNet.getSparkTrainingStats(); @@ -678,7 +677,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .weightInit(WeightInit.XAVIER).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(4).nOut(4) @@ -732,9 +731,9 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { sparkNet3.fit(rdd); - INDArray p1 = sparkNet1.getNetwork().params(); - INDArray p2 = sparkNet2.getNetwork().params(); - INDArray p3 = sparkNet3.getNetwork().params(); + INDArray p1 = sparkNet1.getNetwork().getModelParams(); + INDArray p2 = sparkNet2.getNetwork().getModelParams(); + INDArray p3 = sparkNet3.getNetwork().getModelParams(); sparkNet1.getTrainingMaster().deleteTempFiles(sc); sparkNet2.getTrainingMaster().deleteTempFiles(sc); @@ -763,7 +762,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { list.add(iter.next()); } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) .activation(Activation.TANH).build()) @@ -785,13 +784,13 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { JavaRDD rdd = sc.parallelize(list); - assertEquals(0, sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount()); + assertEquals(0, sparkNet.getNetwork().getNetConfiguration().getIterationCount()); sparkNet.fit(rdd); assertEquals(minibatchesPerWorkerPerEpoch, - sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount()); + 
sparkNet.getNetwork().getNetConfiguration().getIterationCount()); sparkNet.fit(rdd); assertEquals(2 * minibatchesPerWorkerPerEpoch, - sparkNet.getNetwork().getLayerWiseConfigurations().getIterationCount()); + sparkNet.getNetwork().getNetConfiguration().getIterationCount()); sparkNet.getTrainingMaster().deleteTempFiles(sc); } @@ -813,7 +812,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { list.add(iter.next()); } - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().updater(new RmsProp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().updater(new RmsProp()) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .graphBuilder().addInputs("in") .addLayer("0", new org.deeplearning4j.nn.conf.layers.DenseLayer.Builder().nIn(28 * 28).nOut(50) @@ -835,12 +834,12 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { JavaRDD rdd = sc.parallelize(list); - assertEquals(0, sparkNet.getNetwork().getConfiguration().getIterationCount()); + assertEquals(0, sparkNet.getNetwork().getComputationGraphConfiguration().getIterationCount()); sparkNet.fit(rdd); - assertEquals(minibatchesPerWorkerPerEpoch, sparkNet.getNetwork().getConfiguration().getIterationCount()); + assertEquals(minibatchesPerWorkerPerEpoch, sparkNet.getNetwork().getComputationGraphConfiguration().getIterationCount()); sparkNet.fit(rdd); assertEquals(2 * minibatchesPerWorkerPerEpoch, - sparkNet.getNetwork().getConfiguration().getIterationCount()); + sparkNet.getNetwork().getComputationGraphConfiguration().getIterationCount()); sparkNet.getTrainingMaster().deleteTempFiles(sc); } @@ -854,7 +853,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nIn = 8; Nd4j.getRandom().setSeed(12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp()) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new RmsProp()) .weightInit(WeightInit.XAVIER).list() .layer(0, new VariationalAutoencoder.Builder().nIn(8).nOut(10).encoderLayerSizes(12) .decoderLayerSizes(13).reconstructionDistribution( @@ -890,7 +889,7 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nIn = 8; Nd4j.getRandom().setSeed(12345); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345).updater(new RmsProp()) + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().seed(12345).updater(new RmsProp()) .weightInit(WeightInit.XAVIER).graphBuilder().addInputs("in") .addLayer("0", new VariationalAutoencoder.Builder().nIn(8).nOut(10).encoderLayerSizes(12) .decoderLayerSizes(13).reconstructionDistribution( @@ -930,8 +929,8 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nOut = 2; int layerSize = 10; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().nIn(layerSize).nOut(nOut) .activation(Activation.SOFTMAX).lossFunction( @@ -985,8 +984,8 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { int nOut = 3; int layerSize = 10; - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER).list() + NeuralNetConfiguration conf = + 
NeuralNetConfiguration.builder().weightInit(WeightInit.XAVIER).list() .layer(0, new DenseLayer.Builder().nIn(nIn).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().nIn(layerSize).nOut(nOut) .activation(Activation.SOFTMAX).lossFunction( @@ -1039,12 +1038,12 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { //Spark tests don't run on windows return; } - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .list() .layer(new OutputLayer.Builder().nIn(4).nOut(3).build()) .build(); - ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder() + ComputationGraphConfiguration conf2 = NeuralNetConfiguration.builder() .graphBuilder() .addInputs("in") .addLayer("out", new OutputLayer.Builder().nIn(4).nOut(3).build(), "in") @@ -1075,12 +1074,12 @@ public class TestSparkMultiLayerParameterAveraging extends BaseSparkTest { for(int i=0; i<3; i++ ){ - assertEquals(i, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount()); - assertEquals(i, sn2.getNetwork().getConfiguration().getEpochCount()); + assertEquals(i, sn1.getNetwork().getNetConfiguration().getEpochCount()); + assertEquals(i, sn2.getNetwork().getComputationGraphConfiguration().getEpochCount()); sn1.fit(rdd); sn2.fit(rdd); - assertEquals(i+1, sn1.getNetwork().getLayerWiseConfigurations().getEpochCount()); - assertEquals(i+1, sn2.getNetwork().getConfiguration().getEpochCount()); + assertEquals(i+1, sn1.getNetwork().getNetConfiguration().getEpochCount()); + assertEquals(i+1, sn2.getNetwork().getComputationGraphConfiguration().getEpochCount()); } } } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java index 5d33e82c6..5b735e5a2 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/impl/stats/TestTrainingStatsCollection.java @@ -22,11 +22,9 @@ package org.deeplearning4j.spark.impl.stats; import com.sun.jna.Platform; import org.apache.commons.io.FilenameUtils; -import org.apache.spark.SparkConf; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -68,7 +66,7 @@ public class TestTrainingStatsCollection extends BaseSparkTest { try { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().nIn(10).nOut(10).build()) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java index aadf69cdd..1104f8667 100644 --- 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-core/src/test/java/org/deeplearning4j/spark/ui/TestListeners.java @@ -27,7 +27,6 @@ import org.deeplearning4j.core.storage.Persistable; import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -60,7 +59,7 @@ public class TestListeners extends BaseSparkTest { JavaSparkContext sc = getContext(); int nExecutors = numExecutors(); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(123) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(123) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() .layer(0, new DenseLayer.Builder().nIn(4).nOut(100).weightInit(WeightInit.XAVIER) .activation(Activation.RELU).build()) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/ParameterServerTrainingHook.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/ParameterServerTrainingHook.java index 402560c73..060b88dc1 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/ParameterServerTrainingHook.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/ParameterServerTrainingHook.java @@ -20,7 +20,7 @@ package org.deeplearning4j.spark.parameterserver; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.spark.api.TrainingHook; import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.MultiDataSet; @@ -39,7 +39,7 @@ public class ParameterServerTrainingHook implements TrainingHook { * @param model themodel that was update */ @Override - public void preUpdate(DataSet minibatch, Model model) { + public void preUpdate(DataSet minibatch, IModel model) { //pull } @@ -51,7 +51,7 @@ public class ParameterServerTrainingHook implements TrainingHook { * @param model the model that was updated */ @Override - public void postUpdate(DataSet minibatch, Model model) { + public void postUpdate(DataSet minibatch, IModel model) { //push } @@ -63,7 +63,7 @@ public class ParameterServerTrainingHook implements TrainingHook { * @param model themodel that was update */ @Override - public void preUpdate(MultiDataSet minibatch, Model model) { + public void preUpdate(MultiDataSet minibatch, IModel model) { //pull } @@ -75,7 +75,7 @@ public class ParameterServerTrainingHook implements TrainingHook { * @param model the model that was updated */ @Override - public void postUpdate(MultiDataSet minibatch, Model model) { + public void postUpdate(MultiDataSet minibatch, IModel model) { //push } } diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java index a9e2a213b..f26ae7e66 100644 --- 
a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/pw/SharedTrainingWrapper.java @@ -27,7 +27,7 @@ import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; import org.deeplearning4j.common.config.DL4JEnvironmentVars; import org.deeplearning4j.exception.DL4JInvalidConfigException; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.Updater; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -89,7 +89,7 @@ public class SharedTrainingWrapper { protected ThreadLocal iteratorDataSetCount = new ThreadLocal<>(); //Using AtomicInteger because it's mutable, not because it's atomic protected ThreadLocal observer = new ThreadLocal<>(); protected EncodedGradientsAccumulator accumulator; - protected Model originalModel; + protected IModel originalModel; protected UpdatesConsumer consumer; @@ -200,7 +200,7 @@ public class SharedTrainingWrapper { SharedTrainingConfiguration trainingConfiguration = worker.getBroadcastConfiguration().getValue(); VoidConfiguration voidConfiguration = worker.getBroadcastConfiguration().getValue().getVoidConfiguration(); - Model model = null; + IModel model = null; /* Plan is simple here: if there's defined field in SharedTrainingConfiguration - use that. @@ -239,7 +239,7 @@ public class SharedTrainingWrapper { List listeners = worker.getListeners(); if(listeners != null){ - model.setListeners(listeners); + model.addTrainingListeners(listeners.toArray(new TrainingListener[]{})); StatsStorageRouter r = worker.getRouter(); if(r != null){ for(TrainingListener l : listeners){ @@ -319,7 +319,7 @@ public class SharedTrainingWrapper { consumer = UpdatesConsumer.builder() .numWorkers(numWorkers) .accumulator(accumulator) - .params(model.params()) + .params(model.getModelParams()) .build(); accumulator.setExternalSource(consumer.getUpdatesQueue()); @@ -375,14 +375,14 @@ public class SharedTrainingWrapper { ((MultiLayerNetwork) model).setIterationCount(ModelParameterServer.getInstance().getStartPosition().getFirst()); ((MultiLayerNetwork) model).setEpochCount(ModelParameterServer.getInstance().getStartPosition().getSecond()); } else if (originalModel instanceof ComputationGraph) { - ((ComputationGraph) model).getConfiguration().setIterationCount(ModelParameterServer.getInstance().getStartPosition().getFirst()); - ((ComputationGraph) model).getConfiguration().setEpochCount(ModelParameterServer.getInstance().getStartPosition().getSecond()); + ((ComputationGraph) model).getComputationGraphConfiguration().setIterationCount(ModelParameterServer.getInstance().getStartPosition().getFirst()); + ((ComputationGraph) model).getComputationGraphConfiguration().setEpochCount(ModelParameterServer.getInstance().getStartPosition().getSecond()); } // if we're going to extend iteratation for debugging purposes - let's do that here if (trainingConfiguration.getDebugLongerIterations() > 0) { log.warn("Adding SleepyListener: {} ms", trainingConfiguration.getDebugLongerIterations()); - model.addListeners(SleepyTrainingListener.builder() + model.addTrainingListeners(SleepyTrainingListener.builder() .timerIteration(trainingConfiguration.getDebugLongerIterations()).build()); } @@ -416,16 +416,16 @@ public 
class SharedTrainingWrapper { val mParams = modelParamsSupplier.get(); if (mParams != null) { log.info("Updating model params to the most recent ones..."); - originalModel.params().assign(mParams); + originalModel.getModelParams().assign(mParams); } // ok. attaching accumulator to model if (model instanceof ComputationGraph) { - ((ComputationGraph) originalModel).getConfiguration() + ((ComputationGraph) originalModel).getComputationGraphConfiguration() .setTrainingWorkspaceMode(trainingConfiguration.getWorkspaceMode()); ((ComputationGraph) originalModel).setGradientsAccumulator(accumulator); } else if (model instanceof MultiLayerNetwork) { - ((MultiLayerNetwork) originalModel).getLayerWiseConfigurations() + ((MultiLayerNetwork) originalModel).getNetConfiguration() .setTrainingWorkspaceMode(trainingConfiguration.getWorkspaceMode()); ((MultiLayerNetwork) originalModel).setGradientsAccumulator(accumulator); } @@ -520,7 +520,7 @@ public class SharedTrainingWrapper { val taAveraged = mh.getAverageThresholdAlgorithm(); // FIXME: fill stats here - val result = SharedTrainingResult.builder().aggregationsCount(1).scoreSum(originalModel.score()) + val result = SharedTrainingResult.builder().aggregationsCount(1).scoreSum(originalModel.getScore()) .updaterStateArray(updaterState).listenerMetaData(new ArrayList<>()) .listenerStaticInfo(new ArrayList<>()).listenerUpdates(new ArrayList<>()) .minibatchesPerExecutor(Collections.singletonMap(SparkUtils.getSparkExecutorId(), iteratorDataSetCount.get().get())) diff --git a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java index f0b6bc151..5d9dd9d33 100644 --- a/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java +++ b/cavis-dnn/cavis-dnn-spark/cavis-dnn-spark-parameterserver/src/main/java/org/deeplearning4j/spark/parameterserver/training/SharedTrainingMaster.java @@ -262,8 +262,8 @@ public class SharedTrainingMaster extends BaseTrainingMaster iterations = Collections.newSetFromMap(new ConcurrentHashMap<>()); private static final Set epochs = Collections.newSetFromMap(new ConcurrentHashMap<>()); @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { iterations.add(iteration); epochs.add(epoch); } diff --git a/cavis-dnn/cavis-dnn-tsne/src/main/java/org/deeplearning4j/plot/BarnesHutTsne.java b/cavis-dnn/cavis-dnn-tsne/src/main/java/org/deeplearning4j/plot/BarnesHutTsne.java index d8efd7dbb..4d3f18c80 100644 --- a/cavis-dnn/cavis-dnn-tsne/src/main/java/org/deeplearning4j/plot/BarnesHutTsne.java +++ b/cavis-dnn/cavis-dnn-tsne/src/main/java/org/deeplearning4j/plot/BarnesHutTsne.java @@ -20,24 +20,32 @@ package org.deeplearning4j.plot; import com.google.common.util.concurrent.AtomicDouble; import lombok.AllArgsConstructor; import lombok.Data; +import lombok.NonNull; import lombok.Setter; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.clustering.algorithm.Distance; import org.deeplearning4j.clustering.sptree.DataPoint; import org.deeplearning4j.clustering.sptree.SpTree; import org.deeplearning4j.clustering.vptree.VPTree; -import org.deeplearning4j.nn.api.Model; +import 
org.deeplearning4j.nn.api.ITraininableLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.WorkspaceMode; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.DefaultGradient; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.workspace.LayerWorkspaceMgr; import org.deeplearning4j.optimize.api.ConvexOptimizer; import org.deeplearning4j.optimize.api.TrainingListener; +import org.nd4j.evaluation.IEvaluation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration; import org.nd4j.linalg.api.memory.enums.*; import org.nd4j.linalg.api.ndarray.INDArray; +import org.nd4j.linalg.dataset.api.DataSet; +import org.nd4j.linalg.dataset.api.MultiDataSet; +import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; +import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.indexing.BooleanIndexing; import org.nd4j.linalg.indexing.conditions.Conditions; @@ -64,7 +72,7 @@ import static org.nd4j.linalg.ops.transforms.Transforms.sign; */ @Slf4j @Data -public class BarnesHutTsne implements Model { +public class BarnesHutTsne implements IModel { public final static String workspaceCache = "LOOP_CACHE"; @@ -127,7 +135,7 @@ public class BarnesHutTsne implements Model { double realMin, double initialMomentum, double finalMomentum, double momentum, int switchMomentumIteration, boolean normalize, int stopLyingIteration, double tolerance, double learningRate, boolean useAdaGrad, double perplexity, TrainingListener TrainingListener, - double minGain,int vpTreeWorkers) { + double minGain, int vpTreeWorkers) { this(numDimensions, simiarlityFunction, theta, invert, maxIter, realMin, initialMomentum, finalMomentum, momentum, switchMomentumIteration, normalize, stopLyingIteration, tolerance, learningRate, useAdaGrad, perplexity, TrainingListener, minGain, vpTreeWorkers, WorkspaceMode.NONE, null); @@ -137,7 +145,7 @@ public class BarnesHutTsne implements Model { double realMin, double initialMomentum, double finalMomentum, double momentum, int switchMomentumIteration, boolean normalize, int stopLyingIteration, double tolerance, double learningRate, boolean useAdaGrad, double perplexity, TrainingListener TrainingListener, - double minGain,int vpTreeWorkers, WorkspaceMode workspaceMode, INDArray staticInput) { + double minGain, int vpTreeWorkers, WorkspaceMode workspaceMode, INDArray staticInput) { this.maxIter = maxIter; this.realMin = realMin; this.initialMomentum = initialMomentum; @@ -158,7 +166,7 @@ public class BarnesHutTsne implements Model { this.invert = invert; this.vpTreeWorkers = vpTreeWorkers; this.workspaceMode = workspaceMode; - if(this.workspaceMode == null) + if (this.workspaceMode == null) this.workspaceMode = WorkspaceMode.NONE; initializer = (staticInput != null) ? 
new Initializer(staticInput) : new Initializer(); } @@ -199,7 +207,8 @@ public class BarnesHutTsne implements Model { /** * Convert data to probability * co-occurrences (aka calculating the kernel) - * @param d the data to convert + * + * @param d the data to convert * @param perplexity the perplexity of the model * @return the probabilities of co-occurrence */ @@ -219,14 +228,15 @@ public class BarnesHutTsne implements Model { rows.putScalar(n + 1, rows.getDouble(n) + k); final double enthropy = Math.log(perplexity); - VPTree tree = new VPTree(d, simiarlityFunction, vpTreeWorkers,invert); + VPTree tree = new VPTree(d, simiarlityFunction, vpTreeWorkers, invert); /*MemoryWorkspace workspace = workspaceMode == WorkspaceMode.NONE ? new DummyWorkspace() : Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread( workspaceConfigurationExternal, workspaceExternal); - try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ { + try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ + { log.info("Calculating probabilities of data similarities..."); for (int i = 0; i < N; i++) { if (i % 500 == 0) @@ -239,7 +249,7 @@ public class BarnesHutTsne implements Model { tree.search(d.getRow(i), k + 1, results, distances, false, true); double betas = 1.0; - if(results.size() == 0){ + if (results.size() == 0) { throw new IllegalStateException("Search returned no values for vector " + i + " - similarity \"" + simiarlityFunction + "\" may not be defined (for example, vector is" + " all zeros with cosine similarity)"); @@ -302,32 +312,101 @@ public class BarnesHutTsne implements Model { return x; } + /** + * This method returns updater state (if applicable), null otherwise + * + * @return + */ + @Override + public INDArray updaterState() { + return null; + } + @Override public ConvexOptimizer getOptimizer() { return null; } + /** + * This method fits model with a given DataSet + * + * @param dataSet + */ + @Override + public void fit(DataSet dataSet) { + + } + + /** + * This method fits model with a given MultiDataSet + * + * @param dataSet + */ + @Override + public void fit(MultiDataSet dataSet) { + + } + + /** + * This method fits model with a given DataSetIterator + * + * @param iterator + */ + @Override + public void fit(DataSetIterator iterator) { + + } + + /** + * This method fits model with a given MultiDataSetIterator + * + * @param iterator + */ + @Override + public void fit(MultiDataSetIterator iterator) { + + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(DataSetIterator iterator, T... evaluations) { + return null; + } + + /** + * This method executes evaluation of the model against given iterator and evaluation + * implementations + * + * @param iterator + * @param evaluations + */ + @Override + public T[] doEvaluation(MultiDataSetIterator iterator, + T... evaluations) { + return null; + } + @Override public INDArray getParam(String param) { return null; } - @Override - public void addListeners(TrainingListener... 
listener) { - // no-op - } - @Override - public Map paramTable() { + public Map getParamTable() { return null; } - @Override - public Map paramTable(boolean backprapParamsOnly) { + public Map getParamTable(boolean backprapParamsOnly) { return null; } - @Override + public void setParamTable(Map paramTable) { } @@ -338,7 +417,8 @@ public class BarnesHutTsne implements Model { } @Override - public void clear() {} + public void clear() { + } @Override public void applyConstraints(int iteration, int epoch) { @@ -361,6 +441,7 @@ public class BarnesHutTsne implements Model { /** * Symmetrize the value matrix + * * @param rowP * @param colP * @param valP @@ -375,7 +456,8 @@ public class BarnesHutTsne implements Model { workspaceConfigurationExternal, workspaceExternal); - try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ { + try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ + { for (int n = 0; n < N; n++) { int begin = rowP.getInt(n); int end = rowP.getInt(n + 1); @@ -408,7 +490,7 @@ public class BarnesHutTsne implements Model { for (int n = 0; n < N; n++) { for (int i = rowP.getInt(n); i < rowP.getInt(n + 1); i++) { boolean present = false; - for (int m = rowP.getInt(colP.getInt(i)); m < rowP.getInt(colP.getInt(i)+1); m++) { + for (int m = rowP.getInt(colP.getInt(i)); m < rowP.getInt(colP.getInt(i) + 1); m++) { if (colP.getInt(m) == n) { present = true; if (n <= colP.getInt(i)) { @@ -490,8 +572,8 @@ public class BarnesHutTsne implements Model { * * @param listeners */ - @Override - public void setListeners(Collection listeners) { + + public void addTrainingListeners(Collection listeners) { } @@ -501,7 +583,7 @@ public class BarnesHutTsne implements Model { * @param listeners */ @Override - public void setListeners(TrainingListener... listeners) { + public void addTrainingListeners(TrainingListener... listeners) { } @@ -536,7 +618,8 @@ public class BarnesHutTsne implements Model { private INDArray staticData; - public Initializer() {} + public Initializer() { + } public Initializer(INDArray input) { this.staticData = input; @@ -575,7 +658,8 @@ public class BarnesHutTsne implements Model { workspaceExternal); - try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ { + try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ + { x.divi(x.maxNumber()); @@ -618,6 +702,7 @@ public class BarnesHutTsne implements Model { /** * An individual iteration + * * @param p the probabilities that certain points * are near each other * @param i the iteration (primarily for debugging purposes) @@ -626,7 +711,9 @@ public class BarnesHutTsne implements Model { update(gradient().getGradientFor(Y_GRAD), Y_GRAD); } - static double sign_tsne(double x) { return (x == .0 ? .0 : (x < .0 ? -1.0 : 1.0)); } + static double sign_tsne(double x) { + return (x == .0 ? .0 : (x < .0 ? 
-1.0 : 1.0)); + } @Override @@ -638,7 +725,8 @@ public class BarnesHutTsne implements Model { workspaceConfigurationExternal, workspaceExternal); - try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ { + try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ + { INDArray yGrads = gradient; if (gains == null) @@ -647,12 +735,11 @@ public class BarnesHutTsne implements Model { //Nd4j.getExecutioner().exec(new BarnesHutGains(gains, gains, yGrads, yIncs)); // Copied from Reference for (int i = 0; i < yGrads.rows(); ++i) { - for (int j = 0; j < yGrads.columns(); ++j) { - if (sign_tsne(yGrads.getDouble(i,j)) == sign_tsne(yIncs.getDouble(i,j))) { - gains.putScalar(new int[]{i,j}, gains.getDouble(i,j)*0.8); - } - else { - gains.putScalar(new int[]{i,j}, gains.getDouble(i,j)+0.2); + for (int j = 0; j < yGrads.columns(); ++j) { + if (sign_tsne(yGrads.getDouble(i, j)) == sign_tsne(yIncs.getDouble(i, j))) { + gains.putScalar(new int[]{i, j}, gains.getDouble(i, j) * 0.8); + } else { + gains.putScalar(new int[]{i, j}, gains.getDouble(i, j) + 0.2); } } } @@ -680,8 +767,9 @@ public class BarnesHutTsne implements Model { /** * Save the model as a file with a csv format, adding the label as the last column. + * * @param labels - * @param path the path to write + * @param path the path to write * @throws IOException */ public void saveAsFile(List labels, String path) throws IOException { @@ -726,6 +814,7 @@ public class BarnesHutTsne implements Model { write.flush(); } } + /** * Plot tsne * @@ -744,7 +833,7 @@ public class BarnesHutTsne implements Model { @Override - public double score() { + public double getScore() { /*MemoryWorkspace workspace = workspaceMode == WorkspaceMode.NONE ? new DummyWorkspace() @@ -753,7 +842,8 @@ public class BarnesHutTsne implements Model { workspaceExternal); - try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ { + try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ + { // Get estimate of normalization term @@ -792,7 +882,7 @@ public class BarnesHutTsne implements Model { } @Override - public INDArray params() { + public INDArray getModelParams() { return null; } @@ -833,7 +923,7 @@ public class BarnesHutTsne implements Model { } @Override - public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr){ + public void fit(INDArray data, LayerWorkspaceMgr workspaceMgr) { fit(data); } @@ -858,7 +948,8 @@ public class BarnesHutTsne implements Model { workspaceExternal); - try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ { + try (MemoryWorkspace ws = workspace.notifyScopeEntered())*/ + { if (yIncs == null) @@ -888,7 +979,7 @@ public class BarnesHutTsne implements Model { @Override public Pair gradientAndScore() { - return new Pair<>(gradient(), score()); + return new Pair<>(gradient(), getScore()); } @Override @@ -897,12 +988,19 @@ public class BarnesHutTsne implements Model { } @Override - public NeuralNetConfiguration conf() { + public NeuralNetConfiguration getNetConfiguration() { return null; } + /** + * @param netConfiguration + */ @Override - public void setConf(NeuralNetConfiguration conf) { + public void setNetConfiguration(@NonNull NeuralNetConfiguration netConfiguration) { + + } + + public void setLayerConfiguration(LayerConfiguration layerConfiguration) { } @@ -1042,7 +1140,7 @@ public class BarnesHutTsne implements Model { return this; } - public Builder workspaceMode(WorkspaceMode workspaceMode){ + public Builder workspaceMode(WorkspaceMode workspaceMode) { this.workspaceMode = workspaceMode; return this; } @@ -1057,7 +1155,44 
@@ public class BarnesHutTsne implements Model { @Override - public void close(){ + public void close() { //No-op } -} + + /** + * Get the TrainingListeners + * + * @return training listeners + */ + @Override + public Collection<TrainingListener> getTrainingListeners() { + return null; + } + + @Override + public ITraininableLayerConfiguration getTrainingConfig() { + throw new UnsupportedOperationException("Not supported"); + } + + @Override + public INDArray getParams() { + throw new RuntimeException("Not supported"); + + } + + /** + * DL4J layers typically produce the sum of the gradients during the backward pass for each layer, and if required + * (if minibatch=true) then divide by the minibatch size.
+ * However, there are some exceptions, such as the batch norm mean/variance estimate parameters: these "gradients" + * are actually not gradients, but are updates to be applied directly to the parameter vector. Put another way, + * most gradients should be divided by the minibatch to get the average; some "gradients" are actually final updates + * already, and should not be divided by the minibatch size. + * + * @param paramName Name of the parameter + * @return True if gradients should be divided by minibatch (most params); false otherwise (edge cases like batch norm mean/variance estimates) + */ + @Override + public boolean updaterDivideByMinibatch(String paramName) { + return false; + } +} \ No newline at end of file diff --git a/cavis-native/cavis-native-cpu/src/main/java/org/nd4j/linalg/cpu/nativecpu/ops/CpuOpContext.java b/cavis-native/cavis-native-cpu/src/main/java/org/nd4j/linalg/cpu/nativecpu/ops/CpuOpContext.java index d6ddf49de..2c1a5860c 100644 --- a/cavis-native/cavis-native-cpu/src/main/java/org/nd4j/linalg/cpu/nativecpu/ops/CpuOpContext.java +++ b/cavis-native/cavis-native-cpu/src/main/java/org/nd4j/linalg/cpu/nativecpu/ops/CpuOpContext.java @@ -50,7 +50,8 @@ public class CpuOpContext extends BaseOpContext implements OpContext, Deallocata @Override public void close() { - // no-op + nativeOps.ctxPurge(context); + context.deallocate(); } @Override diff --git a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspace.java b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspace.java index 39dad7bd4..94c2ca71b 100644 --- a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspace.java +++ b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspace.java @@ -20,6 +20,8 @@ package org.nd4j.jita.workspace; +import java.util.List; +import java.util.Queue; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import lombok.val; @@ -39,10 +41,6 @@ import org.nd4j.linalg.exception.ND4JIllegalStateException; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.nativeblas.NativeOpsHolder; -import java.util.List; -import java.util.Queue; - - /** * CUDA-aware MemoryWorkspace implementation * @@ -51,395 +49,489 @@ import java.util.Queue; @Slf4j public class CudaWorkspace extends Nd4jWorkspace { + public CudaWorkspace(@NonNull WorkspaceConfiguration configuration) { + super(configuration); + } - public CudaWorkspace(@NonNull WorkspaceConfiguration configuration) { - super(configuration); + public CudaWorkspace(@NonNull WorkspaceConfiguration configuration, @NonNull String workspaceId) { + super(configuration, workspaceId); + } + + public CudaWorkspace( + @NonNull WorkspaceConfiguration configuration, + @NonNull String workspaceId, + Integer deviceId) { + super(configuration, workspaceId); + this.deviceId = deviceId; + } + + @Override + protected void init() { + if (workspaceConfiguration.getPolicyLocation() == LocationPolicy.MMAP) { + throw new ND4JIllegalStateException("CUDA do not support MMAP workspaces yet"); } - public CudaWorkspace(@NonNull WorkspaceConfiguration configuration, @NonNull String workspaceId) { - super(configuration, workspaceId); + super.init(); + + if (currentSize.get() > 0) { + log.debug("Allocating {} bytes at DEVICE & HOST space...", currentSize.get()); + isInit.set(true); + + long bytes = currentSize.get(); + + log.debug( + "Allocating [{}] workspace on device_{}, {} bytes...", + id, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + 
bytes); + + if (isDebug.get()) { + Nd4j.getWorkspaceManager().printAllocationStatisticsForCurrentThread(); + } + + Pointer ptr = memoryManager.allocate((bytes + SAFETY_OFFSET), MemoryKind.HOST, false); + if (ptr == null) throw new ND4JIllegalStateException("Can't allocate memory for workspace"); + + workspace.setHostPointer(new PagedPointer(ptr)); + + if (workspaceConfiguration.getPolicyMirroring() != MirroringPolicy.HOST_ONLY) { + workspace.setDevicePointer( + new PagedPointer( + memoryManager.allocate((bytes + SAFETY_OFFSET), MemoryKind.DEVICE, false))); + AllocationsTracker.getInstance() + .markAllocated( + AllocationKind.GENERAL, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + bytes + SAFETY_OFFSET); + + MemoryTracker.getInstance() + .incrementWorkspaceAllocatedAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), bytes + SAFETY_OFFSET); + + // if base pointer isn't aligned to 16 bytes (128 bits) - adjust the offfset then + val addr = workspace.getDevicePointer().address(); + val div = addr % alignmentBase; + if (div != 0) { + deviceOffset.set(alignmentBase - div); + hostOffset.set(alignmentBase - div); + } + } + } + } + + @Override + public PagedPointer alloc(long requiredMemory, DataType type, boolean initialize) { + return this.alloc(requiredMemory, MemoryKind.DEVICE, type, initialize); + } + + @Override + public synchronized void destroyWorkspace(boolean extended) { + val size = currentSize.getAndSet(0); + reset(); + + if (extended) clearExternalAllocations(); + + clearPinnedAllocations(extended); + + if (workspace.getHostPointer() != null) + NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(workspace.getHostPointer()); + + if (workspace.getDevicePointer() != null) { + NativeOpsHolder.getInstance() + .getDeviceNativeOps() + .freeDevice(workspace.getDevicePointer(), 0); + AllocationsTracker.getInstance() + .markReleased( + AllocationKind.GENERAL, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + size + SAFETY_OFFSET); + + MemoryTracker.getInstance() + .decrementWorkspaceAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), size + SAFETY_OFFSET); } - public CudaWorkspace(@NonNull WorkspaceConfiguration configuration, @NonNull String workspaceId, Integer deviceId) { - super(configuration, workspaceId); - this.deviceId = deviceId; + workspace.setDevicePointer(null); + workspace.setHostPointer(null); + } + + @Override + public PagedPointer alloc( + long requiredMemory, MemoryKind kind, DataType type, boolean initialize) { + long numElements = requiredMemory / Nd4j.sizeOfDataType(type); + + // alignment + if (requiredMemory % alignmentBase != 0) + requiredMemory += alignmentBase - (requiredMemory % alignmentBase); + + if (!isUsed.get()) { + if (disabledCounter.incrementAndGet() % 10 == 0) + log.warn( + "Workspace was turned off, and wasn't enabled after {} allocations", + disabledCounter.get()); + + if (kind == MemoryKind.DEVICE) { + val pointer = + new PagedPointer( + memoryManager.allocate(requiredMemory, MemoryKind.DEVICE, initialize), numElements); + externalAllocations.add(new PointersPair(null, pointer)); + MemoryTracker.getInstance() + .incrementWorkspaceAllocatedAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory); + return pointer; + } else { + val pointer = + new PagedPointer( + memoryManager.allocate(requiredMemory, MemoryKind.HOST, initialize), numElements); + externalAllocations.add(new PointersPair(pointer, null)); + return pointer; + } } - @Override - protected void init() { - if 
(workspaceConfiguration.getPolicyLocation() == LocationPolicy.MMAP) { - throw new ND4JIllegalStateException("CUDA do not support MMAP workspaces yet"); + boolean trimmer = + (workspaceConfiguration.getPolicyReset() == ResetPolicy.ENDOFBUFFER_REACHED + && requiredMemory + cycleAllocations.get() > initialBlockSize.get() + && initialBlockSize.get() > 0 + && kind == MemoryKind.DEVICE) + || trimmedMode.get(); + + if (trimmer + && workspaceConfiguration.getPolicySpill() == SpillPolicy.REALLOCATE + && !trimmedMode.get()) { + trimmedMode.set(true); + trimmedStep.set(stepsCount.get()); + } + + if (kind == MemoryKind.DEVICE) { + if (deviceOffset.get() + requiredMemory <= currentSize.get() + && !trimmer + && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { + cycleAllocations.addAndGet(requiredMemory); + long prevOffset = deviceOffset.getAndAdd(requiredMemory); + + if (workspaceConfiguration.getPolicyMirroring() == MirroringPolicy.HOST_ONLY) return null; + + val ptr = workspace.getDevicePointer().withOffset(prevOffset, numElements); + + log.debug( + "Workspace [{}] device_{}: alloc array of {} bytes, capacity of {} elements; prevOffset: {}; newOffset: {}; size: {}; address: {}", + id, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + requiredMemory, + numElements, + prevOffset, + deviceOffset.get(), + currentSize.get(), + ptr.address()); + + if (initialize) { + val context = AtomicAllocator.getInstance().getDeviceContext(); + + int ret = + NativeOpsHolder.getInstance() + .getDeviceNativeOps() + .memsetAsync(ptr, 0, requiredMemory, 0, context.getSpecialStream()); + if (ret == 0) + throw new ND4JIllegalStateException( + "memset failed device_" + Nd4j.getAffinityManager().getDeviceForCurrentThread()); + + context.syncSpecialStream(); } - super.init(); + return ptr; + } else { - if (currentSize.get() > 0) { - //log.info("Allocating {} bytes at DEVICE & HOST space...", currentSize.get()); - isInit.set(true); - - long bytes = currentSize.get(); - - if (isDebug.get()) - log.info("Allocating [{}] workspace on device_{}, {} bytes...", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), bytes); - - if (isDebug.get()) { - Nd4j.getWorkspaceManager().printAllocationStatisticsForCurrentThread(); - } - - Pointer ptr = memoryManager.allocate((bytes + SAFETY_OFFSET), MemoryKind.HOST, false); - if (ptr == null) - throw new ND4JIllegalStateException("Can't allocate memory for workspace"); - - workspace.setHostPointer(new PagedPointer(ptr)); - - if (workspaceConfiguration.getPolicyMirroring() != MirroringPolicy.HOST_ONLY) { - workspace.setDevicePointer(new PagedPointer(memoryManager.allocate((bytes + SAFETY_OFFSET), MemoryKind.DEVICE, false))); - AllocationsTracker.getInstance().markAllocated(AllocationKind.GENERAL, Nd4j.getAffinityManager().getDeviceForCurrentThread(), bytes + SAFETY_OFFSET); - - MemoryTracker.getInstance().incrementWorkspaceAllocatedAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), bytes + SAFETY_OFFSET); - - // if base pointer isn't aligned to 16 bytes (128 bits) - adjust the offfset then - val addr = workspace.getDevicePointer().address(); - val div = addr % alignmentBase; - if (div != 0) { - deviceOffset.set(alignmentBase - div); - hostOffset.set(alignmentBase - div); - } - } - } - } - - @Override - public PagedPointer alloc(long requiredMemory, DataType type, boolean initialize) { - return this.alloc(requiredMemory, MemoryKind.DEVICE, type, initialize); - } - - - @Override - public synchronized void destroyWorkspace(boolean extended) { - val 
size = currentSize.getAndSet(0); - reset(); - - if (extended) - clearExternalAllocations(); - - clearPinnedAllocations(extended); - - if (workspace.getHostPointer() != null) - NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(workspace.getHostPointer()); - - if (workspace.getDevicePointer() != null) { - NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(workspace.getDevicePointer(), 0); - AllocationsTracker.getInstance().markReleased(AllocationKind.GENERAL, Nd4j.getAffinityManager().getDeviceForCurrentThread(), size + SAFETY_OFFSET); - - MemoryTracker.getInstance().decrementWorkspaceAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), size + SAFETY_OFFSET); + // spill + if (workspaceConfiguration.getPolicyReset() == ResetPolicy.ENDOFBUFFER_REACHED + && currentSize.get() > 0 + && !trimmer + && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { + // log.info("End of space reached. Current offset: {}; requiredMemory: {}", + // deviceOffset.get(), requiredMemory); + deviceOffset.set(0); + resetPlanned.set(true); + return alloc(requiredMemory, kind, type, initialize); } - workspace.setDevicePointer(null); - workspace.setHostPointer(null); + if (!trimmer) spilledAllocationsSize.addAndGet(requiredMemory); + else pinnedAllocationsSize.addAndGet(requiredMemory); - } + log.debug( + "Workspace [{}] device_{}: spilled DEVICE array of {} bytes, capacity of {} elements", + id, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + requiredMemory, + numElements); + val shape = + new AllocationShape( + requiredMemory / Nd4j.sizeOfDataType(type), Nd4j.sizeOfDataType(type), type); - @Override - public PagedPointer alloc(long requiredMemory, MemoryKind kind, DataType type, boolean initialize) { - long numElements = requiredMemory / Nd4j.sizeOfDataType(type); + cycleAllocations.addAndGet(requiredMemory); - // alignment - if (requiredMemory % alignmentBase != 0) - requiredMemory += alignmentBase - (requiredMemory % alignmentBase); + if (workspaceConfiguration.getPolicyMirroring() == MirroringPolicy.HOST_ONLY) return null; - if (!isUsed.get()) { - if (disabledCounter.incrementAndGet() % 10 == 0) - log.warn("Worskpace was turned off, and wasn't enabled after {} allocations", disabledCounter.get()); + switch (workspaceConfiguration.getPolicySpill()) { + case REALLOCATE: + case EXTERNAL: + if (!trimmer) { + externalCount.incrementAndGet(); + // + // AtomicAllocator.getInstance().getMemoryHandler().getMemoryProvider().malloc(shape, + // null, AllocationStatus.DEVICE).getDevicePointer() + val pointer = + new PagedPointer( + memoryManager.allocate(requiredMemory, MemoryKind.DEVICE, initialize), + numElements); + pointer.isLeaked(); - if (kind == MemoryKind.DEVICE) { - val pointer = new PagedPointer(memoryManager.allocate(requiredMemory, MemoryKind.DEVICE, initialize), numElements); - externalAllocations.add(new PointersPair(null, pointer)); - MemoryTracker.getInstance().incrementWorkspaceAllocatedAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory); - return pointer; + val pp = new PointersPair(null, pointer); + pp.setRequiredMemory(requiredMemory); + externalAllocations.add(pp); + + MemoryTracker.getInstance() + .incrementWorkspaceAllocatedAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory); + return pointer; } else { - val pointer = new PagedPointer(memoryManager.allocate(requiredMemory, MemoryKind.HOST, initialize), numElements); - externalAllocations.add(new PointersPair(pointer, null)); - return 
pointer; + pinnedCount.incrementAndGet(); + + val pointer = + new PagedPointer( + memoryManager.allocate(requiredMemory, MemoryKind.DEVICE, initialize), + numElements); + pointer.isLeaked(); + + pinnedAllocations.add( + new PointersPair(stepsCount.get(), requiredMemory, null, pointer)); + MemoryTracker.getInstance() + .incrementWorkspaceAllocatedAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory); + return pointer; } + case FAIL: + default: + { + throw new ND4JIllegalStateException("Can't allocate memory: Workspace is full"); + } + } + } + } else if (kind == MemoryKind.HOST) { + if (hostOffset.get() + requiredMemory <= currentSize.get() + && !trimmer + && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { + long prevOffset = hostOffset.getAndAdd(requiredMemory); + val ptr = workspace.getHostPointer().withOffset(prevOffset, numElements); + + // && workspaceConfiguration.getPolicyMirroring() == MirroringPolicy.HOST_ONLY + if (initialize) Pointer.memset(ptr, 0, requiredMemory); + return ptr; + } else { + // log.info("Spilled HOST array of {} bytes, capacity of {} elements", requiredMemory, + // numElements); + if (workspaceConfiguration.getPolicyReset() == ResetPolicy.ENDOFBUFFER_REACHED + && currentSize.get() > 0 + && !trimmer + && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { + // log.info("End of space reached. Current offset: {}; requiredMemory: {}", + // deviceOffset.get(), requiredMemory); + hostOffset.set(0); + // resetPlanned.set(true); + return alloc(requiredMemory, kind, type, initialize); } - boolean trimmer = (workspaceConfiguration.getPolicyReset() == ResetPolicy.ENDOFBUFFER_REACHED && requiredMemory + cycleAllocations.get() > initialBlockSize.get() && initialBlockSize.get() > 0 && kind == MemoryKind.DEVICE) || trimmedMode.get(); + val shape = + new AllocationShape( + requiredMemory / Nd4j.sizeOfDataType(type), Nd4j.sizeOfDataType(type), type); - if (trimmer && workspaceConfiguration.getPolicySpill() == SpillPolicy.REALLOCATE && !trimmedMode.get()) { - trimmedMode.set(true); - trimmedStep.set(stepsCount.get()); - } + switch (workspaceConfiguration.getPolicySpill()) { + case REALLOCATE: + case EXTERNAL: + if (!trimmer) { + // memoryManager.allocate(requiredMemory, MemoryKind.HOST, true) + // AtomicAllocator.getInstance().getMemoryHandler().getMemoryProvider().malloc(shape, + // null, AllocationStatus.DEVICE).getDevicePointer() + PagedPointer pointer = + new PagedPointer( + memoryManager.allocate(requiredMemory, MemoryKind.HOST, initialize), + numElements); - if (kind == MemoryKind.DEVICE) { - if (deviceOffset.get() + requiredMemory <= currentSize.get() && !trimmer && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { - cycleAllocations.addAndGet(requiredMemory); - long prevOffset = deviceOffset.getAndAdd(requiredMemory); - - if (workspaceConfiguration.getPolicyMirroring() == MirroringPolicy.HOST_ONLY) - return null; - - val ptr = workspace.getDevicePointer().withOffset(prevOffset, numElements); - - if (isDebug.get()) - log.info("Workspace [{}] device_{}: alloc array of {} bytes, capacity of {} elements; prevOffset: {}; newOffset: {}; size: {}; address: {}", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory, numElements, prevOffset, deviceOffset.get(), currentSize.get(), ptr.address()); - - if (initialize) { - val context = AtomicAllocator.getInstance().getDeviceContext(); - - int ret = NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(ptr, 
0, requiredMemory, 0, context.getSpecialStream()); - if (ret == 0) - throw new ND4JIllegalStateException("memset failed device_" + Nd4j.getAffinityManager().getDeviceForCurrentThread()); - - context.syncSpecialStream(); - } - - return ptr; + externalAllocations.add(new PointersPair(pointer, null)); + return pointer; } else { + // AtomicAllocator.getInstance().getMemoryHandler().getMemoryProvider().malloc(shape, + // null, AllocationStatus.DEVICE).getDevicePointer() + PagedPointer pointer = + new PagedPointer( + memoryManager.allocate(requiredMemory, MemoryKind.HOST, initialize), + numElements); + pointer.isLeaked(); - // spill - if (workspaceConfiguration.getPolicyReset() == ResetPolicy.ENDOFBUFFER_REACHED && currentSize.get() > 0 && !trimmer && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { - //log.info("End of space reached. Current offset: {}; requiredMemory: {}", deviceOffset.get(), requiredMemory); - deviceOffset.set(0); - resetPlanned.set(true); - return alloc(requiredMemory, kind, type, initialize); - } - - if (!trimmer) - spilledAllocationsSize.addAndGet(requiredMemory); - else - pinnedAllocationsSize.addAndGet(requiredMemory); - - if (isDebug.get()) { - log.info("Workspace [{}] device_{}: spilled DEVICE array of {} bytes, capacity of {} elements", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory, numElements); - } - - val shape = new AllocationShape(requiredMemory / Nd4j.sizeOfDataType(type), Nd4j.sizeOfDataType(type), type); - - cycleAllocations.addAndGet(requiredMemory); - - if (workspaceConfiguration.getPolicyMirroring() == MirroringPolicy.HOST_ONLY) - return null; - - switch (workspaceConfiguration.getPolicySpill()) { - case REALLOCATE: - case EXTERNAL: - if (!trimmer) { - externalCount.incrementAndGet(); - // - //AtomicAllocator.getInstance().getMemoryHandler().getMemoryProvider().malloc(shape, null, AllocationStatus.DEVICE).getDevicePointer() - val pointer = new PagedPointer(memoryManager.allocate(requiredMemory, MemoryKind.DEVICE, initialize), numElements); - pointer.isLeaked(); - - val pp = new PointersPair(null, pointer); - pp.setRequiredMemory(requiredMemory); - externalAllocations.add(pp); - - MemoryTracker.getInstance().incrementWorkspaceAllocatedAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory); - return pointer; - } else { - pinnedCount.incrementAndGet(); - - val pointer = new PagedPointer(memoryManager.allocate(requiredMemory, MemoryKind.DEVICE, initialize), numElements); - pointer.isLeaked(); - - pinnedAllocations.add(new PointersPair(stepsCount.get(), requiredMemory, null, pointer)); - MemoryTracker.getInstance().incrementWorkspaceAllocatedAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), requiredMemory); - return pointer; - } - case FAIL: - default: { - throw new ND4JIllegalStateException("Can't allocate memory: Workspace is full"); - } - } + pinnedAllocations.add(new PointersPair(stepsCount.get(), 0L, pointer, null)); + return pointer; } - } else if (kind == MemoryKind.HOST) { - if (hostOffset.get() + requiredMemory <= currentSize.get() && !trimmer && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { - - long prevOffset = hostOffset.getAndAdd(requiredMemory); - - val ptr = workspace.getHostPointer().withOffset(prevOffset, numElements); - - // && workspaceConfiguration.getPolicyMirroring() == MirroringPolicy.HOST_ONLY - if (initialize) - Pointer.memset(ptr, 0, requiredMemory); - return ptr; - } else { - // log.info("Spilled HOST array of {} bytes, 
capacity of {} elements", requiredMemory, numElements); - if (workspaceConfiguration.getPolicyReset() == ResetPolicy.ENDOFBUFFER_REACHED && currentSize.get() > 0 && !trimmer && Nd4j.getWorkspaceManager().getDebugMode() != DebugMode.SPILL_EVERYTHING) { - //log.info("End of space reached. Current offset: {}; requiredMemory: {}", deviceOffset.get(), requiredMemory); - hostOffset.set(0); - //resetPlanned.set(true); - return alloc(requiredMemory, kind, type, initialize); - } - - val shape = new AllocationShape(requiredMemory / Nd4j.sizeOfDataType(type), Nd4j.sizeOfDataType(type), type); - - switch (workspaceConfiguration.getPolicySpill()) { - case REALLOCATE: - case EXTERNAL: - if (!trimmer) { - //memoryManager.allocate(requiredMemory, MemoryKind.HOST, true) - //AtomicAllocator.getInstance().getMemoryHandler().getMemoryProvider().malloc(shape, null, AllocationStatus.DEVICE).getDevicePointer() - PagedPointer pointer = new PagedPointer(memoryManager.allocate(requiredMemory, MemoryKind.HOST, initialize), numElements); - - externalAllocations.add(new PointersPair(pointer, null)); - return pointer; - } else { - //AtomicAllocator.getInstance().getMemoryHandler().getMemoryProvider().malloc(shape, null, AllocationStatus.DEVICE).getDevicePointer() - PagedPointer pointer = new PagedPointer(memoryManager.allocate(requiredMemory, MemoryKind.HOST, initialize), numElements); - pointer.isLeaked(); - - pinnedAllocations.add(new PointersPair(stepsCount.get(), 0L, pointer, null)); - return pointer; - } - case FAIL: - default: { - throw new ND4JIllegalStateException("Can't allocate memory: Workspace is full"); - } - } - } - } else throw new ND4JIllegalStateException("Unknown MemoryKind was passed in: " + kind); - - //throw new ND4JIllegalStateException("Shouldn't ever reach this line"); - } - - @Override - protected void clearPinnedAllocations(boolean extended) { - if (isDebug.get()) - log.info("Workspace [{}] device_{} threadId {} cycle {}: clearing pinned allocations...", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), Thread.currentThread().getId(), cyclesCount.get()); - - while (!pinnedAllocations.isEmpty()) { - val pair = pinnedAllocations.peek(); - if (pair == null) - throw new RuntimeException(); - - long stepNumber = pair.getAllocationCycle(); - long stepCurrent = stepsCount.get(); - - if (isDebug.get()) - log.info("Allocation step: {}; Current step: {}", stepNumber, stepCurrent); - - if (stepNumber + 2 < stepCurrent || extended) { - pinnedAllocations.remove(); - - if (pair.getDevicePointer() != null) { - NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(pair.getDevicePointer(), 0); - MemoryTracker.getInstance().decrementWorkspaceAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), pair.getRequiredMemory()); - pinnedCount.decrementAndGet(); - - if (isDebug.get()) - log.info("deleting external device allocation "); - } - - if (pair.getHostPointer() != null) { - NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(pair.getHostPointer()); - - if (isDebug.get()) - log.info("deleting external host allocation "); - } - - val sizez = pair.getRequiredMemory() * -1; - pinnedAllocationsSize.addAndGet(sizez); - } else { - break; + case FAIL: + default: + { + throw new ND4JIllegalStateException("Can't allocate memory: Workspace is full"); } } - } + } + } else throw new ND4JIllegalStateException("Unknown MemoryKind was passed in: " + kind); - @Override - protected void clearExternalAllocations() { - if (isDebug.get()) - log.info("Workspace [{}] device_{} threadId {} guid [{}]: 
clearing external allocations...", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), Thread.currentThread().getId(), guid); + // throw new ND4JIllegalStateException("Shouldn't ever reach this line"); + } - Nd4j.getExecutioner().commit(); + @Override + protected void clearPinnedAllocations(boolean extended) { - try { - for (PointersPair pair : externalAllocations) { - if (pair.getHostPointer() != null) { - NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(pair.getHostPointer()); + log.debug( + "Workspace [{}] device_{} threadId {} cycle {}: clearing pinned allocations...", + id, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + Thread.currentThread().getId(), + cyclesCount.get()); - if (isDebug.get()) - log.info("deleting external host allocation... "); - } + while (!pinnedAllocations.isEmpty()) { + val pair = pinnedAllocations.peek(); + if (pair == null) throw new RuntimeException(); - if (pair.getDevicePointer() != null) { - NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(pair.getDevicePointer(), 0); + long stepNumber = pair.getAllocationCycle(); + long stepCurrent = stepsCount.get(); - if (isDebug.get()) - log.info("deleting external device allocation... "); + log.debug("Allocation step: {}; Current step: {}", stepNumber, stepCurrent); - val sizez = pair.getRequiredMemory(); - if (sizez != null) { - AllocationsTracker.getInstance().markReleased(AllocationKind.GENERAL, Nd4j.getAffinityManager().getDeviceForCurrentThread(), sizez); - MemoryTracker.getInstance().decrementWorkspaceAmount(Nd4j.getAffinityManager().getDeviceForCurrentThread(), sizez); - } - } - } - } catch (Exception e) { - log.error("RC: Workspace [{}] device_{} threadId {} guid [{}]: clearing external allocations...", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), Thread.currentThread().getId(), guid); - throw new RuntimeException(e); + if (stepNumber + 2 < stepCurrent || extended) { + pinnedAllocations.remove(); + + if (pair.getDevicePointer() != null) { + NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(pair.getDevicePointer(), 0); + MemoryTracker.getInstance() + .decrementWorkspaceAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), pair.getRequiredMemory()); + pinnedCount.decrementAndGet(); + + log.debug("deleting external device allocation "); } - spilledAllocationsSize.set(0); - externalCount.set(0); - externalAllocations.clear(); - } + if (pair.getHostPointer() != null) { + NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(pair.getHostPointer()); - @Override - protected void resetWorkspace() { - if (currentSize.get() < 1) { + log.debug("deleting external host allocation "); } + val sizez = pair.getRequiredMemory() * -1; + pinnedAllocationsSize.addAndGet(sizez); + } else { + break; + } + } + } -/* - if (Nd4j.getExecutioner() instanceof GridExecutioner) - ((GridExecutioner) Nd4j.getExecutioner()).flushQueueBlocking(); + @Override + protected void clearExternalAllocations() { - CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext(); + log.debug( + "Workspace [{}] device_{} threadId {} guid [{}]: clearing external allocations...", + id, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + Thread.currentThread().getId(), + guid); - //log.info("workspace: {}, size: {}", workspace.getDevicePointer().address(), currentSize.get()); + Nd4j.getExecutioner().commit(); - NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(workspace.getDevicePointer(), 0, currentSize.get() + 
SAFETY_OFFSET, 0, context.getSpecialStream()); + try { + for (PointersPair pair : externalAllocations) { + if (pair.getHostPointer() != null) { + NativeOpsHolder.getInstance().getDeviceNativeOps().freeHost(pair.getHostPointer()); - Pointer.memset(workspace.getHostPointer(), 0, currentSize.get() + SAFETY_OFFSET); + log.debug("deleting external host allocation... "); + } - context.getSpecialStream().synchronize(); - */ + if (pair.getDevicePointer() != null) { + NativeOpsHolder.getInstance().getDeviceNativeOps().freeDevice(pair.getDevicePointer(), 0); + + log.debug("deleting external device allocation... "); + + val sizez = pair.getRequiredMemory(); + if (sizez != null) { + AllocationsTracker.getInstance() + .markReleased( + AllocationKind.GENERAL, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + sizez); + MemoryTracker.getInstance() + .decrementWorkspaceAmount( + Nd4j.getAffinityManager().getDeviceForCurrentThread(), sizez); + } + } + } + } catch (Exception e) { + log.error( + "RC: Workspace [{}] device_{} threadId {} guid [{}]: clearing external allocations...", + id, + Nd4j.getAffinityManager().getDeviceForCurrentThread(), + Thread.currentThread().getId(), + guid); + throw new RuntimeException(e); } - protected PointersPair workspace() { - return workspace; - } + spilledAllocationsSize.set(0); + externalCount.set(0); + externalAllocations.clear(); + } - protected Queue pinnedPointers() { - return pinnedAllocations; - } + @Override + protected void resetWorkspace() { + if (currentSize.get() < 1) {} - protected List externalPointers() { - return externalAllocations; - } + /* + if (Nd4j.getExecutioner() instanceof GridExecutioner) + ((GridExecutioner) Nd4j.getExecutioner()).flushQueueBlocking(); - @Override - public Deallocator deallocator() { - return new CudaWorkspaceDeallocator(this); - } + CudaContext context = (CudaContext) AtomicAllocator.getInstance().getDeviceContext().getContext(); - @Override - public String getUniqueId() { - return "Workspace_" + getId() + "_" + Nd4j.getDeallocatorService().nextValue(); - } + //log.info("workspace: {}, size: {}", workspace.getDevicePointer().address(), currentSize.get()); - @Override - public int targetDevice() { - return deviceId; - } + NativeOpsHolder.getInstance().getDeviceNativeOps().memsetAsync(workspace.getDevicePointer(), 0, currentSize.get() + SAFETY_OFFSET, 0, context.getSpecialStream()); - @Override - public long getPrimaryOffset() { - return getDeviceOffset(); - } + Pointer.memset(workspace.getHostPointer(), 0, currentSize.get() + SAFETY_OFFSET); + + context.getSpecialStream().synchronize(); + */ + } + + protected PointersPair workspace() { + return workspace; + } + + protected Queue pinnedPointers() { + return pinnedAllocations; + } + + protected List externalPointers() { + return externalAllocations; + } + + @Override + public Deallocator deallocator() { + return new CudaWorkspaceDeallocator(this); + } + + @Override + public String getUniqueId() { + return "Workspace_" + getId() + "_" + Nd4j.getDeallocatorService().nextValue(); + } + + @Override + public int targetDevice() { + return deviceId; + } + + @Override + public long getPrimaryOffset() { + return getDeviceOffset(); + } } diff --git a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspaceDeallocator.java b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspaceDeallocator.java index 806986fc7..41b936bf7 100644 --- 
a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspaceDeallocator.java +++ b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/jita/workspace/CudaWorkspaceDeallocator.java @@ -48,7 +48,7 @@ public class CudaWorkspaceDeallocator implements Deallocator { @Override public void deallocate() { - log.trace("Deallocating CUDA workspace"); + log.debug("Deallocating CUDA workspace"); // purging workspace planes if (pointersPair != null) { diff --git a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaExecutioner.java b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaExecutioner.java index 612dfdda8..5524bddbc 100644 --- a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaExecutioner.java +++ b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaExecutioner.java @@ -1582,7 +1582,7 @@ public class CudaExecutioner extends DefaultOpExecutioner { } if (nativeOps.lastErrorCode() != 0) - throw new RuntimeException(nativeOps.lastErrorMessage()); + throw new RuntimeException(nativeOps.lastErrorMessage() + " error code: " + nativeOps.lastErrorCode()); profilingConfigurableHookOut(op, oc, st); diff --git a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaOpContext.java b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaOpContext.java index 3ba143e36..52af2eeb5 100644 --- a/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaOpContext.java +++ b/cavis-native/cavis-native-jcublas/src/main/java/org/nd4j/linalg/jcublas/ops/executioner/CudaOpContext.java @@ -56,7 +56,8 @@ public class CudaOpContext extends BaseOpContext implements OpContext, Deallocat @Override public void close() { - // no-op + nativeOps.ctxPurge(context); + context.deallocate(); } @Override diff --git a/cavis-ui/cavis-ui-common/src/main/java/org/deeplearning4j/ui/weights/ConvolutionalIterationListener.java b/cavis-ui/cavis-ui-common/src/main/java/org/deeplearning4j/ui/weights/ConvolutionalIterationListener.java index 4770b2d76..44caf37c4 100644 --- a/cavis-ui/cavis-ui-common/src/main/java/org/deeplearning4j/ui/weights/ConvolutionalIterationListener.java +++ b/cavis-ui/cavis-ui-common/src/main/java/org/deeplearning4j/ui/weights/ConvolutionalIterationListener.java @@ -23,12 +23,12 @@ package org.deeplearning4j.ui.weights; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import lombok.val; +import net.brutex.ai.dnn.api.IModel; import org.datavec.image.loader.ImageLoader; import org.deeplearning4j.core.storage.Persistable; import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.optimize.api.BaseTrainingListener; @@ -40,8 +40,6 @@ import org.deeplearning4j.ui.model.weights.ConvolutionListenerPersistable; import org.nd4j.linalg.api.ndarray.INDArray; import org.nd4j.linalg.exception.ND4JArraySizeException; import org.nd4j.common.io.ClassPathResource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import javax.imageio.ImageIO; import java.awt.*; @@ -60,7 +58,7 @@ public class ConvolutionalIterationListener extends BaseTrainingListener { 
} private int freq = 10; - private static final Logger log = LoggerFactory.getLogger(ConvolutionalIterationListener.class); + private int minibatchNum = 0; private boolean openBrowser = true; private final String path; @@ -125,12 +123,12 @@ public class ConvolutionalIterationListener extends BaseTrainingListener { * @param iteration the iteration number */ @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { int iteration = (model instanceof MultiLayerNetwork ? ((MultiLayerNetwork)model).getIterationCount() : ((ComputationGraph)model).getIterationCount()); if (iteration % freq == 0) { @@ -147,7 +145,7 @@ public class ConvolutionalIterationListener extends BaseTrainingListener { throw new RuntimeException("layers.length != activations.size(). Got layers.length="+layers.length+", activations.size()="+activations.size()); for( int i=0; i activations) { + public void onForwardPass(IModel model, List activations) { int iteration = (model instanceof MultiLayerNetwork ? ((MultiLayerNetwork)model).getIterationCount() : ((ComputationGraph)model).getIterationCount()); if (iteration % freq == 0) { diff --git a/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/ManualTests.java b/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/ManualTests.java index 81cd4e5b1..415cd7ac2 100644 --- a/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/ManualTests.java +++ b/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/ManualTests.java @@ -29,7 +29,6 @@ import org.deeplearning4j.models.embeddings.reader.impl.BasicModelUtils; import org.deeplearning4j.models.word2vec.VocabWord; import org.deeplearning4j.models.word2vec.Word2Vec; import org.deeplearning4j.nn.conf.GradientNormalization; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -125,10 +124,10 @@ public class ManualTests { outputNum, useSubset, true, 1.0, new Random(seed)); log.info("Build model...."); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .activation(Activation.RELU).weightInit(WeightInit.XAVIER) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) - .updater(new AdaGrad(0.01)).weightNoise(new DropConnect(0.5)).list() + .updater(new AdaGrad(0.01)).weightNoise(new DropConnect(0.5)) .layer(0, new ConvolutionLayer.Builder(4, 4).name("cnn1").nIn(nChannels).stride(1, 1).nOut(20) .build()) .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2}) @@ -144,14 +143,14 @@ public class ManualTests { .layer(8, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(numRows, numColumns, nChannels)); + .inputType(InputType.convolutional(numRows, numColumns, nChannels)); MultiLayerNetwork model = new MultiLayerNetwork(builder.build()); model.init(); log.info("Train model...."); - model.setListeners(new ScoreIterationListener(listenerFreq), new ConvolutionalIterationListener(listenerFreq)); + 
model.addTrainingListeners(new ScoreIterationListener(listenerFreq), new ConvolutionalIterationListener(listenerFreq)); while (lfw.hasNext()) { lfwNext = lfw.next(); @@ -246,10 +245,10 @@ public class ManualTests { DataSetIterator mnistTest = new MnistDataSetIterator(batchSize, false, 12345); log.info("Build model...."); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) - .updater(new Nesterovs(0.01, 0.9)).list() + .updater(new Nesterovs(0.01, 0.9)) .layer(0, new ConvolutionLayer.Builder(5, 5) //nIn and nOut specify depth. nIn here is the nChannels and nOut is the number of filters to be applied .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build()) @@ -263,9 +262,9 @@ public class ManualTests { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutional(28, 28, nChannels)); + .inputType(InputType.convolutional(28, 28, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); /* @@ -279,7 +278,7 @@ public class ManualTests { */ log.info("Train model...."); - model.setListeners(new ConvolutionalIterationListener(1)); + model.addTrainingListeners(new ConvolutionalIterationListener(1)); //((NativeOpExecutioner) Nd4j.getExecutioner()).getLoop().setOmpNumThreads(8); @@ -320,10 +319,10 @@ public class ManualTests { DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345); log.info("Build model...."); - MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder().seed(seed) + NeuralNetConfiguration.NeuralNetConfigurationBuilder builder = NeuralNetConfiguration.builder().seed(seed) .l2(0.0005) .weightInit(WeightInit.XAVIER) - .updater(new Nesterovs(0.01, 0.9)).list() + .updater(new Nesterovs(0.01, 0.9)) .layer(0, new FrozenLayer(new ConvolutionLayer.Builder(5, 5) //nIn and nOut specify depth. 
nIn here is the nChannels and nOut is the number of filters to be applied .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build())) @@ -332,14 +331,14 @@ public class ManualTests { .layer(2, new FrozenLayer(new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build())) .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, nChannels)); + .inputType(InputType.convolutionalFlat(28, 28, nChannels)); - MultiLayerConfiguration conf = builder.build(); + NeuralNetConfiguration conf = builder.build(); MultiLayerNetwork model = new MultiLayerNetwork(conf); model.init(); log.info("Train model...."); - model.setListeners(new ConvolutionalIterationListener(1)); + model.addTrainingListeners(new ConvolutionalIterationListener(1)); for (int i = 0; i < nEpochs; i++) { model.fit(mnistTrain); diff --git a/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/weights/TestConvolutionalListener.java b/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/weights/TestConvolutionalListener.java index 442f3bf01..b53e55c9c 100644 --- a/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/weights/TestConvolutionalListener.java +++ b/cavis-ui/cavis-ui-common/src/test/java/org/deeplearning4j/ui/weights/TestConvolutionalListener.java @@ -21,7 +21,6 @@ package org.deeplearning4j.ui.weights; import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -54,9 +53,9 @@ public class TestConvolutionalListener { DataSetIterator mnistTrain = new MnistDataSetIterator(batchSize, true, 12345); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) // Training iterations as above + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) // Training iterations as above .l2(0.0005).weightInit(WeightInit.XAVIER) - .updater(new Nesterovs(0.01, 0.9)).list() + .updater(new Nesterovs(0.01, 0.9)) .layer(0, new ConvolutionLayer.Builder(5, 5) //nIn and nOut specify depth. 
nIn here is the nChannels and nOut is the number of filters to be applied .nIn(nChannels).stride(1, 1).nOut(20).activation(Activation.IDENTITY).build()) @@ -70,12 +69,12 @@ public class TestConvolutionalListener { .layer(4, new DenseLayer.Builder().activation(Activation.RELU).nOut(500).build()) .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .nOut(outputNum).activation(Activation.SOFTMAX).build()) - .setInputType(InputType.convolutionalFlat(28, 28, 1)) //See note below + .inputType(InputType.convolutionalFlat(28, 28, 1)) //See note below .build(); MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new ConvolutionalIterationListener(1), new ScoreIterationListener(1)); + net.addTrainingListeners(new ConvolutionalIterationListener(1), new ScoreIterationListener(1)); for (int i = 0; i < 10; i++) { net.fit(mnistTrain.next()); @@ -83,7 +82,7 @@ public class TestConvolutionalListener { } ComputationGraph cg = net.toComputationGraph(); - cg.setListeners(new ConvolutionalIterationListener(1), new ScoreIterationListener(1)); + cg.addTrainingListeners(new ConvolutionalIterationListener(1), new ScoreIterationListener(1)); for (int i = 0; i < 10; i++) { cg.fit(mnistTrain.next()); Thread.sleep(1000); diff --git a/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/BaseStatsListener.java b/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/BaseStatsListener.java index 3ecc58fd5..70144bfd3 100644 --- a/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/BaseStatsListener.java +++ b/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/BaseStatsListener.java @@ -21,6 +21,7 @@ package org.deeplearning4j.ui.model.stats; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.IOUtils; import org.bytedeco.javacpp.Pointer; import org.deeplearning4j.common.config.DL4JClassLoading; @@ -28,8 +29,7 @@ import org.deeplearning4j.core.storage.StatsStorageRouter; import org.deeplearning4j.core.storage.StorageMetaData; import org.deeplearning4j.core.storage.listener.RoutingIterationListener; import org.deeplearning4j.nn.api.Layer; -import org.deeplearning4j.nn.api.Model; -import org.deeplearning4j.nn.conf.NeuralNetConfiguration; +import org.deeplearning4j.nn.conf.layers.LayerConfiguration; import org.deeplearning4j.nn.gradient.Gradient; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -52,7 +52,6 @@ import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; import java.lang.management.OperatingSystemMXBean; import java.lang.management.RuntimeMXBean; -import java.lang.reflect.Constructor; import java.util.*; @Slf4j @@ -86,7 +85,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { private Map meanMagGradients; private static class ModelInfo implements Serializable { - private final Model model; + private final IModel model; private long initTime; private long lastReportTime = -1; private int lastReportIteration = -1; @@ -98,12 +97,12 @@ public abstract class BaseStatsListener implements RoutingIterationListener { private int iterCount = 0; - private ModelInfo(Model model) { + private ModelInfo(IModel model) { this.model = model; } } - private ModelInfo getModelInfo(Model model) { + private ModelInfo getModelInfo(IModel model) { ModelInfo mi = null; for (ModelInfo m : modelInfos) { if (m.model == 
model) { @@ -219,11 +218,11 @@ public abstract class BaseStatsListener implements RoutingIterationListener { return sessionID; } - private String getSessionID(Model model) { + private String getSessionID(IModel model) { if (model instanceof MultiLayerNetwork || model instanceof ComputationGraph) return sessionID; if (model instanceof Layer) { - //Keep in mind MultiLayerNetwork implements Layer also... + //Keep in mind MultiLayerNetwork implements ILayer also... Layer l = (Layer) model; int layerIdx = l.getIndex(); return sessionID + "_layer" + layerIdx; @@ -232,17 +231,17 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } @Override - public void onEpochStart(Model model) { + public void onEpochStart(IModel model) { } @Override - public void onEpochEnd(Model model) { + public void onEpochEnd(IModel model) { } @Override - public void onForwardPass(Model model, List activations) { + public void onForwardPass(IModel model, List activations) { int iterCount = getModelInfo(model).iterCount; if (calcFromActivations() && (iterCount == 0 || iterCount % updateConfig.reportingFrequency() == 0)) { //Assumption: we have input, layer 0, layer 1, ... @@ -258,7 +257,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } @Override - public void onForwardPass(Model model, Map activations) { + public void onForwardPass(IModel model, Map activations) { int iterCount = getModelInfo(model).iterCount; if (calcFromActivations() && updateConfig.reportingFrequency() > 0 && (iterCount == 0 || iterCount % updateConfig.reportingFrequency() == 0)) { @@ -278,7 +277,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } @Override - public void onGradientCalculation(Model model) { + public void onGradientCalculation(IModel model) { int iterCount = getModelInfo(model).iterCount; if (calcFromGradients() && updateConfig.reportingFrequency() > 0 && (iterCount == 0 || iterCount % updateConfig.reportingFrequency() == 0)) { @@ -312,12 +311,12 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } @Override - public void onBackwardPass(Model model) { + public void onBackwardPass(IModel model) { //No op } @Override - public void iterationDone(Model model, int iteration, int epoch) { + public void iterationDone(IModel model, int iteration, int epoch) { ModelInfo modelInfo = getModelInfo(model); boolean backpropParamsOnly = backpropParamsOnly(model); @@ -419,7 +418,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } //--- General --- - report.reportScore(model.score()); //Always report score + report.reportScore(model.getScore()); //Always report score if (updateConfig.collectLearningRates()) { Map lrs = new HashMap<>(); @@ -427,10 +426,10 @@ public abstract class BaseStatsListener implements RoutingIterationListener { //Need to append "0_", "1_" etc to param names from layers... 
int layerIdx = 0; for (Layer l : ((MultiLayerNetwork) model).getLayers()) { - NeuralNetConfiguration conf = l.conf(); - List paramkeys = l.conf().getLayer().initializer().paramKeys(l.conf().getLayer()); + LayerConfiguration conf = l.getLayerConfiguration(); + List paramkeys = l.getLayerConfiguration().initializer().paramKeys(l.getLayerConfiguration()); for (String s : paramkeys) { - double lr = conf.getLayer().getUpdaterByParam(s).getLearningRate(l.getIterationCount(), l.getEpochCount()); + double lr = conf.getUpdaterByParam(s).getLearningRate(l.getIterationCount(), l.getEpochCount()); if (Double.isNaN(lr)) { //Edge case: No-Op updater, AdaDelta etc - don't have a LR hence return NaN for IUpdater.getLearningRate lr = 0.0; @@ -441,11 +440,11 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } } else if (model instanceof ComputationGraph) { for (Layer l : ((ComputationGraph) model).getLayers()) { - NeuralNetConfiguration conf = l.conf(); - String layerName = conf.getLayer().getLayerName(); - List paramkeys = l.conf().getLayer().initializer().paramKeys(l.conf().getLayer()); + LayerConfiguration conf = l.getLayerConfiguration(); + String layerName = conf.getLayerName(); + List paramkeys = l.getLayerConfiguration().initializer().paramKeys(l.getLayerConfiguration()); for (String s : paramkeys) { - double lr = conf.getLayer().getUpdaterByParam(s).getLearningRate(l.getIterationCount(), l.getEpochCount()); + double lr = conf.getUpdaterByParam(s).getLearningRate(l.getIterationCount(), l.getEpochCount()); if (Double.isNaN(lr)) { //Edge case: No-Op updater, AdaDelta etc - don't have a LR hence return NaN for IUpdater.getLearningRate lr = 0.0; @@ -455,9 +454,9 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } } else if (model instanceof Layer) { Layer l = (Layer) model; - List paramkeys = l.conf().getLayer().initializer().paramKeys(l.conf().getLayer()); + List paramkeys = l.getLayerConfiguration().initializer().paramKeys(l.getLayerConfiguration()); for (String s : paramkeys) { - double lr = l.conf().getLayer().getUpdaterByParam(s).getLearningRate(l.getIterationCount(), l.getEpochCount()); + double lr = l.getLayerConfiguration().getUpdaterByParam(s).getLearningRate(l.getIterationCount(), l.getEpochCount()); lrs.put(s, lr); } } @@ -468,7 +467,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { //--- Histograms --- if (updateConfig.collectHistograms(StatsType.Parameters)) { - Map paramHistograms = getHistograms(model.paramTable(backpropParamsOnly), + Map paramHistograms = getHistograms(model.getParamTable(backpropParamsOnly), updateConfig.numHistogramBins(StatsType.Parameters)); report.reportHistograms(StatsType.Parameters, paramHistograms); } @@ -491,7 +490,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { //--- Summary Stats: Mean, Variance, Mean Magnitudes --- if (updateConfig.collectMean(StatsType.Parameters)) { - Map meanParams = calculateSummaryStats(model.paramTable(backpropParamsOnly), StatType.Mean); + Map meanParams = calculateSummaryStats(model.getParamTable(backpropParamsOnly), StatType.Mean); report.reportMean(StatsType.Parameters, meanParams); } @@ -512,7 +511,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { if (updateConfig.collectStdev(StatsType.Parameters)) { Map stdevParams = - calculateSummaryStats(model.paramTable(backpropParamsOnly), StatType.Stdev); + calculateSummaryStats(model.getParamTable(backpropParamsOnly), 
StatType.Stdev); report.reportStdev(StatsType.Parameters, stdevParams); } @@ -533,7 +532,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { if (updateConfig.collectMeanMagnitudes(StatsType.Parameters)) { Map meanMagParams = - calculateSummaryStats(model.paramTable(backpropParamsOnly), StatType.MeanMagnitude); + calculateSummaryStats(model.getParamTable(backpropParamsOnly), StatType.MeanMagnitude); report.reportMeanMagnitudes(StatsType.Parameters, meanMagParams); } @@ -576,7 +575,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { return System.currentTimeMillis(); } - private void doInit(Model model) { + private void doInit(IModel model) { boolean backpropParamsOnly = backpropParamsOnly(model); long initTime = System.currentTimeMillis(); //TODO support NTP StatsInitializationReport initReport = getNewInitializationReport(); @@ -653,17 +652,17 @@ public abstract class BaseStatsListener implements RoutingIterationListener { long numParams; if (model instanceof MultiLayerNetwork) { MultiLayerNetwork net = ((MultiLayerNetwork) model); - jsonConf = net.getLayerWiseConfigurations().toJson(); + jsonConf = net.getNetConfiguration().toJson(); numLayers = net.getnLayers(); numParams = net.numParams(); } else if (model instanceof ComputationGraph) { ComputationGraph cg = ((ComputationGraph) model); - jsonConf = cg.getConfiguration().toJson(); + jsonConf = cg.getComputationGraphConfiguration().toJson(); numLayers = cg.getNumLayers(); numParams = cg.numParams(); } else if (model instanceof Layer) { Layer l = (Layer) model; - jsonConf = l.conf().toJson(); + jsonConf = l.getNetConfiguration().toJson(); numLayers = 1; numParams = l.numParams(); } else { @@ -671,7 +670,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { + (model == null ? 
null : model.getClass())); } - Map paramMap = model.paramTable(backpropParamsOnly); + Map paramMap = model.getParamTable(backpropParamsOnly); String[] paramNames = new String[paramMap.size()]; int i = 0; for (String s : paramMap.keySet()) { //Assuming sensible iteration order - LinkedHashMaps are used in MLN/CG for example @@ -708,7 +707,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { } } - private void updateExamplesMinibatchesCounts(Model model) { + private void updateExamplesMinibatchesCounts(IModel model) { ModelInfo modelInfo = getModelInfo(model); int examplesThisMinibatch = 0; if (model instanceof MultiLayerNetwork) { @@ -724,7 +723,7 @@ public abstract class BaseStatsListener implements RoutingIterationListener { modelInfo.totalMinibatches++; } - private boolean backpropParamsOnly(Model model) { + private boolean backpropParamsOnly(IModel model) { //For pretrain layers (VAE, AE) we *do* want pretrain params also; for MLN and CG we only want backprop params // as we only have backprop gradients return model instanceof MultiLayerNetwork || model instanceof ComputationGraph; diff --git a/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/impl/SbeStatsReport.java b/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/impl/SbeStatsReport.java index 39c34a4b9..6ffe27c41 100644 --- a/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/impl/SbeStatsReport.java +++ b/cavis-ui/cavis-ui-model/src/main/java/org/deeplearning4j/ui/model/stats/impl/SbeStatsReport.java @@ -494,7 +494,7 @@ public class SbeStatsReport implements StatsReport, AgronaPersistable { bufferSize += SbeUtil.toBytes(true, s).length; //Content } - //Layer names group + //ILayer names group bufferSize += 4; //Header; always present List layerNames = getlayerNames(); for (String s : layerNames) { @@ -728,7 +728,7 @@ public class SbeStatsReport implements StatsReport, AgronaPersistable { pne.next().paramName(s); } - //Layer names + //ILayer names List layerNames = getlayerNames(); UpdateEncoder.LayerNamesEncoder lne = ue.layerNamesCount(layerNames.size()); for (String s : layerNames) { diff --git a/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestStatsListener.java b/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestStatsListener.java index 56952d870..24e5e1ed9 100644 --- a/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestStatsListener.java +++ b/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestStatsListener.java @@ -24,7 +24,6 @@ import org.deeplearning4j.core.storage.Persistable; import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; @@ -51,10 +50,10 @@ public class TestStatsListener extends BaseDL4JTest { DataSet ds = new IrisDataSetIterator(150, 150).next(); - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) - .list().layer(0, + .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT) 
.activation(Activation.SOFTMAX).nIn(4).nOut(3).build()) .build(); @@ -65,9 +64,9 @@ public class TestStatsListener extends BaseDL4JTest { StatsStorage ss = new MapDBStatsStorage(); //in-memory if (useJ7) { - net.setListeners(new J7StatsListener(ss, 1)); + net.addTrainingListeners(new J7StatsListener(ss, 1)); } else { - net.setListeners(new StatsListener(ss, 1)); + net.addTrainingListeners(new StatsListener(ss, 1)); } diff --git a/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestTransferStatsCollection.java b/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestTransferStatsCollection.java index 3cf4ec7d9..1dc5cb1a6 100644 --- a/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestTransferStatsCollection.java +++ b/cavis-ui/cavis-ui-model/src/test/java/org/deeplearning4j/ui/stats/TestTransferStatsCollection.java @@ -21,7 +21,6 @@ package org.deeplearning4j.ui.stats; import org.deeplearning4j.BaseDL4JTest; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -43,7 +42,7 @@ public class TestTransferStatsCollection extends BaseDL4JTest { @Test public void test() throws IOException { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .layer(0, new DenseLayer.Builder().nIn(10).nOut(10).build()) .layer(1, new OutputLayer.Builder().activation(Activation.SOFTMAX).nIn(10).nOut(10).build()).build(); @@ -57,7 +56,7 @@ public class TestTransferStatsCollection extends BaseDL4JTest { new FineTuneConfiguration.Builder().updater(new Sgd(0.01)).build()) .setFeatureExtractor(0).build(); - net2.setListeners(new StatsListener(new InMemoryStatsStorage())); + net2.addTrainingListeners(new StatsListener(new InMemoryStatsStorage())); //Previosuly: failed on frozen layers net2.fit(new DataSet(Nd4j.rand(8, 10), Nd4j.rand(8, 10))); diff --git a/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModule.java b/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModule.java index 858648018..89b2ceef6 100644 --- a/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModule.java +++ b/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModule.java @@ -38,7 +38,6 @@ import org.deeplearning4j.core.storage.StatsStorageEvent; import org.deeplearning4j.core.storage.StatsStorageListener; import org.deeplearning4j.common.config.DL4JSystemProperties; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; @@ -872,7 +871,7 @@ public class TrainModule implements UIModule { .end(json); } - private TrainModuleUtils.GraphInfo getGraphInfo(Triple conf) { if (conf == null) { return null; @@ -881,7 +880,7 @@ public class TrainModule implements UIModule { if (conf.getFirst() != null) { return TrainModuleUtils.buildGraphInfo(conf.getFirst()); } else if (conf.getSecond() != null) { - return TrainModuleUtils.buildGraphInfo(conf.getSecond()); + return TrainModuleUtils.buildGraphInfo(conf.getSecond().getDefaultConfiguration()); } else if (conf.getThird() != null) { return 
TrainModuleUtils.buildGraphInfo(conf.getThird()); } else { @@ -889,7 +888,7 @@ public class TrainModule implements UIModule { } } - private Triple getConfig(String sessionId) { + private Triple getConfig(String sessionId) { boolean noData = (sessionId == null || !knownSessionIDs.containsKey(sessionId)); StatsStorage ss = (noData ? null : knownSessionIDs.get(sessionId)); List allStatic = (noData ? Collections.EMPTY_LIST @@ -902,7 +901,7 @@ public class TrainModule implements UIModule { String config = p.getModelConfigJson(); if (modelClass.endsWith("MultiLayerNetwork")) { - MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(config); + NeuralNetConfiguration conf = NeuralNetConfiguration.fromJson(config); return new Triple<>(conf, null, null); } else if (modelClass.endsWith("ComputationGraph")) { ComputationGraphConfiguration conf = ComputationGraphConfiguration.fromJson(config); @@ -940,7 +939,7 @@ public class TrainModule implements UIModule { Map result = new HashMap<>(); result.put("updateTimestamp", lastUpdateTime); - Triple conf = getConfig(sessionId); + Triple conf = getConfig(sessionId); if (conf == null) { rc.response() .putHeader("content-type", "application/json") @@ -1097,7 +1096,7 @@ public class TrainModule implements UIModule { .end(asJson(ret)); } - private static String getLayerType(Layer layer) { + private static String getLayerType(LayerConfiguration layer) { String layerType = "n/a"; if (layer != null) { try { @@ -1124,14 +1123,14 @@ public class TrainModule implements UIModule { //TODO error handling... String layerType = ""; - Layer layer = null; + LayerConfiguration layer = null; NeuralNetConfiguration nnc = null; if (modelClass.endsWith("MultiLayerNetwork")) { - MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(configJson); + NeuralNetConfiguration conf = NeuralNetConfiguration.fromJson(configJson); int confIdx = layerIdx - 1; //-1 because of input if (confIdx >= 0) { - nnc = conf.getConf(confIdx); - layer = nnc.getLayer(); + layer = conf.getFlattenedLayerConfigurations().get(confIdx); + nnc = layer.getNetConfiguration(); } else { //Input layer layerType = "Input"; @@ -1144,8 +1143,8 @@ public class TrainModule implements UIModule { Map vertices = conf.getVertices(); if (vertices.containsKey(vertexName) && vertices.get(vertexName) instanceof LayerVertex) { LayerVertex lv = (LayerVertex) vertices.get(vertexName); - nnc = lv.getLayerConf(); - layer = nnc.getLayer(); + nnc = lv.getNetConfiguration(); + layer = lv.getLayerConfiguration(); } else if (conf.getNetworkInputs().contains(vertexName)) { layerType = "Input"; } else { @@ -1175,15 +1174,15 @@ public class TrainModule implements UIModule { layerInfoRows.add(new String[]{i18N.getMessage("train.model.layerinfotable.layerSize"), String.valueOf(ffl.getNOut())}); } - if (layer instanceof BaseLayer) { - BaseLayer bl = (BaseLayer) layer; + if (layer instanceof BaseLayerConfiguration) { + BaseLayerConfiguration bl = (BaseLayerConfiguration) layer; activationFn = bl.getActivationFn().toString(); - long nParams = layer.initializer().numParams(nnc); + long nParams = layer.initializer().numParams(bl.getLayer()); layerInfoRows.add(new String[]{i18N.getMessage("train.model.layerinfotable.layerNParams"), String.valueOf(nParams)}); if (nParams > 0) { try { - String str = JsonMappers.getMapper().writeValueAsString(bl.getWeightInitFn()); + String str = JsonMappers.getMapper().writeValueAsString(bl.getWeightInit()); layerInfoRows.add(new String[]{ 
i18N.getMessage("train.model.layerinfotable.layerWeightInit"), str}); } catch (JsonProcessingException e) { diff --git a/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModuleUtils.java b/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModuleUtils.java index ff6f00901..aebfaffa7 100644 --- a/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModuleUtils.java +++ b/cavis-ui/cavis-ui-vertx/src/main/java/org/deeplearning4j/ui/module/train/TrainModuleUtils.java @@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import lombok.AllArgsConstructor; import lombok.Data; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.graph.GraphVertex; import org.deeplearning4j.nn.conf.graph.LayerVertex; @@ -50,7 +49,7 @@ public class TrainModuleUtils { private List originalVertexName; } - public static GraphInfo buildGraphInfo(MultiLayerConfiguration config) { + public static GraphInfo buildGraphInfo(NeuralNetConfiguration config) { List vertexNames = new ArrayList<>(); List originalVertexName = new ArrayList<>(); List layerTypes = new ArrayList<>(); @@ -63,30 +62,31 @@ public class TrainModuleUtils { layerInfo.add(Collections.emptyMap()); - List list = config.getConfs(); + List list = config.getFlattenedLayerConfigurations(); int layerIdx = 1; - for (NeuralNetConfiguration c : list) { - Layer layer = c.getLayer(); + for (LayerConfiguration c : list) { + LayerConfiguration layer = c; String layerName = layer.getLayerName(); if (layerName == null) layerName = "layer" + layerIdx; vertexNames.add(layerName); originalVertexName.add(String.valueOf(layerIdx - 1)); - String layerType = c.getLayer().getClass().getSimpleName().replaceAll("Layer$", ""); + String layerType = c.getClass().getSimpleName().replaceAll("Layer$", ""); layerTypes.add(layerType); layerInputs.add(Collections.singletonList(layerIdx - 1)); layerIdx++; //Extract layer info - Map map = getLayerInfo(c, layer); + Map map = getLayerInfo(c.getNetConfiguration(), layer); layerInfo.add(map); } return new GraphInfo(vertexNames, layerTypes, layerInputs, layerInfo, originalVertexName); } + /** public static GraphInfo buildGraphInfo(ComputationGraphConfiguration config) { List layerNames = new ArrayList<>(); List layerTypes = new ArrayList<>(); @@ -129,7 +129,7 @@ public class TrainModuleUtils { if (gv instanceof LayerVertex) { NeuralNetConfiguration c = ((LayerVertex) gv).getLayerConf(); - Layer layer = c.getLayer(); + LayerConfiguration layer = c.getFirstLayer(); String layerType = layer.getClass().getSimpleName().replaceAll("Layer$", ""); layerTypes.add(layerType); @@ -148,7 +148,9 @@ public class TrainModuleUtils { return new GraphInfo(layerNames, layerTypes, layerInputs, layerInfo, originalVertexName); } + **/ + /** public static GraphInfo buildGraphInfo(NeuralNetConfiguration config) { List vertexNames = new ArrayList<>(); @@ -162,9 +164,9 @@ public class TrainModuleUtils { layerInputs.add(Collections.emptyList()); layerInfo.add(Collections.emptyMap()); - if (config.getLayer() instanceof VariationalAutoencoder) { + if (config.getFirstLayer() instanceof VariationalAutoencoder) { //Special case like this is a bit ugly - but it works - VariationalAutoencoder va = (VariationalAutoencoder) config.getLayer(); + VariationalAutoencoder va = (VariationalAutoencoder) config.getFirstLayer(); int[] 
encLayerSizes = va.getEncoderLayerSizes(); int[] decLayerSizes = va.getDecoderLayerSizes(); @@ -182,7 +184,7 @@ public class TrainModuleUtils { long inputSize = (i == 0 ? va.getNIn() : encLayerSizes[i - 1]); long outputSize = encLayerSizes[i]; encoderInfo.put("Input Size", String.valueOf(inputSize)); - encoderInfo.put("Layer Size", String.valueOf(outputSize)); + encoderInfo.put("ILayer Size", String.valueOf(outputSize)); encoderInfo.put("Num Parameters", String.valueOf((inputSize + 1) * outputSize)); encoderInfo.put("Activation Function", va.getActivationFn().toString()); layerInfo.add(encoderInfo); @@ -197,7 +199,7 @@ public class TrainModuleUtils { long inputSize = encLayerSizes[encLayerSizes.length - 1]; long outputSize = va.getNOut(); latentInfo.put("Input Size", String.valueOf(inputSize)); - latentInfo.put("Layer Size", String.valueOf(outputSize)); + latentInfo.put("ILayer Size", String.valueOf(outputSize)); latentInfo.put("Num Parameters", String.valueOf((inputSize + 1) * outputSize * 2)); latentInfo.put("Activation Function", va.getPzxActivationFn().toString()); layerInfo.add(latentInfo); @@ -216,7 +218,7 @@ public class TrainModuleUtils { inputSize = (i == 0 ? va.getNOut() : decLayerSizes[i - 1]); outputSize = decLayerSizes[i]; decoderInfo.put("Input Size", String.valueOf(inputSize)); - decoderInfo.put("Layer Size", String.valueOf(outputSize)); + decoderInfo.put("ILayer Size", String.valueOf(outputSize)); decoderInfo.put("Num Parameters", String.valueOf((inputSize + 1) * outputSize)); decoderInfo.put("Activation Function", va.getActivationFn().toString()); layerInfo.add(decoderInfo); @@ -231,7 +233,7 @@ public class TrainModuleUtils { inputSize = decLayerSizes[decLayerSizes.length - 1]; outputSize = va.getNIn(); reconstructionInfo.put("Input Size", String.valueOf(inputSize)); - reconstructionInfo.put("Layer Size", String.valueOf(outputSize)); + reconstructionInfo.put("ILayer Size", String.valueOf(outputSize)); reconstructionInfo.put("Num Parameters", String .valueOf((inputSize + 1) * va.getOutputDistribution().distributionInputSize((int) va.getNIn()))); reconstructionInfo.put("Distribution", va.getOutputDistribution().toString()); @@ -240,14 +242,14 @@ public class TrainModuleUtils { } else { //VAE or similar... 
- Layer layer = config.getLayer(); + LayerConfiguration layer = config.getFirstLayer(); String layerName = layer.getLayerName(); if (layerName == null) layerName = "layer0"; vertexNames.add(layerName); originalVertexName.add("0"); - String layerType = config.getLayer().getClass().getSimpleName().replaceAll("Layer$", ""); + String layerType = config.getFirstLayer().getClass().getSimpleName().replaceAll("Layer$", ""); layerTypes.add(layerType); layerInputs.add(Collections.singletonList(0)); @@ -256,20 +258,18 @@ public class TrainModuleUtils { Map map = getLayerInfo(config, layer); layerInfo.add(map); } - - return new GraphInfo(vertexNames, layerTypes, layerInputs, layerInfo, originalVertexName); } +**/ - - private static Map getLayerInfo(NeuralNetConfiguration c, Layer layer) { + private static Map getLayerInfo(NeuralNetConfiguration c, LayerConfiguration layer) { Map map = new LinkedHashMap<>(); if (layer instanceof FeedForwardLayer) { FeedForwardLayer layer1 = (FeedForwardLayer) layer; map.put("Input size", String.valueOf(layer1.getNIn())); map.put("Output size", String.valueOf(layer1.getNOut())); - map.put("Num Parameters", String.valueOf(layer1.initializer().numParams(c))); + map.put("Num Parameters", String.valueOf(layer1.initializer().numParams(layer))); map.put("Activation Function", layer1.getActivationFn().toString()); } diff --git a/cavis-ui/cavis-ui-vertx/src/main/resources/templates/SameDiffUI.html b/cavis-ui/cavis-ui-vertx/src/main/resources/templates/SameDiffUI.html index 951aabeb5..2ecadd3ee 100644 --- a/cavis-ui/cavis-ui-vertx/src/main/resources/templates/SameDiffUI.html +++ b/cavis-ui/cavis-ui-vertx/src/main/resources/templates/SameDiffUI.html @@ -143,7 +143,7 @@ + Spread
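
Note on the pattern applied throughout the UI and test hunks above: NeuralNetConfiguration.builder() replaces new NeuralNetConfiguration.Builder() and the trailing .list() call, inputType(...) replaces setInputType(...), build() yields a NeuralNetConfiguration rather than a MultiLayerConfiguration, and setListeners(...) becomes addTrainingListeners(...). The following is only a minimal sketch of that refactored API as it appears in the hunks; the class name and the seed/updater/layer values are illustrative, not taken from any single file above.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class BuilderMigrationSketch {                         // hypothetical class, for illustration only
    public static void main(String[] args) {
        // builder() replaces new NeuralNetConfiguration.Builder(); the .list() call is gone
        NeuralNetConfiguration conf = NeuralNetConfiguration.builder()
                .seed(12345)
                .updater(new Sgd(0.03))
                .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(4).nOut(3).build())
                .build();                                     // NeuralNetConfiguration, not MultiLayerConfiguration

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        // setListeners(...) is replaced by addTrainingListeners(...)
        net.addTrainingListeners(new ScoreIterationListener(1));
    }
}
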

diff --git a/cavis-ui/cavis-ui-vertx/src/main/resources/templates/TrainingModel.html.ftl b/cavis-ui/cavis-ui-vertx/src/main/resources/templates/TrainingModel.html.ftl index 859aae287..51d63af6b 100644 --- a/cavis-ui/cavis-ui-vertx/src/main/resources/templates/TrainingModel.html.ftl +++ b/cavis-ui/cavis-ui-vertx/src/main/resources/templates/TrainingModel.html.ftl @@ -103,7 +103,7 @@

- +
@@ -179,7 +179,7 @@
- +
@@ -244,7 +244,7 @@ - + diff --git a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestRemoteReceiver.java b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestRemoteReceiver.java index 37a7aab14..09dbe9846 100644 --- a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestRemoteReceiver.java +++ b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestRemoteReceiver.java @@ -27,7 +27,6 @@ import org.deeplearning4j.core.storage.impl.CollectionStatsStorageRouter; import org.deeplearning4j.core.storage.impl.RemoteUIStatsStorageRouter; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -133,8 +132,8 @@ public class TestRemoteReceiver extends BaseDL4JTest { public void testRemoteFull() throws Exception { //Use this in conjunction with startRemoteUI() - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(4).nOut(3).build()) @@ -143,7 +142,7 @@ public class TestRemoteReceiver extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); try(RemoteUIStatsStorageRouter ssr = new RemoteUIStatsStorageRouter("http://localhost:9000")) { - net.setListeners(new StatsListener(ssr), new ScoreIterationListener(1)); + net.addTrainingListeners(new StatsListener(ssr), new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(150, 150); diff --git a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUI.java b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUI.java index 988b9b502..d51f74aba 100644 --- a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUI.java +++ b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUI.java @@ -31,7 +31,6 @@ import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.exception.DL4JException; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -60,7 +59,6 @@ import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; import static org.junit.jupiter.api.Assertions.*; @@ -94,10 +92,10 @@ public class TestVertxUI extends BaseDL4JTest { UIServer uiServer = UIServer.getInstance(); uiServer.attach(ss); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(new Sgd(1e-5)) - .list().layer(0, 
+ .layer(0, new VariationalAutoencoder.Builder().nIn(4).nOut(3).encoderLayerSizes(10, 11) .decoderLayerSizes(12, 13).weightInit(WeightInit.XAVIER) .pzxActivationFunction(Activation.IDENTITY) @@ -114,7 +112,7 @@ public class TestVertxUI extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new StatsListener(ss), new ScoreIterationListener(1)); + net.addTrainingListeners(new StatsListener(ss), new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -135,8 +133,8 @@ public class TestVertxUI extends BaseDL4JTest { UIServer uiServer = UIServer.getInstance(); uiServer.attach(ss); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(4).nOut(3).build()) @@ -144,7 +142,7 @@ public class TestVertxUI extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new StatsListener(ss, 1), new ScoreIterationListener(1)); + net.addTrainingListeners(new StatsListener(ss, 1), new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -163,7 +161,7 @@ public class TestVertxUI extends BaseDL4JTest { UIServer uiServer = UIServer.getInstance(); uiServer.attach(ss); - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("L0", new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build(), "in") .addLayer("L1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) @@ -173,7 +171,7 @@ public class TestVertxUI extends BaseDL4JTest { ComputationGraph net = new ComputationGraph(conf); net.init(); - net.setListeners(new StatsListener(ss), new ScoreIterationListener(1)); + net.addTrainingListeners(new StatsListener(ss), new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -185,7 +183,7 @@ public class TestVertxUI extends BaseDL4JTest { @Test public void testAutoAttach() throws Exception { - ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder().addInputs("in") + ComputationGraphConfiguration conf = NeuralNetConfiguration.builder().graphBuilder().addInputs("in") .addLayer("L0", new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(4).build(), "in") .addLayer("L1", new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) @@ -197,7 +195,7 @@ public class TestVertxUI extends BaseDL4JTest { StatsStorage ss1 = new InMemoryStatsStorage(); - net.setListeners(new StatsListener(ss1, 1, "ss1")); + net.addTrainingListeners(new StatsListener(ss1, 1, "ss1")); DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -206,7 +204,7 @@ public class TestVertxUI extends BaseDL4JTest { } StatsStorage ss2 = new InMemoryStatsStorage(); - net.setListeners(new StatsListener(ss2, 1, "ss2")); + net.addTrainingListeners(new StatsListener(ss2, 1, "ss2")); for (int i = 0; i < 4; i++) { net.fit(iter); diff --git 
a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIManual.java b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIManual.java index d6f11df5e..e17681c4c 100644 --- a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIManual.java +++ b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIManual.java @@ -28,7 +28,6 @@ import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -92,12 +91,12 @@ public class TestVertxUIManual extends BaseDL4JTest { int numInputs = 4; int outputNum = 3; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .activation(Activation.TANH) .weightInit(WeightInit.XAVIER) .updater(new Sgd(0.03)) .l2(1e-4) - .list() + .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(3) .build()) .layer(1, new DenseLayer.Builder().nIn(3).nOut(3) @@ -109,7 +108,7 @@ public class TestVertxUIManual extends BaseDL4JTest { MultiLayerNetwork net = new MultiLayerNetwork(conf); net.init(); - net.setListeners(new StatsListener(ss), new ScoreIterationListener(1)); + net.addTrainingListeners(new StatsListener(ss), new ScoreIterationListener(1)); DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -192,8 +191,8 @@ public class TestVertxUIManual extends BaseDL4JTest { ss = new InMemoryStatsStorage(); String sessionId = Integer.toString(session); statsProvider.put(sessionId, ss); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(layerSize).nOut(3).build()) @@ -204,7 +203,7 @@ public class TestVertxUIManual extends BaseDL4JTest { StatsListener statsListener = new StatsListener(ss, 1); statsListener.setSessionID(sessionId); - net.setListeners(statsListener, new ScoreIterationListener(1)); + net.addTrainingListeners(statsListener, new ScoreIterationListener(1)); uIServer.attach(ss); DataSetIterator iter = new IrisDataSetIterator(150, 150); diff --git a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIMultiSession.java b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIMultiSession.java index 5a774dceb..fb21f9561 100644 --- a/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIMultiSession.java +++ b/cavis-ui/cavis-ui-vertx/src/test/java/org/deeplearning4j/ui/TestVertxUIMultiSession.java @@ -27,7 +27,6 @@ import org.deeplearning4j.core.storage.StatsStorage; import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator; import org.deeplearning4j.exception.DL4JException; import org.deeplearning4j.nn.api.OptimizationAlgorithm; -import org.deeplearning4j.nn.conf.MultiLayerConfiguration; import org.deeplearning4j.nn.conf.NeuralNetConfiguration; 
import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -87,9 +86,9 @@ public class TestVertxUIMultiSession extends BaseDL4JTest { Thread training = new Thread(() -> { int layerSize = sid + 4; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() .updater(new Adam(1e-2)) - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(layerSize).nOut(3).build()) @@ -101,7 +100,7 @@ public class TestVertxUIMultiSession extends BaseDL4JTest { StatsListener statsListener = new StatsListener(ss, 1); statsListener.setSessionID(sessionId); - net.setListeners(statsListener, new ScoreIterationListener(1)); + net.addTrainingListeners(statsListener, new ScoreIterationListener(1)); uIServer.attach(ss); DataSetIterator iter = new IrisDataSetIterator(150, 150); @@ -153,8 +152,8 @@ public class TestVertxUIMultiSession extends BaseDL4JTest { InMemoryStatsStorage ss = new InMemoryStatsStorage(); String sessionId = Integer.toString(session); statsStorageForSession.put(sessionId, ss); - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() - .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).list() + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .layer(0, new DenseLayer.Builder().activation(Activation.TANH).nIn(4).nOut(layerSize).build()) .layer(1, new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX).nIn(layerSize).nOut(3).build()) @@ -165,7 +164,7 @@ public class TestVertxUIMultiSession extends BaseDL4JTest { StatsListener statsListener = new StatsListener(ss, 1); statsListener.setSessionID(sessionId); - net.setListeners(statsListener, new ScoreIterationListener(1)); + net.addTrainingListeners(statsListener, new ScoreIterationListener(1)); uIServer.attach(ss); DataSetIterator iter = new IrisDataSetIterator(150, 150); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/InstantiableModel.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/InstantiableModel.java index 6045f7ca1..fa72dea55 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/InstantiableModel.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/InstantiableModel.java @@ -20,20 +20,20 @@ package org.deeplearning4j.zoo; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; public interface InstantiableModel { void setInputShape(int[][] inputShape); - M init(); + M init(); /** * @deprecated No longer used, will be removed in a future release */ @Deprecated ModelMetaData metaData(); - Class modelType(); + Class modelType(); String pretrainedUrl(PretrainedType pretrainedType); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/ZooModel.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/ZooModel.java index 958edec33..da2bc3a78 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/ZooModel.java +++ 
b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/ZooModel.java @@ -21,10 +21,10 @@ package org.deeplearning4j.zoo; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.apache.commons.io.FileUtils; import org.deeplearning4j.common.resources.DL4JResources; import org.deeplearning4j.common.resources.ResourceType; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.util.ModelSerializer; @@ -48,7 +48,7 @@ public abstract class ZooModel implements InstantiableModel { * @return * @throws IOException */ - public Model initPretrained() throws IOException { + public IModel initPretrained() throws IOException { return initPretrained(PretrainedType.IMAGENET); } @@ -59,7 +59,7 @@ public abstract class ZooModel implements InstantiableModel { * @return * @throws IOException */ - public M initPretrained(PretrainedType pretrainedType) throws IOException { + public M initPretrained(PretrainedType pretrainedType) throws IOException { String remoteUrl = pretrainedUrl(pretrainedType); if (remoteUrl == null) throw new UnsupportedOperationException( diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/AlexNet.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/AlexNet.java index b65441942..9e55c5b26 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/AlexNet.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/AlexNet.java @@ -22,13 +22,14 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; +import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.zoo.ModelMetaData; import org.deeplearning4j.zoo.PretrainedType; import org.deeplearning4j.zoo.ZooModel; @@ -64,15 +65,16 @@ public class AlexNet extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return MultiLayerNetwork.class; } - public MultiLayerConfiguration conf() { + public NeuralNetConfiguration conf() { double nonZeroBias = 1; - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed) - .weightInit(new NormalDistribution(0.0, 0.01)) + NeuralNetConfiguration conf = NeuralNetConfiguration.builder() + .seed(seed) + .weightInit( WeightInit.NORMAL) //new NormalDistribution(0.0, 0.01)) .activation(Activation.RELU) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) @@ -84,7 +86,7 @@ public class AlexNet extends ZooModel { .cacheMode(cacheMode) .l2(5 * 1e-4) .miniBatch(false) - .list() + .layer(0, new ConvolutionLayer.Builder(new int[]{11,11}, new int[]{4, 4}) .name("cnn1") .cudnnAlgoMode(ConvolutionLayer.AlgoMode.PREFER_FASTEST) @@ -158,15 +160,16 @@ public class AlexNet extends ZooModel { .build()) - .setInputType(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0])) - .build(); + .inputType( InputType.convolutional(inputShape[2], inputShape[1], inputShape[0]) ) + .build() + ; return conf; } @Override public 
MultiLayerNetwork init() { - MultiLayerConfiguration conf = conf(); + NeuralNetConfiguration conf = conf(); MultiLayerNetwork network = new MultiLayerNetwork(conf); network.init(); return network; diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Darknet19.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Darknet19.java index 739493bd8..c94161a4b 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Darknet19.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Darknet19.java @@ -22,14 +22,14 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration.GraphBuilder; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.graph.ComputationGraph; +import org.deeplearning4j.nn.weights.IWeightInit; import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.zoo.ModelMetaData; import org.deeplearning4j.zoo.PretrainedType; @@ -80,22 +80,22 @@ public class Darknet19 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } public ComputationGraphConfiguration conf() { - GraphBuilder graphBuilder = new NeuralNetConfiguration.Builder() + GraphBuilder graphBuilder = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder() .seed(seed) .updater(updater) .weightInit(weightInit) - .l2(0.00001) + .l2(0.00001) .activation(Activation.IDENTITY) .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) - .cudnnAlgoMode(cudnnAlgoMode) - .graphBuilder() + .cudnnAlgoMode(cudnnAlgoMode)) + .graphBuilder() .addInputs("input") .setInputTypes(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0])); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/FaceNetNN4Small2.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/FaceNetNN4Small2.java index 487401625..07ce6b985 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/FaceNetNN4Small2.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/FaceNetNN4Small2.java @@ -22,8 +22,7 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.graph.L2NormalizeVertex; @@ -69,13 +68,13 @@ public class FaceNetNN4Small2 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } public ComputationGraphConfiguration conf() { - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder().seed(seed) .activation(Activation.IDENTITY) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) 
.updater(updater) @@ -86,7 +85,7 @@ public class FaceNetNN4Small2 extends ZooModel { .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) .cudnnAlgoMode(cudnnAlgoMode) - .convolutionMode(ConvolutionMode.Same) + .convolutionMode(ConvolutionMode.Same)) .graphBuilder(); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/InceptionResNetV1.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/InceptionResNetV1.java index 50f14da0b..2d5d69dda 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/InceptionResNetV1.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/InceptionResNetV1.java @@ -22,18 +22,15 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; -import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.distribution.TruncatedNormalDistribution; import org.deeplearning4j.nn.conf.graph.L2NormalizeVertex; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.graph.ComputationGraph; -import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.zoo.ModelMetaData; import org.deeplearning4j.zoo.PretrainedType; import org.deeplearning4j.zoo.ZooModel; @@ -69,7 +66,7 @@ public class InceptionResNetV1 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } @@ -102,7 +99,8 @@ public class InceptionResNetV1 extends ZooModel { public ComputationGraphConfiguration.GraphBuilder graphBuilder(String input) { - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder() + .seed(seed) .activation(Activation.RELU) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) @@ -112,7 +110,7 @@ public class InceptionResNetV1 extends ZooModel { .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) - .convolutionMode(ConvolutionMode.Truncate).graphBuilder(); + .convolutionMode(ConvolutionMode.Truncate)).graphBuilder(); graph diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/LeNet.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/LeNet.java index 64a6f8c92..6dc75af5f 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/LeNet.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/LeNet.java @@ -22,9 +22,8 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -74,12 +73,12 @@ public class LeNet extends ZooModel { } @Override - public Class modelType() { + public Class 
modelType() { return MultiLayerNetwork.class; } - public MultiLayerConfiguration conf() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(seed) + public NeuralNetConfiguration conf() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(seed) .activation(Activation.IDENTITY) .weightInit(WeightInit.XAVIER) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) @@ -89,7 +88,7 @@ public class LeNet extends ZooModel { .inferenceWorkspaceMode(workspaceMode) .cudnnAlgoMode(cudnnAlgoMode) .convolutionMode(ConvolutionMode.Same) - .list() + // block 1 .layer(new ConvolutionLayer.Builder() .name("cnn1") @@ -128,14 +127,14 @@ public class LeNet extends ZooModel { .nOut(numClasses) .activation(Activation.SOFTMAX) // radial basis function required .build()) - .setInputType(InputType.convolutionalFlat(inputShape[2], inputShape[1], inputShape[0])) + .inputType(InputType.convolutionalFlat(inputShape[2], inputShape[1], inputShape[0])) .build(); return conf; } @Override - public Model init() { + public IModel init() { MultiLayerNetwork network = new MultiLayerNetwork(conf()); network.init(); return network; diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/NASNet.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/NASNet.java index 0e78f819e..35f617773 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/NASNet.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/NASNet.java @@ -22,8 +22,8 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -86,7 +86,7 @@ public class NASNet extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } @@ -110,7 +110,7 @@ public class NASNet extends ZooModel { } int filters = (int) Math.floor(penultimateFilters / 24); - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) .weightInit(weightInit) @@ -120,7 +120,7 @@ public class NASNet extends ZooModel { .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) .cudnnAlgoMode(cudnnAlgoMode) - .convolutionMode(ConvolutionMode.Truncate) + .convolutionMode(ConvolutionMode.Truncate)) .graphBuilder(); if(!skipReduction) { diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/ResNet50.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/ResNet50.java index 2453bb21c..70abb5722 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/ResNet50.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/ResNet50.java @@ -22,19 +22,16 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; +import 
net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; -import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.distribution.TruncatedNormalDistribution; import org.deeplearning4j.nn.conf.graph.ElementWiseVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; import org.deeplearning4j.nn.graph.ComputationGraph; import org.deeplearning4j.nn.weights.IWeightInit; -import org.deeplearning4j.nn.weights.WeightInit; import org.deeplearning4j.nn.weights.WeightInitDistribution; import org.deeplearning4j.zoo.ModelMetaData; import org.deeplearning4j.zoo.PretrainedType; @@ -77,7 +74,7 @@ public class ResNet50 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } @@ -175,7 +172,7 @@ public class ResNet50 extends ZooModel { public ComputationGraphConfiguration.GraphBuilder graphBuilder() { - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder().seed(seed) .activation(Activation.IDENTITY) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) @@ -187,7 +184,7 @@ public class ResNet50 extends ZooModel { .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) .cudnnAlgoMode(cudnnAlgoMode) - .convolutionMode(ConvolutionMode.Truncate) + .convolutionMode(ConvolutionMode.Truncate)) .graphBuilder(); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SimpleCNN.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SimpleCNN.java index 17f22d1f4..f5b1c41ee 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SimpleCNN.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SimpleCNN.java @@ -22,8 +22,7 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -63,13 +62,13 @@ public class SimpleCNN extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return MultiLayerNetwork.class; } - public MultiLayerConfiguration conf() { - MultiLayerConfiguration conf = - new NeuralNetConfiguration.Builder().seed(seed) + public NeuralNetConfiguration conf() { + NeuralNetConfiguration conf = + NeuralNetConfiguration.builder().seed(seed) .activation(Activation.IDENTITY) .weightInit(WeightInit.RELU) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) @@ -78,7 +77,7 @@ public class SimpleCNN extends ZooModel { .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) .convolutionMode(ConvolutionMode.Same) - .list() + // block 1 .layer(0, new ConvolutionLayer.Builder(new int[] {7, 7}).name("image_array") .nIn(inputShape[0]).nOut(16).build()) @@ -130,7 +129,7 @@ public class SimpleCNN extends ZooModel { .layer(31, new GlobalPoolingLayer.Builder(PoolingType.AVG).build()) .layer(32, new ActivationLayer.Builder().activation(Activation.SOFTMAX).build()) - 
.setInputType(InputType.convolutional(inputShape[2], inputShape[1], + .inputType(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0])) .build(); @@ -138,7 +137,7 @@ public class SimpleCNN extends ZooModel { } @Override - public Model init() { + public IModel init() { MultiLayerNetwork network = new MultiLayerNetwork(conf()); network.init(); return network; diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SqueezeNet.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SqueezeNet.java index 2f77a2d4c..e63e36cea 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SqueezeNet.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/SqueezeNet.java @@ -22,12 +22,10 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; -import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -79,12 +77,12 @@ public class SqueezeNet extends ZooModel { public ComputationGraph initPretrained(PretrainedType pretrainedType) throws IOException { ComputationGraph cg = (ComputationGraph) super.initPretrained(pretrainedType); //Set collapse dimensions to true in global avg pooling - more useful for users [N,1000] rather than [N,1000,1,1] out. Also matches non-pretrain config - ((GlobalPoolingLayer)cg.getLayer("global_average_pooling2d_5").conf().getLayer()).setCollapseDimensions(true); + ((GlobalPoolingLayer)cg.getLayer("global_average_pooling2d_5").getLayerConfiguration()).setCollapseDimensions(true); return cg; } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } @@ -103,7 +101,7 @@ public class SqueezeNet extends ZooModel { public ComputationGraphConfiguration.GraphBuilder graphBuilder() { - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) .weightInit(weightInit) @@ -112,7 +110,7 @@ public class SqueezeNet extends ZooModel { .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) - .convolutionMode(ConvolutionMode.Truncate) + .convolutionMode(ConvolutionMode.Truncate)) .graphBuilder(); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TextGenerationLSTM.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TextGenerationLSTM.java index 432c74231..962b8f677 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TextGenerationLSTM.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TextGenerationLSTM.java @@ -22,8 +22,7 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import 
org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.layers.ConvolutionLayer; @@ -66,12 +65,12 @@ public class TextGenerationLSTM extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return MultiLayerNetwork.class; } - public MultiLayerConfiguration conf() { - MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345) + public NeuralNetConfiguration conf() { + NeuralNetConfiguration conf = NeuralNetConfiguration.builder().seed(12345) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .l2(0.001) .weightInit(WeightInit.XAVIER) @@ -80,21 +79,21 @@ public class TextGenerationLSTM extends ZooModel { .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) .cudnnAlgoMode(cudnnAlgoMode) - .list() + .layer(0, new GravesLSTM.Builder().nIn(inputShape[1]).nOut(256).activation(Activation.TANH) .build()) .layer(1, new GravesLSTM.Builder().nOut(256).activation(Activation.TANH).build()) .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT) .activation(Activation.SOFTMAX) //MCXENT + softmax for classification .nOut(totalUniqueCharacters).build()) - .backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(50).tBPTTBackwardLength(50) + .backpropType(BackpropType.TruncatedBPTT).tbpttFwdLength(50).tbpttBackLength(50) .build(); return conf; } @Override - public Model init() { + public IModel init() { MultiLayerNetwork network = new MultiLayerNetwork(conf()); network.init(); return network; diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TinyYOLO.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TinyYOLO.java index abbdf06cf..e5281d33d 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TinyYOLO.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/TinyYOLO.java @@ -23,9 +23,8 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Getter; -import lombok.NoArgsConstructor; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration.GraphBuilder; @@ -80,14 +79,14 @@ public class TinyYOLO extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } public ComputationGraphConfiguration conf() { INDArray priors = Nd4j.create(priorBoxes); - GraphBuilder graphBuilder = new NeuralNetConfiguration.Builder() + GraphBuilder graphBuilder = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder() .seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) @@ -98,7 +97,7 @@ public class TinyYOLO extends ZooModel { .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) - .cudnnAlgoMode(cudnnAlgoMode) + .cudnnAlgoMode(cudnnAlgoMode)) .graphBuilder() .addInputs("input") .setInputTypes(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0])); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/UNet.java 
b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/UNet.java index ca8136f62..f9400ba8e 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/UNet.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/UNet.java @@ -22,11 +22,10 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; -import org.deeplearning4j.nn.conf.distribution.TruncatedNormalDistribution; import org.deeplearning4j.nn.conf.graph.MergeVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -73,7 +72,7 @@ public class UNet extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } @@ -92,7 +91,7 @@ public class UNet extends ZooModel { public ComputationGraphConfiguration.GraphBuilder graphBuilder() { - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) .weightInit(weightInit) @@ -100,7 +99,7 @@ public class UNet extends ZooModel { .miniBatch(true) .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) - .inferenceWorkspaceMode(workspaceMode) + .inferenceWorkspaceMode(workspaceMode)) .graphBuilder(); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG16.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG16.java index c52d8988d..2f6aa1cac 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG16.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG16.java @@ -22,8 +22,8 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.CacheMode; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration; @@ -83,19 +83,19 @@ public class VGG16 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } public ComputationGraphConfiguration conf() { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(seed) + ((NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) .activation(Activation.RELU) .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) - .inferenceWorkspaceMode(workspaceMode) + .inferenceWorkspaceMode(workspaceMode)) .graphBuilder() .addInputs("in") // block 1 diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG19.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG19.java index ee2bb0725..5e846efda 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG19.java +++ 
b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/VGG19.java @@ -22,9 +22,8 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.inputs.InputType; @@ -33,7 +32,6 @@ import org.deeplearning4j.nn.conf.layers.DenseLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; import org.deeplearning4j.nn.conf.layers.SubsamplingLayer; import org.deeplearning4j.nn.graph.ComputationGraph; -import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; import org.deeplearning4j.zoo.ModelMetaData; import org.deeplearning4j.zoo.PretrainedType; import org.deeplearning4j.zoo.ZooModel; @@ -74,19 +72,19 @@ public class VGG19 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } public ComputationGraphConfiguration conf() { ComputationGraphConfiguration conf = - new NeuralNetConfiguration.Builder().seed(seed) + ((NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) .activation(Activation.RELU) .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) - .inferenceWorkspaceMode(workspaceMode) + .inferenceWorkspaceMode(workspaceMode)) .graphBuilder() .addInputs("in") // block 1 diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Xception.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Xception.java index 4c851fa08..bbba3ff3c 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Xception.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/Xception.java @@ -22,12 +22,10 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; -import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; -import org.deeplearning4j.nn.conf.distribution.NormalDistribution; import org.deeplearning4j.nn.conf.graph.ElementWiseVertex; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.*; @@ -39,7 +37,6 @@ import org.deeplearning4j.zoo.ZooModel; import org.deeplearning4j.zoo.ZooType; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.learning.config.AdaDelta; -import org.nd4j.linalg.learning.config.AdaGrad; import org.nd4j.linalg.learning.config.IUpdater; import org.nd4j.linalg.lossfunctions.LossFunctions; @@ -75,7 +72,7 @@ public class Xception extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } @@ -94,7 +91,7 @@ public class Xception extends ZooModel { public ComputationGraphConfiguration.GraphBuilder graphBuilder() { - ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed) + ComputationGraphConfiguration.GraphBuilder graph =((NeuralNetConfiguration.NeuralNetConfigurationBuilder) NeuralNetConfiguration.builder().seed(seed) 
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .updater(updater) .weightInit(weightInit) @@ -103,7 +100,7 @@ public class Xception extends ZooModel { .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) - .convolutionMode(ConvolutionMode.Truncate) + .convolutionMode(ConvolutionMode.Truncate)) .graphBuilder(); diff --git a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/YOLO2.java b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/YOLO2.java index 030a5c46b..3c28a36a0 100644 --- a/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/YOLO2.java +++ b/cavis-zoo/cavis-zoo-models/src/main/java/org/deeplearning4j/zoo/model/YOLO2.java @@ -23,9 +23,8 @@ package org.deeplearning4j.zoo.model; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Getter; -import lombok.NoArgsConstructor; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.common.resources.DL4JResources; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.api.OptimizationAlgorithm; import org.deeplearning4j.nn.conf.*; import org.deeplearning4j.nn.conf.ComputationGraphConfiguration.GraphBuilder; @@ -87,14 +86,14 @@ public class YOLO2 extends ZooModel { } @Override - public Class modelType() { + public Class modelType() { return ComputationGraph.class; } public ComputationGraphConfiguration conf() { INDArray priors = Nd4j.create(priorBoxes); - GraphBuilder graphBuilder = new NeuralNetConfiguration.Builder() + GraphBuilder graphBuilder = ((NeuralNetConfiguration.NeuralNetConfigurationBuilder)NeuralNetConfiguration.builder() .seed(seed) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer) @@ -105,7 +104,7 @@ public class YOLO2 extends ZooModel { .cacheMode(cacheMode) .trainingWorkspaceMode(workspaceMode) .inferenceWorkspaceMode(workspaceMode) - .cudnnAlgoMode(cudnnAlgoMode) + .cudnnAlgoMode(cudnnAlgoMode)) .graphBuilder() .addInputs("input") .setInputTypes(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0])); diff --git a/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestImageNet.java b/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestImageNet.java index 0e6fdfb38..8d6b94d54 100644 --- a/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestImageNet.java +++ b/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestImageNet.java @@ -108,7 +108,7 @@ public class TestImageNet extends BaseDL4JTest { assertEquals("golden retriever", predictions.get(0).get(0).getLabel()); // clean up for current model - initializedModel.params().close(); + initializedModel.getModelParams().close(); Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); System.gc(); @@ -134,7 +134,7 @@ public class TestImageNet extends BaseDL4JTest { } // clean up for current model - initializedModel.params().close(); + initializedModel.getModelParams().close(); Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); System.gc(); @@ -159,7 +159,7 @@ public class TestImageNet extends BaseDL4JTest { assertEquals("dog", classPrediction.getLabel()); } - initializedModel.params().close(); + initializedModel.getModelParams().close(); } } diff --git a/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestInstantiation.java b/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestInstantiation.java index 
f9e8b83a1..27eb6e23d 100644 --- a/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestInstantiation.java +++ b/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestInstantiation.java @@ -21,10 +21,10 @@ package org.deeplearning4j.zoo; import lombok.extern.slf4j.Slf4j; +import net.brutex.ai.dnn.api.IModel; import org.deeplearning4j.BaseDL4JTest; import org.deeplearning4j.datasets.iterator.AsyncDataSetIterator; import org.deeplearning4j.datasets.iterator.impl.BenchmarkDataSetIterator; -import org.deeplearning4j.nn.api.Model; import org.deeplearning4j.nn.conf.inputs.InputType; import org.deeplearning4j.nn.conf.layers.LossLayer; import org.deeplearning4j.nn.conf.layers.OutputLayer; @@ -40,7 +40,6 @@ import org.junit.jupiter.api.Test; import org.nd4j.linalg.activations.Activation; import org.nd4j.linalg.api.buffer.DataType; import org.nd4j.linalg.api.ndarray.INDArray; -import org.nd4j.linalg.dataset.api.DataSet; import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; import org.nd4j.linalg.factory.Nd4j; import org.nd4j.linalg.lossfunctions.LossFunctions; @@ -108,7 +107,7 @@ public class TestInstantiation extends BaseDL4JTest { new int[]{8, inputShape[0], inputShape[1], inputShape[2]}, numClasses, 1, gridWidth, gridHeight); - Model initializedModel = model.init(); + IModel initializedModel = model.init(); AsyncDataSetIterator async = new AsyncDataSetIterator(iter); if (initializedModel instanceof MultiLayerNetwork) { ((MultiLayerNetwork) initializedModel).fit(async); @@ -202,7 +201,7 @@ public class TestInstantiation extends BaseDL4JTest { // clean up for current model Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); - initializedModel.params().close(); + initializedModel.getModelParams().close(); for(INDArray arr : result){ arr.close(); } @@ -272,7 +271,7 @@ public class TestInstantiation extends BaseDL4JTest { Nd4j.getWorkspaceManager().destroyAllWorkspacesForCurrentThread(); f.close(); l.close(); - initializedModel.params().close(); + initializedModel.getModelParams().close(); initializedModel.getFlattenedGradients().close(); System.gc(); } diff --git a/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestUtils.java b/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestUtils.java index a61ae386d..d759151e8 100644 --- a/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestUtils.java +++ b/cavis-zoo/cavis-zoo-models/src/test/java/org/deeplearning4j/zoo/TestUtils.java @@ -45,8 +45,8 @@ public class TestUtils { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(bais, true); - assertEquals(net.getLayerWiseConfigurations(), restored.getLayerWiseConfigurations()); - assertEquals(net.params(), restored.params()); + assertEquals(net.getNetConfiguration(), restored.getNetConfiguration()); + assertEquals(net.getModelParams(), restored.getModelParams()); return restored; } catch (IOException e){ @@ -65,8 +65,8 @@ public class TestUtils { ByteArrayInputStream bais = new ByteArrayInputStream(bytes); ComputationGraph restored = ModelSerializer.restoreComputationGraph(bais, true); - assertEquals(net.getConfiguration(), restored.getConfiguration()); - assertEquals(net.params(), restored.params()); + assertEquals(net.getComputationGraphConfiguration(), restored.getComputationGraphConfiguration()); + assertEquals(net.getModelParams(), restored.getModelParams()); return restored; } catch (IOException e){ diff --git a/settings.gradle 
b/settings.gradle index 0002d667d..d7875c751 100644 --- a/settings.gradle +++ b/settings.gradle @@ -100,6 +100,7 @@ include ':cavis-dnn:cavis-dnn-data:cavis-dnn-data-utility-iterators' include ':cavis-dnn:cavis-dnn-modelimport' include ':cavis-dnn:cavis-dnn-nlp' include ':cavis-dnn:cavis-dnn-nn' +//include ':cavis-dnn:cavis-dnn-nn-api' include ':cavis-dnn:cavis-dnn-nn-parent' include ':cavis-dnn:cavis-dnn-nn-parent:cavis-dnn-nn-server' include ':cavis-dnn:cavis-dnn-nn-parent:cavis-dnn-nn-client' @@ -154,3 +155,4 @@ include ':brutex-extended-tests' include ':cavis-full' +
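
Illustrative sketch of the migrated API (not applied by this patch): the hunks above move every zoo model from the old new NeuralNetConfiguration.Builder() / MultiLayerConfiguration / Model style to NeuralNetConfiguration.builder() / NeuralNetConfiguration / IModel, drop .list(), and rename setInputType(...) to inputType(...) and params() to getModelParams(). The minimal example below only assumes the builder and accessor names exactly as they appear in the hunks; the class name MigrationSketch, the layer sizes, and the seed are made up for illustration, and the ComputationGraph cast pattern is only referenced in a comment.

// Illustrative only; a minimal sketch of the configuration/model API after this
// refactor, using names taken from the hunks above. Not verified against the
// full build; layer sizes and class name are hypothetical.
import net.brutex.ai.dnn.api.IModel;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class MigrationSketch {

    // Old style removed by this patch:
    //   MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
    //       .seed(123).list()
    //       .layer(...)
    //       .setInputType(InputType.convolutionalFlat(28, 28, 1))
    //       .build();
    // New style, mirroring the LeNet / SimpleCNN hunks: no .list(), layers added
    // directly, inputType(...) instead of setInputType(...), and build() yields a
    // NeuralNetConfiguration. For ComputationGraph models the hunks additionally
    // cast the builder to NeuralNetConfiguration.NeuralNetConfigurationBuilder
    // before calling .graphBuilder().
    NeuralNetConfiguration conf() {
        return NeuralNetConfiguration.builder()
                .seed(123)
                .weightInit(WeightInit.XAVIER)
                .activation(Activation.IDENTITY)
                .layer(new ConvolutionLayer.Builder().name("cnn1").nIn(1).nOut(8).build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nOut(10).activation(Activation.SOFTMAX).build())
                .inputType(InputType.convolutionalFlat(28, 28, 1))
                .build();
    }

    // ZooModel.init() now returns the IModel interface instead of Model, and a
    // MultiLayerNetwork is constructed directly from the NeuralNetConfiguration.
    IModel init() {
        MultiLayerNetwork network = new MultiLayerNetwork(conf());
        network.init();
        return network;
    }

    // Parameter access after the rename: params() becomes getModelParams(), as in
    // the TestImageNet / TestInstantiation hunks.
    void cleanUp(IModel model) {
        model.getModelParams().close();
    }
}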