/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
package org.deeplearning4j.nn.multilayer;
import org.deeplearning4j.BaseDL4JTest;
import org.deeplearning4j.datasets.iterator.impl.IrisDataSetIterator;
import org.deeplearning4j.nn.api.Layer;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.gradient.Gradient;
import org.deeplearning4j.nn.params.DefaultParamInitializer;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.junit.jupiter.api.Test;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.iter.NdIndexIterator;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.transforms.strict.SigmoidDerivative;
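
// ---------------------------------------------------------------------------
// Illustrative sketch only; not part of the original file. The imports above
// are the kind used by a small multilayer-perceptron test on the Iris dataset,
// and the hypothetical test class below shows how they typically fit together.
// Class and method names, layer sizes, and hyperparameters are assumptions
// made for illustration, not the original test code.
// ---------------------------------------------------------------------------
class MlpOnIrisSketch extends BaseDL4JTest {

    @Test
    void sketchFitSmallMlpOnIris() {
        // Two-layer MLP: 4 Iris features -> 3 sigmoid hidden units -> 3-class softmax output.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(12345)
                .dataType(DataType.DOUBLE)
                .weightInit(WeightInit.XAVIER)
                .list()
                .layer(new DenseLayer.Builder().nIn(4).nOut(3)
                        .activation(Activation.SIGMOID).build())
                .layer(new OutputLayer.Builder(
                        org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction.MCXENT)
                        .nIn(3).nOut(3).activation(Activation.SOFTMAX).build())
                .build();

        // MultiLayerNetwork is in this package (org.deeplearning4j.nn.multilayer),
        // so it needs no additional import here.
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        net.setListeners(new ScoreIterationListener(10));

        // Single full-batch pass over the 150-example Iris dataset.
        IrisDataSetIterator iris = new IrisDataSetIterator(150, 150);
        net.fit(iris);

        // Forward pass on a fresh iterator; each output row holds per-class probabilities.
        iris.reset();
        INDArray out = net.output(iris.next().getFeatures());
        org.junit.jupiter.api.Assertions.assertEquals(3, out.columns());
    }
}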
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Removed dead code (#9057)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* performance improvement (#9055)
* performance improvement
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* revert some changes
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Cherry pick rl4j changes from most recent KonduitAI/deeplearning4j PR
* Update cherry pick again from last master revision.
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Ag pythongiloverhaul (#518)
* Development updates (#9053)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Removed dead code (#9057)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* performance improvement (#9055)
* performance improvement
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* revert some changes
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Cherry pick rl4j changes from most recent KonduitAI/deeplearning4j PR
* Update cherry pick again from last master revision.
* Re update python4j
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Bump formatter-maven-plugin from 2.0.0 to 2.12.1 (#505)
Bumps [formatter-maven-plugin](https://github.com/revelc/formatter-maven-plugin) from 2.0.0 to 2.12.1.
- [Release notes](https://github.com/revelc/formatter-maven-plugin/releases)
- [Changelog](https://github.com/revelc/formatter-maven-plugin/blob/formatter-maven-plugin-2.12.1/CHANGELOG.md)
- [Commits](https://github.com/revelc/formatter-maven-plugin/compare/formatter-maven-plugin-2.0.0...formatter-maven-plugin-2.12.1)
Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
* Ag fix9060 (#519)
* Development updates (#9053)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Removed dead code (#9057)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* performance improvement (#9055)
* performance improvement
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* revert some changes
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Added support for the archunit (#9062)
* Added support for the archunit
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Updated pom files
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Datavec code cleaup (#9071)
* removed unnecessary semicolons
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Use standard charset object
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Removed unused imports
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* WIP: Fix Conv1d causal case
* Add inital tests
* Update Conv1d tests to be a bit more robust
* Remove redundant test
* Reset from master
* Remove cuda definition (left over)
* Update rl4j again
* Update pom.xml
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Fixes 9061 (#521)
* Get rid of edge case in validation
* Added support for the archunit (#9062)
* Added support for the archunit
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Updated pom files
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Using embedded copying of an array instead of manual (#9073)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Datavec bulk operation (#9075)
* Bulk operation can be used instead of iteration inspection
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Redundant 'Collection.addAll()' call inspection
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Removed infinitely loop (#9076)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Revert "Merge eclipse changes" (#526)
* Revert rl4j to 72f5c18c830f62df2c04fbf8dc7b1353cc2d3182 (#527)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* * Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Fix L2NormalizeVertex and eclipse#9054 (#513)
* update
* Fix L2NormalizeVertex
Fix eclipse#9054
* RL4J: Add async training and advantage actor-critic (#507)
* Added async training & Advantage Actor Critic
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Fix compiler error
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Renamed ActorCriticPolicy back to ACPolicy
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
* Python GIL overhaul (#517)
* Development updates (#9053)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Removed dead code (#9057)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* performance improvement (#9055)
* performance improvement
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* revert some changes
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Cherry pick rl4j changes from most recent KonduitAI/deeplearning4j PR
* Update cherry pick again from last master revision.
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Ag pythongiloverhaul (#518)
* Development updates (#9053)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Removed dead code (#9057)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* performance improvement (#9055)
* performance improvement
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* revert some changes
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Cherry pick rl4j changes from most recent KonduitAI/deeplearning4j PR
* Update cherry pick again from last master revision.
* Re update python4j
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Bump formatter-maven-plugin from 2.0.0 to 2.12.1 (#505)
Bumps [formatter-maven-plugin](https://github.com/revelc/formatter-maven-plugin) from 2.0.0 to 2.12.1.
- [Release notes](https://github.com/revelc/formatter-maven-plugin/releases)
- [Changelog](https://github.com/revelc/formatter-maven-plugin/blob/formatter-maven-plugin-2.12.1/CHANGELOG.md)
- [Commits](https://github.com/revelc/formatter-maven-plugin/compare/formatter-maven-plugin-2.0.0...formatter-maven-plugin-2.12.1)
Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
* Ag fix9060 (#519)
* Development updates (#9053)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Removed dead code (#9057)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* performance improvement (#9055)
* performance improvement
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* revert some changes
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Added support for the archunit (#9062)
* Added support for the archunit
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Updated pom files
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Datavec code cleaup (#9071)
* removed unnecessary semicolons
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Use standard charset object
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Removed unused imports
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* WIP: Fix Conv1d causal case
* Add inital tests
* Update Conv1d tests to be a bit more robust
* Remove redundant test
* Reset from master
* Remove cuda definition (left over)
* Update rl4j again
* Update pom.xml
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* Fixes 9061 (#521)
* Get rid of edge case in validation
* Added support for the archunit (#9062)
* Added support for the archunit
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Updated pom files
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Using embedded copying of an array instead of manual (#9073)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Datavec bulk operation (#9075)
* Bulk operation can be used instead of iteration inspection
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Redundant 'Collection.addAll()' call inspection
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
* Removed infinitely loop (#9076)
Signed-off-by: Dariusz Zbyrad <dariusz.zbyrad@gmail.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
* RL4J: Add async training and advantage actor-critic (#507)
* Added async training & Advantage Actor Critic
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Fix compiler error
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Renamed ActorCriticPolicy back to ACPolicy
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
(cherry picked from commit 72f5c18c830f62df2c04fbf8dc7b1353cc2d3182)
* RL4J: Add async training and advantage actor-critic (#507)
* Added async training & Advantage Actor Critic
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Fix compiler error
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Renamed ActorCriticPolicy back to ACPolicy
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
(cherry picked from commit 72f5c18c830f62df2c04fbf8dc7b1353cc2d3182)
* Revert rl4j to 72f5c18c830f62df2c04fbf8dc7b1353cc2d3182
* Delete jnind4jaurora.cpp
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
* RL4J: Add partial support for RNN (#514)
* Added partial recurrent support
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Made sure the RNN always see the observation in EpsGreedy
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Converted all line endings of rl4j-core to LF (#530)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* NDJ4: Bundle configuration files required by AOT compilation with GraalVM (#529)
* NDJ4: Bundle configuration files required by AOT compilation with GraalVM
* Update dependencies to just released JavaCPP and JavaCV 1.5.4
* Ag fixtests 831 (#523)
* Update UnderSamplingPreProcessorTest.java
* Development updates (#9053)
* RL4J: Add generic update rule (#502)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
* Shyrma reduce (#481)
* - start working on improving of cpu legacy code for reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving legacy loops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - still working on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further work on improving reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - testing speed run of new reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - working on improvement of default loop for reduce op
Signed-off-by: Yurii <iuriish@yahoo.com>
* - update signatures of stuff which calls reduce ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - make corrections in cuda reduce kernels
Signed-off-by: Yurii <iuriish@yahoo.com>
* - change loop for default case in broadcast legacy ops
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment some shape stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - comment unnecessary prints in RNGtests
Signed-off-by: Yurii <iuriish@yahoo.com>
* - finish to resolve conflicts after master has been merged
Signed-off-by: Yurii <iuriish@yahoo.com>
* - get rid of some compilation mistakes of cuda stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor changes
Signed-off-by: Yurii <iuriish@yahoo.com>
* - further search for bug causing crash on java test
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add scalar case in reduce_ ... exec stuff
Signed-off-by: Yurii <iuriish@yahoo.com>
* - minor corrections in NAtiveOps.cu
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add switch to scalar case execReduceXD functions
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in ConstantShapeHelper::createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
* - correct cuda mirrorPad
Signed-off-by: Yurii <iuriish@yahoo.com>
* - add support for vectors old shape in cuda createShapeInfoWithNoUnitiesForReduce
Signed-off-by: Yurii <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
* Add support for CUDA 11.0 (#492)
* Add support for CUDA 11.0
* libnd4j tweaks for CUDA 11
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bindings update, again?
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update versions of JavaCPP Presets for FFmpeg, OpenBLAS, and NumPy
* update API to match CUDA 8
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* * Update version of JavaCPP Presets for CPython
* C++ updated for cuDNN 8.0
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* 128-bit alignment for workspaces
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* change seed in 1 test
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Fix dependecy duplication in python4j-parent pom
* Fix group id for in python4j-numpy
* few tests tweaked
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Remove macosx-x86_64-gpu from nd4j-tests-tensorflow
* few minor tweaks for IndexReduce
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one test removed
Signed-off-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: raver119@gmail.com <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* RL4J: Add SyncTrainer and AgentLearnerBuilder for a few algorithms (#504)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* Development updates (#9064)
* Update versions of JavaCPP Presets for OpenCV, FFmpeg, and MKL
Signed-off-by: Samuel Audet <samuel.audet@gmail.com>
* Add proper annotation
* Fix classcast exception for recurrent model import case
* Update keras import to allow for proper handling of changing NCHW -> NHWC mid later
* Add output to test to ensure proper activation
* Fixes computation graphs to allow dimension ordering to change mid graph
* Add NHWC support for keras import.
* Update tests to pass /ignore out of date ones
* Add multi RNNDataformat support
* Update tests to make more pass.
Updates some tests to be correct, double checked existing models and updated reasons they may or may not fail.
* Add back old default values to ensure legacy serialization works. Replace null value default with sentinel value for default value overridden.
* Update layers to preserve changed values
* Exclude default value over ridden from comparison
* Fix conv1d import (no permute weights anymore)
* Update KerasConvolution1D.java
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* GPU compute capability (#532)
* - GPU cpu capability flags
- CUDA MAJOR VERSION provided by cmake
Signed-off-by: AbdelRauf <rauf@konduit.ai>
* Readme
Signed-off-by: AbdelRauf <rauf@konduit.ai>
* Readme
Signed-off-by: AbdelRauf <rauf@konduit.ai>
* RL4J: Add new network implementation to help support recurrent networks (#531)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
Co-authored-by: Abdelrauf <qwr@live.ru>
2020-09-23 19:11:29 +09:00
|
|
|
import org.nd4j.linalg.api.ops.impl.transforms.strict.TanhDerivative;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;
import org.nd4j.linalg.exception.ND4JArraySizeException;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.learning.config.Sgd;
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction;
import org.nd4j.linalg.ops.transforms.Transforms;

import java.util.Arrays;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.fail;

import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.extension.ExtendWith;

@DisplayName("Back Prop MLP Test")
class BackPropMLPTest extends BaseDL4JTest {

    @Test
    @DisplayName("Test MLP Trivial")
    void testMLPTrivial() {
        // Simplest possible case: 1 hidden layer, 1 hidden neuron, batch size of 1.
        MultiLayerNetwork network = new MultiLayerNetwork(getIrisMLPSimpleConfig(new int[] { 1 }, Activation.SIGMOID));
        network.setListeners(new ScoreIterationListener(1));
        network.init();
        DataSetIterator iter = new IrisDataSetIterator(1, 10);
        while (iter.hasNext()) network.fit(iter.next());
    }

    @Test
    @DisplayName("Test MLP")
    void testMLP() {
        // Simple mini-batch test with multiple hidden layers
        MultiLayerConfiguration conf = getIrisMLPSimpleConfig(new int[] { 5, 4, 3 }, Activation.SIGMOID);
        // System.out.println(conf);
        MultiLayerNetwork network = new MultiLayerNetwork(conf);
        network.init();
        DataSetIterator iter = new IrisDataSetIterator(10, 100);
        while (iter.hasNext()) {
            network.fit(iter.next());
        }
    }

    @Test
    @DisplayName("Test MLP 2")
    void testMLP2() {
        // Simple mini-batch test with multiple hidden layers
        MultiLayerConfiguration conf = getIrisMLPSimpleConfig(new int[] { 5, 15, 3 }, Activation.TANH);
        // System.out.println(conf);
        MultiLayerNetwork network = new MultiLayerNetwork(conf);
        network.init();
        DataSetIterator iter = new IrisDataSetIterator(12, 120);
        while (iter.hasNext()) {
            network.fit(iter.next());
        }
    }

    @Test
    @DisplayName("Test Single Example Weight Updates")
    void testSingleExampleWeightUpdates() {
        // Simplest possible case: 1 hidden layer, 1 hidden neuron, batch size of 1.
        // Manually calculate weight updates (entirely outside of DL4J and ND4J)
        // and compare expected and actual weights after backprop
        DataSetIterator iris = new IrisDataSetIterator(1, 10);
        MultiLayerNetwork network = new MultiLayerNetwork(getIrisMLPSimpleConfig(new int[] { 1 }, Activation.SIGMOID));
        network.init();
        Layer[] layers = network.getLayers();
        final boolean printCalculations = false;
        while (iris.hasNext()) {
            DataSet data = iris.next();
            INDArray x = data.getFeatures();
            INDArray y = data.getLabels();
            float[] xFloat = asFloat(x);
            float[] yFloat = asFloat(y);
            // Do forward pass:
            // Hidden layer
            INDArray l1Weights = layers[0].getParam(DefaultParamInitializer.WEIGHT_KEY).dup();
            // Output layer
            INDArray l2Weights = layers[1].getParam(DefaultParamInitializer.WEIGHT_KEY).dup();
            INDArray l1Bias = layers[0].getParam(DefaultParamInitializer.BIAS_KEY).dup();
            INDArray l2Bias = layers[1].getParam(DefaultParamInitializer.BIAS_KEY).dup();
            float[] l1WeightsFloat = asFloat(l1Weights);
            float[] l2WeightsFloat = asFloat(l2Weights);
            float l1BiasFloat = l1Bias.getFloat(0);
            float[] l2BiasFloatArray = asFloat(l2Bias);
            // z = w*x + b
            float hiddenUnitPreSigmoid = dotProduct(l1WeightsFloat, xFloat) + l1BiasFloat;
            // a = sigma(z)
            float hiddenUnitPostSigmoid = sigmoid(hiddenUnitPreSigmoid);
            float[] outputPreSoftmax = new float[3];
            // Normally a matrix multiplication here, but only one hidden unit in this trivial example
            for (int i = 0; i < 3; i++) {
                outputPreSoftmax[i] = hiddenUnitPostSigmoid * l2WeightsFloat[i] + l2BiasFloatArray[i];
            }
            float[] outputPostSoftmax = softmax(outputPreSoftmax);
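            // Worked form of the output activation above, assuming the softmax(...) helper defined in
            // this class implements the standard softmax: softmax(z)_i = exp(z_i) / sum_j exp(z_j),
            // applied over the 3 output units. With a single hidden unit, each outputPreSoftmax[i] is
            // just hiddenUnitPostSigmoid scaled by the i-th output weight, plus the i-th output bias.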
            // Do backward pass:
            // out - labels
            float[] deltaOut = vectorDifference(outputPostSoftmax, yFloat);
            // deltaHidden = sigmaPrime(hiddenUnitZ) * sum_k (w_jk * delta_k); here, only one j
            float deltaHidden = 0.0f;
            for (int i = 0; i < 3; i++) deltaHidden += l2WeightsFloat[i] * deltaOut[i];
            deltaHidden *= derivOfSigmoid(hiddenUnitPreSigmoid);
            // Calculate weight/bias updates:
            // dL/dW = delta * (activation of prev. layer)
            // dL/db = delta
            float[] dLdwOut = new float[3];
            for (int i = 0; i < dLdwOut.length; i++) dLdwOut[i] = deltaOut[i] * hiddenUnitPostSigmoid;
            float[] dLdwHidden = new float[4];
            for (int i = 0; i < dLdwHidden.length; i++) dLdwHidden[i] = deltaHidden * xFloat[i];
            float[] dLdbOut = deltaOut;
            float dLdbHidden = deltaHidden;
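            // Why deltaOut is simply (output - labels): for a softmax output layer trained with
            // negative log-likelihood (MCXENT) loss, dL/dz_k = a_k - y_k, so the softmax and loss
            // derivatives collapse into a plain vector difference. This assumes getIrisMLPSimpleConfig
            // configures that loss on the output layer; a different loss would need its own derivative here.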
            if (printCalculations) {
                System.out.println("deltaOut = " + Arrays.toString(deltaOut));
                System.out.println("deltaHidden = " + deltaHidden);
                System.out.println("dLdwOut = " + Arrays.toString(dLdwOut));
                System.out.println("dLdbOut = " + Arrays.toString(dLdbOut));
                System.out.println("dLdwHidden = " + Arrays.toString(dLdwHidden));
                System.out.println("dLdbHidden = " + dLdbHidden);
            }
            // Calculate new parameters:
            // w_i = w_i - (learningRate)/(batchSize) * sum_j (dL_j/dw_i)
            // b_i = b_i - (learningRate)/(batchSize) * sum_j (dL_j/db_i)
            // Which for batch size of one (here) is simply:
            // w_i = w_i - learningRate * dL/dw_i
            // b_i = b_i - learningRate * dL/db_i
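            // The 0.1f factor below is the plain SGD step, param = param - learningRate * gradient.
            // It assumes getIrisMLPSimpleConfig(...) sets up an Sgd updater with learning rate 0.1;
            // if that helper's learning rate changes, these hand-computed expected values change with it.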
            float[] expectedL1WeightsAfter = new float[4];
            float[] expectedL2WeightsAfter = new float[3];
            float expectedL1BiasAfter = l1BiasFloat - 0.1f * dLdbHidden;
            float[] expectedL2BiasAfter = new float[3];
            for (int i = 0; i < 4; i++) expectedL1WeightsAfter[i] = l1WeightsFloat[i] - 0.1f * dLdwHidden[i];
            for (int i = 0; i < 3; i++) expectedL2WeightsAfter[i] = l2WeightsFloat[i] - 0.1f * dLdwOut[i];
            for (int i = 0; i < 3; i++) expectedL2BiasAfter[i] = l2BiasFloatArray[i] - 0.1f * dLdbOut[i];
            // Finally, do back-prop on network, and compare parameters vs. expected parameters
            network.fit(data);
            /* INDArray l1WeightsAfter = layers[0].getParam(DefaultParamInitializer.WEIGHT_KEY).dup(); //Hidden layer
            INDArray l2WeightsAfter = layers[1].getParam(DefaultParamInitializer.WEIGHT_KEY).dup(); //Output layer
            INDArray l1BiasAfter = layers[0].getParam(DefaultParamInitializer.BIAS_KEY).dup();
            INDArray l2BiasAfter = layers[1].getParam(DefaultParamInitializer.BIAS_KEY).dup();
            float[] l1WeightsFloatAfter = asFloat(l1WeightsAfter);
            float[] l2WeightsFloatAfter = asFloat(l2WeightsAfter);
            float l1BiasFloatAfter = l1BiasAfter.getFloat(0);
            float[] l2BiasFloatAfter = asFloat(l2BiasAfter);

            if (printCalculations) {
                System.out.println("Expected L1 weights = " + Arrays.toString(expectedL1WeightsAfter));
                System.out.println("Actual L1 weights = " + Arrays.toString(asFloat(l1WeightsAfter)));
                System.out.println("Expected L2 weights = " + Arrays.toString(expectedL2WeightsAfter));
                System.out.println("Actual L2 weights = " + Arrays.toString(asFloat(l2WeightsAfter)));
                System.out.println("Expected L1 bias = " + expectedL1BiasAfter);
                System.out.println("Actual L1 bias = " + Arrays.toString(asFloat(l1BiasAfter)));
                System.out.println("Expected L2 bias = " + Arrays.toString(expectedL2BiasAfter));
                System.out.println("Actual L2 bias = " + Arrays.toString(asFloat(l2BiasAfter)));
            }

            float eps = 1e-4f;
            assertArrayEquals(l1WeightsFloatAfter, expectedL1WeightsAfter, eps);
            assertArrayEquals(l2WeightsFloatAfter, expectedL2WeightsAfter, eps);
            assertEquals(l1BiasFloatAfter, expectedL1BiasAfter, eps);
            assertArrayEquals(l2BiasFloatAfter, expectedL2BiasAfter, eps);
            */
            // System.out.println("\n\n--------------");
        }
    }

    @Test
    @DisplayName("Test MLP Gradient Calculation")
    void testMLPGradientCalculation() {
        testIrisMiniBatchGradients(1, new int[] { 1 }, Activation.SIGMOID);
        testIrisMiniBatchGradients(1, new int[] { 5 }, Activation.SIGMOID);
        testIrisMiniBatchGradients(12, new int[] { 15, 25, 10 }, Activation.SIGMOID);
        testIrisMiniBatchGradients(50, new int[] { 10, 50, 200, 50, 10 }, Activation.TANH);
        testIrisMiniBatchGradients(150, new int[] { 30, 50, 20 }, Activation.TANH);
    }

    private static void testIrisMiniBatchGradients(int miniBatchSize, int[] hiddenLayerSizes, Activation activationFunction) {
        int totalExamples = 10 * miniBatchSize;
        if (totalExamples > 150) {
            totalExamples = miniBatchSize * (150 / miniBatchSize);
        }
        if (miniBatchSize > 150) {
            fail();
        }
        DataSetIterator iris = new IrisDataSetIterator(miniBatchSize, totalExamples);
        MultiLayerNetwork network = new MultiLayerNetwork(getIrisMLPSimpleConfig(hiddenLayerSizes, Activation.SIGMOID));
        network.init();
        Layer[] layers = network.getLayers();
        int nLayers = layers.length;
        while (iris.hasNext()) {
            DataSet data = iris.next();
            INDArray x = data.getFeatures();
            INDArray y = data.getLabels();
// Do forward pass:
|
2019-06-06 15:21:15 +03:00
|
|
|
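// For each layer i: z[i] = a[i-1] * W[i] + b[i] (with the input features x feeding the first layer),
// a[i] = sigmoid(z[i]) for hidden layers and a[i] = softmax(z[i]) for the output layer.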
INDArray[] layerWeights = new INDArray[nLayers];
INDArray[] layerBiases = new INDArray[nLayers];
for (int i = 0; i < nLayers; i++) {
layerWeights[i] = layers[i].getParam(DefaultParamInitializer.WEIGHT_KEY).dup();
layerBiases[i] = layers[i].getParam(DefaultParamInitializer.BIAS_KEY).dup();
}
INDArray[] layerZs = new INDArray[nLayers];
INDArray[] layerActivations = new INDArray[nLayers];
for (int i = 0; i < nLayers; i++) {
INDArray layerInput = (i == 0 ? x : layerActivations[i - 1]);
layerZs[i] = layerInput.castTo(layerWeights[i].dataType()).mmul(layerWeights[i]).addiRowVector(layerBiases[i]);
layerActivations[i] = (i == nLayers - 1 ? doSoftmax(layerZs[i].dup()) : doSigmoid(layerZs[i].dup()));
}

// Do backward pass:
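// With softmax output + MCXENT loss the output delta is simply (predictions - labels); hidden
// deltas then follow delta[i] = (delta[i+1] * W[i+1]^T) .* sigmoidPrime(z[i]).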
INDArray[] deltas = new INDArray[nLayers];
// Out - labels; shape=[miniBatchSize,nOut];
deltas[nLayers - 1] = layerActivations[nLayers - 1].sub(y.castTo(layerActivations[nLayers - 1].dataType()));
assertArrayEquals(deltas[nLayers - 1].shape(), new long[] { miniBatchSize, 3 });
for (int i = nLayers - 2; i >= 0; i--) {
INDArray sigmaPrimeOfZ = doSigmoidDerivative(layerZs[i]);
INDArray epsilon = layerWeights[i + 1].mmul(deltas[i + 1].transpose()).transpose();
deltas[i] = epsilon.mul(sigmaPrimeOfZ);
assertArrayEquals(deltas[i].shape(), new long[] { miniBatchSize, hiddenLayerSizes[i] });
}

INDArray[] dLdw = new INDArray[nLayers];
INDArray[] dLdb = new INDArray[nLayers];
for (int i = 0; i < nLayers; i++) {
INDArray prevActivations = (i == 0 ? x : layerActivations[i - 1]);
// Raw gradients, so not yet divided by mini-batch size (division is done in BaseUpdater)
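// dL/dW[i] = a[i-1]^T * delta[i] (computed below as (delta[i]^T * a[i-1])^T), and dL/db[i] is the
// column-wise sum of delta[i] over the mini-batch.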
// Shape: [nIn, nOut]
dLdw[i] = deltas[i].transpose().castTo(prevActivations.dataType()).mmul(prevActivations).transpose();
// Shape: [1,nOut]
dLdb[i] = deltas[i].sum(true, 0);

int nIn = (i == 0 ? 4 : hiddenLayerSizes[i - 1]);
int nOut = (i < nLayers - 1 ? hiddenLayerSizes[i] : 3);
assertArrayEquals(dLdw[i].shape(), new long[] { nIn, nOut });
assertArrayEquals(dLdb[i].shape(), new long[] { 1, nOut });
}

// Calculate and get gradient, compare to expected
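// computeGradientAndScore() exposes the raw (pre-updater) gradients, keyed per layer as
// layerIndex + "_" + DefaultParamInitializer.WEIGHT_KEY / BIAS_KEY.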
network.setInput(x);
network.setLabels(y);
network.computeGradientAndScore();
Gradient gradient = network.gradientAndScore().getFirst();
float eps = 1e-4f;
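// Compare DL4J's gradients against the manually computed ones for each hidden layer.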
for (int i = 0; i < hiddenLayerSizes.length; i++) {
String wKey = i + "_" + DefaultParamInitializer.WEIGHT_KEY;
String bKey = i + "_" + DefaultParamInitializer.BIAS_KEY;
INDArray wGrad = gradient.getGradientFor(wKey);
INDArray bGrad = gradient.getGradientFor(bKey);
float[] wGradf = asFloat(wGrad);
float[] bGradf = asFloat(bGrad);
float[] expWGradf = asFloat(dLdw[i]);
float[] expBGradf = asFloat(dLdb[i]);
assertArrayEquals(wGradf, expWGradf, eps);
assertArrayEquals(bGradf, expBGradf, eps);
}
}
}

/**
 * Very simple back-prop configuration for Iris: plain SGD with learning rate 0.1,
 * no regularization, no Adagrad, no momentum. One iteration.
 */
private static MultiLayerConfiguration getIrisMLPSimpleConfig(int[] hiddenLayerSizes, Activation activationFunction) {
NeuralNetConfiguration.ListBuilder lb = new NeuralNetConfiguration.Builder().updater(new Sgd(0.1)).seed(12345L).list();
for (int i = 0; i < hiddenLayerSizes.length; i++) {
int nIn = (i == 0 ? 4 : hiddenLayerSizes[i - 1]);
lb.layer(i, new DenseLayer.Builder().nIn(nIn).nOut(hiddenLayerSizes[i]).weightInit(WeightInit.XAVIER).activation(activationFunction).build());
}
lb.layer(hiddenLayerSizes.length, new OutputLayer.Builder(LossFunction.MCXENT).nIn(hiddenLayerSizes[hiddenLayerSizes.length - 1]).nOut(3).weightInit(WeightInit.XAVIER).activation(activationFunction.equals(Activation.IDENTITY) ? Activation.IDENTITY : Activation.SOFTMAX).build());
return lb.build();
}
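// Flattens an INDArray into a float[] in C (row-major) order.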
public static float[] asFloat(INDArray arr) {
long len = arr.length();
if (len > Integer.MAX_VALUE)
throw new ND4JArraySizeException();
float[] f = new float[(int) len];
NdIndexIterator iterator = new NdIndexIterator('c', arr.shape());
for (int i = 0; i < len; i++) {
f[i] = arr.getFloat(iterator.next());
}
return f;
}

public static float dotProduct(float[] x, float[] y) {
float sum = 0.0f;
for (int i = 0; i < x.length; i++) sum += x[i] * y[i];
return sum;
}

public static float sigmoid(float in) {
return (float) (1.0 / (1.0 + Math.exp(-in)));
}

public static float[] sigmoid(float[] in) {
float[] out = new float[in.length];
for (int i = 0; i < in.length; i++) {
out[i] = sigmoid(in[i]);
}
return out;
}

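// Sigmoid derivative expressed in terms of the sigmoid output: expects in = sigmoid(x) and returns
// sigmoid(x) * (1 - sigmoid(x)); the commented-out line below is the equivalent form written
// directly in terms of the pre-activation x.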
public static float derivOfSigmoid(float in) {
// float v = (float)( Math.exp(in) / Math.pow(1+Math.exp(in),2.0) );
float v = in * (1 - in);
return v;
}

public static float[] derivOfSigmoid(float[] in) {
float[] out = new float[in.length];
for (int i = 0; i < in.length; i++) {
out[i] = derivOfSigmoid(in[i]);
}
return out;
}
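// Straightforward softmax over a float[]: out[i] = exp(in[i]) / sum_j exp(in[j]).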
public static float[] softmax(float[] in) {
float[] out = new float[in.length];
float sumExp = 0.0f;
for (int i = 0; i < in.length; i++) {
sumExp += Math.exp(in[i]);
}
for (int i = 0; i < in.length; i++) {
out[i] = (float) Math.exp(in[i]) / sumExp;
}
return out;
}

public static float[] vectorDifference(float[] x, float[] y) {
float[] out = new float[x.length];
for (int i = 0; i < x.length; i++) {
out[i] = x[i] - y[i];
}
return out;
}

public static INDArray doSoftmax(INDArray input) {
return Transforms.softmax(input, true);
}

public static INDArray doSigmoid(INDArray input) {
return Transforms.sigmoid(input, true);
}
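// Sigmoid derivative via ND4J's SigmoidDerivative op, applied to a copy of the input so the
// original array is left untouched.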
public static INDArray doSigmoidDerivative(INDArray input) {
return Nd4j.getExecutioner().exec(new SigmoidDerivative(input.dup()));
}
}