################################################################################
#
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################

set(CMAKE_VERBOSE_MAKEFILE ON)

if(LINUX)
    link_directories(/usr/local/lib)
    link_directories(/usr/lib)
    link_directories(/lib)
endif()

if(APPLE)
    message("Using apple")
    link_directories(/usr/local/lib)
    link_directories(/usr/lib)
    link_directories(/lib)
endif()

if (SD_APPLE_BUILD)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_APPLE_BUILD=true -mmacosx-version-min=10.10")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_APPLE_BUILD=true -mmacosx-version-min=10.10")
endif()

if (SD_ARM_BUILD)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_ARM_BUILD=true")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_ARM_BUILD=true")
endif()

if (SD_ANDROID_BUILD)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_ANDROID_BUILD=true")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_ANDROID_BUILD=true")
endif()

if (SD_IOS_BUILD)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_IOS_BUILD=true")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_IOS_BUILD=true")
endif()
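
# Illustrative only (hypothetical invocation): the SD_*_BUILD toggles above are
# plain cache variables and would typically be supplied at configure time, e.g.
#   cmake -DSD_ARM_BUILD=true ..
#   cmake -DSD_ANDROID_BUILD=true ..
# The project's own build scripts may pass them differently.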

if(WIN32 AND NOT ANDROID)
    message("Building for Windows")
    get_property(dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES)
    if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wa,-mbig-obj")
    endif()
    foreach(dir ${dirs})
        message(STATUS "dir='${dir}'")
    endforeach()

    # workaround for long command lines
    SET(CMAKE_C_USE_RESPONSE_FILE_FOR_OBJECTS 1)
    SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_OBJECTS 1)

    SET(CMAKE_C_RESPONSE_FILE_LINK_FLAG "@")
    SET(CMAKE_CXX_RESPONSE_FILE_LINK_FLAG "@")

    SET(CMAKE_NINJA_FORCE_RESPONSE_FILE 1 CACHE INTERNAL "")
endif()
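
# The response-file settings above work around the Windows/Ninja command-line
# length limit: object and link arguments are written to an @<file> response
# file instead of being passed directly on the command line.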

if ("${SD_ALL_OPS}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_ALL_OPS=true")
else()
    message("_OPS: ${SD_OPS_LIST}")
    foreach(OP "${SD_OPS_LIST}")
        message(STATUS "${OP}")
    endforeach()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SD_OPS_LIST}")
endif()
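
# Illustrative sketch (hypothetical op names): SD_OPS_LIST is appended verbatim
# to CMAKE_CXX_FLAGS, so it is expected to already be formatted as compiler
# definitions, e.g. something along the lines of
#   cmake -DSD_ALL_OPS=true ..
#   cmake -DSD_OPS_LIST="-DOP_add=true -DOP_matmul=true" ..
# The real definition names are not shown in this file.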

IF(${SD_ARCH} MATCHES "armv8")
    message("Building ARM v8 (64-bit) binary...")
    set(ARCH_TUNE "-march=${SD_ARCH}")
ELSEIF(${SD_ARCH} MATCHES "armv7")
    message("Building ARM v7 binary...")
    set(ARCH_TUNE "-march=${SD_ARCH} -mfpu=neon ")
ELSEIF(${SD_ARCH} MATCHES "power*")
    message("Building Power binary...")
    set(ARCH_TUNE "-mcpu=${SD_ARCH} -mtune=${SD_ARCH} -D__POWER")
ELSEIF("${SD_ARCH}" STREQUAL "x86-64")
    message("Building x86_64 binary...")
    set(ARCH_TYPE "generic")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DF_X64=true")
ELSE()
    message("Building ${SD_ARCH} binary...")
    set(ARCH_TYPE "${SD_ARCH}")
ENDIF()
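
# Illustrative (hypothetical values): SD_ARCH names the target CPU family and is
# normally passed at configure time, e.g.
#   cmake -DSD_ARCH=armv8 ..        # or armv7, x86-64, a power* value, ...
# The ARCH_TUNE / ARCH_TYPE values computed above feed the -march/-mtune flags
# applied further down.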

IF(${SD_EXTENSION} MATCHES "avx2")
    message("Extension AVX2 enabled.")
    set(ARCH_TUNE "${ARCH_TUNE} -mmmx -msse -msse2 -msse3 -msse4.1 -msse4.2 -mavx -mavx2 -mfma -mf16c -mprefetchwt1 -DSD_F16C=true -DF_AVX2=true")
ELSEIF(${SD_EXTENSION} MATCHES "avx512")
    message("Extension AVX512 enabled.")
    # set the flags here so we can use hardware f16 conversion and tell the compiler that CPU features should be tracked
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmmx -msse -msse2 -msse3 -msse4.1 -msse4.2 -mavx -mavx2 -mfma -mf16c -mavx512f -mavx512vl -mavx512bw -mavx512dq -mavx512cd -mbmi -mbmi2 -mprefetchwt1 -mclflushopt -mxsavec -mxsaves -DSD_F16C=true -DF_AVX512=true")
ENDIF()
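
# Illustrative (hypothetical values): the instruction-set extension is selected
# the same way, e.g.
#   cmake -DSD_ARCH=x86-64 -DSD_EXTENSION=avx2 ..
# Leaving SD_EXTENSION unset builds without the AVX2/AVX512-specific flags.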

if (NOT WIN32)
    # we don't want this definition for msvc
    set(ARCH_TUNE "-march=${SD_ARCH} -mtune=${ARCH_TYPE}")
endif()

if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" AND SD_X86_BUILD)
    # apple clang but not ios-arm
    SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_TUNE}")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    # using Clang
    SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_TUNE}")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
    # using Intel C++
    SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_TUNE} -O3 -fp-model fast")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
    # using Visual Studio C++
    set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_TUNE}")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
    # using GCC
    SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_TUNE} -fmax-errors=2 -fdiagnostics-show-caret ")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-rpath,$ORIGIN/")

    if (CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT(APPLE) AND NOT(WIN32))
        SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -rdynamic -Wl,-export-dynamic")
        SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -export-dynamic")
    endif()
endif()

IF(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
    include_directories("/usr/include")
    include_directories("/usr/local/include")
ENDIF(${CMAKE_SYSTEM_NAME} MATCHES "Linux")

# default to the CPU backend when neither SD_CUDA nor SD_CPU was requested
if(NOT SD_CUDA)
    if(NOT SD_CPU)
        set(SD_CUDA FALSE)
        set(SD_CPU TRUE)
    endif()
endif()
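
# Illustrative (hypothetical invocation): the backend is usually requested
# explicitly, e.g.
#   cmake -DSD_CPU=true ..       # CPU backend
#   cmake -DSD_CUDA=true ..      # CUDA backend
# with the CPU backend acting as the fallback when neither flag is given.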

# if MKLDNN is enabled, we build the mkldnn-powered helpers
if (HAVE_MKLDNN)
    file(GLOB_RECURSE CUSTOMOPS_MKLDNN_SOURCES false ops/declarable/platform/mkldnn/*.cpp ops/declarable/platform/mkldnn/mkldnnUtils.h)
endif()

if(HAVE_ARMCOMPUTE)
    file(GLOB_RECURSE CUSTOMOPS_ARMCOMPUTE_SOURCES false ops/declarable/platform/armcompute/*.cpp ops/declarable/platform/armcompute/*.h)
endif()
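
# The CUSTOMOPS_MKLDNN_SOURCES / CUSTOMOPS_ARMCOMPUTE_SOURCES lists gathered
# above are presumably folded into the library sources further down in this file.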

if(SD_CUDA)
    message("Build cublas")
    if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
        set(CMAKE_CUDA_ARCHITECTURES 75)
    endif()
    message(STATUS "CUDA architectures set to ${CMAKE_CUDA_ARCHITECTURES}")
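
    # Illustrative (hypothetical values): the default of 75 only applies when the
    # caller did not set CMAKE_CUDA_ARCHITECTURES; it can be overridden with a
    # standard semicolon-separated list, e.g.
    #   cmake -DSD_CUDA=true -DCMAKE_CUDA_ARCHITECTURES="70;75;80" ..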

    find_package(CUDAToolkit)
    enable_language(CUDA)

    set(CMAKE_CUDA_STANDARD 17)
    set(CMAKE_CXX_STANDARD 14)

    add_definitions(-D__CUDABLAS__=true)
    # Enable features from prior to C++17
    add_definitions(-D_HAS_AUTO_PTR_ETC=1)

    # This basically kills the intrinsics activated through SD_F16C=true
    #if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
    #    set (CMAKE_CXX_FLAGS "")
    #endif()

    if (CUDAToolkit_FOUND)
        include_directories(${CUDAToolkit_INCLUDE_DIRS})
        message("CUDA found!")
        if ("${SD_EXPERIMENTAL}" STREQUAL "yes")
            message("Experimental mode ENABLED")
            set(CMAKE_CUDA_FLAGS " ${CMAKE_CUDA_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
            set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
            set(EXPM " -D__ND4J_EXPERIMENTAL__=true")
        endif()

        # the only difference for debug mode here is host/device debug symbols
        set(CMAKE_CUDA_FLAGS_DEBUG " -G -g")

        # we need -fPIC on Linux/GCC
        if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
            message("Enabling fPIC for GNU compilers...")
            set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler=-fPIC")
        endif()
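
        # Note on the debug flags above: nvcc's -G emits device-side debug info
        # (and disables most device code optimizations); -g covers the host side.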

        if(WIN32)
            message("On Windows, setting cuBLAS and cuSOLVER library paths")
            if(NOT DEFINED CUDA_cublas_LIBRARY)
                set(CUDA_cublas_LIBRARY ${CUDA_HOME}/lib/x64/cublas.lib)
            endif()

            if(NOT DEFINED CUDA_cusolver_LIBRARY)
                set(CUDA_cusolver_LIBRARY ${CUDA_HOME}/lib/x64/cusolver.lib)
            endif()
        endif()
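
        # The fallback paths above assume CUDA_HOME points at the CUDA toolkit
        # root; passing -DCUDA_cublas_LIBRARY=... / -DCUDA_cusolver_LIBRARY=...
        # at configure time takes precedence (guarded by the NOT DEFINED checks).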

        #
        #string( TOLOWER "${COMPUTE}" COMPUTE_CMP )
        # if ("${COMPUTE_CMP}" STREQUAL "all")
        #   CUDA_SELECT_NVCC_ARCH_FLAGS(CUDA_ARCH_FLAGS "Common")
        # elseif("${COMPUTE_CMP}" STREQUAL "auto")
        #   CUDA_SELECT_NVCC_ARCH_FLAGS(CUDA_ARCH_FLAGS "Auto")
        # elseif(COMPUTE_CMP MATCHES "^[0-9]+$")
        #   #matches USER COMPUTE old way
        #   #set(CUDA_ARCH_FLAGS "-gencode arch=compute_${COMPUTE},code=sm_${COMPUTE} ")
        # else()
        #   #matches numbers NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX
        #   #NAME: Fermi Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal
        #   #NUM: 2.0 2.1 3.0 3.2 3.5 3.7 5.0 5.2 5.3 6.0 6.2 et cetera
        #   CUDA_SELECT_NVCC_ARCH_FLAGS(CUDA_ARCH_FLAGS "${COMPUTE}")
        # endif()
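
        # The commented-out COMPUTE-based selection above appears to predate the
        # CMAKE_CUDA_ARCHITECTURES handling near the top of this block and is
        # kept only for reference.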
* Update tests to pass /ignore out of date ones
* Add multi RNNDataformat support
* Update tests to make more pass.
Updates some tests to be correct, double checked existing models and updated reasons they may or may not fail.
* Add back old default values to ensure legacy serialization works. Replace null value default with sentinel value for default value overridden.
* Update layers to preserve changed values
* Exclude default value over ridden from comparison
* Fix conv1d import (no permute weights anymore)
* Update KerasConvolution1D.java
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
* GPU compute capability (#532)
* - GPU cpu capability flags
- CUDA MAJOR VERSION provided by cmake
Signed-off-by: AbdelRauf <rauf@konduit.ai>
* Readme
Signed-off-by: AbdelRauf <rauf@konduit.ai>
* Readme
Signed-off-by: AbdelRauf <rauf@konduit.ai>
* RL4J: Add new network implementation to help support recurrent networks (#531)
Signed-off-by: Alexandre Boulanger <aboulang2002@yahoo.com>
Co-authored-by: Alexandre Boulanger <44292157+aboulang2002@users.noreply.github.com>
Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
Co-authored-by: raver119 <raver119@gmail.com>
Co-authored-by: Samuel Audet <samuel.audet@gmail.com>
Co-authored-by: Serhii Shepel <9946053+sshepel@users.noreply.github.com>
Co-authored-by: dariuszzbyrad <dariusz.zbyrad@gmail.com>
Co-authored-by: dependabot-preview[bot] <27856297+dependabot-preview[bot]@users.noreply.github.com>
Co-authored-by: Abdelrauf <qwr@live.ru>
# list to spaces
#string (REPLACE ";" " " CUDA_ARCH_FLAGS "${CUDA_ARCH_FLAGS}")
#set(CMAKE_CUDA_FLAGS " ${CMAKE_CUDA_FLAGS} -DCUDA_VERSION_MAJOR=${CUDA_VERSION_MAJOR} ${EXPM} -w --cudart=static --expt-extended-lambda -Xfatbin -compress-all ")
set(CMAKE_CUDA_ARCHITECTURES OFF)

#set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --generate-code \"arch=compute_53,code=[compute_53,sm_53]\" " )
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --generate-code \"arch=compute_61,code=[compute_61,sm_61]\" " )
#set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --generate-code \"arch=compute_75,code=[compute_75,sm_75]\" " )
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --extended-lambda ")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr ")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -DCUDA_VERSION_MAJOR=11 -w --cudart=static -Xfatbin -compress-all")
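# NOTE: only compute capability 6.1 is generated above. As a sketch (not enabled here, and
# assuming CMake 3.18+), the same selection could be expressed through CMake's native
# CMAKE_CUDA_ARCHITECTURES list instead of hand-written --generate-code flags:
#
#   set(CMAKE_CUDA_ARCHITECTURES 53 61 75)
#
# Which capabilities to include depends on the GPUs this build is expected to support.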
if(WIN32)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler=/EHsc")
endif()

#set(GPU_ARCH)
message("CMAKE_CUDA_FLAGS = ${CMAKE_CUDA_FLAGS}")
|
|
|
|
message("CMAKE_CXX_FLAGS = ${CMAKE_CXX_FLAGS}")
|
|
|
|
message("CMAKE_CUDA_FLAGS_RELEASE = ${CMAKE_CUDA_FLAGS_RELEASE}")
|
|
|
|
message("CMAKE_CXX_FLAGS_RELEASE = ${CMAKE_CXX_FLAGS_RELEASE}")
|
|
|
|
message("CMAKE_CUDA_EXTENSIONS = ${CMAKE_CUDA_EXTENSIONS}")
|
|
|
|
message("CUDA_NVCC_FLAGS = ${CUDA_NVCC_FLAGS}")
|
|
|
|
message("CUDA_PROPAGATE_HOST_FLAGS = ${CUDA_PROPAGATE_HOST_FLAGS}")
|
|
|
|
message("CUDA_ARCH_FLAGS = ${CUDA_ARCH_FLAGS}")
|
|
|
|
|
|
|
|
file(GLOB_RECURSE PERF_SOURCES false performance/*.cpp performance/*.h)
file(GLOB_RECURSE EXCEPTIONS_SOURCES false exceptions/*.cpp exceptions/*.h)
file(GLOB_RECURSE EXEC_SOURCES false execution/impl/*.cpp execution/*.cu execution/*.h)
file(GLOB_RECURSE TYPES_SOURCES false types/*.cpp types/*.h)
file(GLOB_RECURSE ARRAY_SOURCES false array/impl/*.cpp array/cuda/*.cu array/*.h)
file(GLOB_RECURSE MEMORY_SOURCES false memory/impl/*.cpp memory/cuda/*.cu memory/*.h)
file(GLOB_RECURSE GRAPH_SOURCES false graph/*.cpp graph/*.cu graph/*.h)
file(GLOB_RECURSE CUSTOMOPS_SOURCES false ops/declarable/generic/*.cpp)
file(GLOB_RECURSE CUSTOMOPS_HELPERS_SOURCES false ops/declarable/helpers/cuda/*.cu ops/declarable/helpers/impl/*.cpp)
file(GLOB_RECURSE OPS_SOURCES false ops/impl/*.cpp ops/declarable/impl/*.cpp ops/*.h)
file(GLOB_RECURSE HELPERS_SOURCES false build_info.cu helpers/impl/*.cpp helpers/*.cu helpers/*.cupp helpers/*.h)
file(GLOB_RECURSE INDEXING_SOURCES false indexing/*.cpp indexing/*.h)
file(GLOB_RECURSE LOOPS_SOURCES false ./loops/impl/*.cpp ./loops/*.h)
file(GLOB_RECURSE LEGACY_SOURCES false legacy/impl/*.cpp legacy/*.cu legacy/*.h)
file(GLOB_RECURSE LOOPS_SOURCES_CUDA false loops/cuda/*.cu)
file(GLOB_RECURSE COMPILATION_UNITS false loops/cuda/compilation_units/*.cu.in
     ops/impl/compilation_units/*.cpp.in)

foreach(FL_ITEM ${COMPILATION_UNITS})
genCompilation(FL_ITEM)
endforeach()
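# genCompilation() is a helper defined elsewhere in this build; it expands each *.cu.in/*.cpp.in
# template into concrete compilation units that are added to the source lists. As a rough,
# hypothetical sketch of the idea only (not the actual implementation), such a helper could be
# as simple as:
#
#   function(expandTemplateSketch TEMPLATE_FILE)
#       get_filename_component(UNIT_NAME ${TEMPLATE_FILE} NAME_WE)
#       configure_file(${TEMPLATE_FILE} ${CMAKE_BINARY_DIR}/compilation_units/${UNIT_NAME}.cu @ONLY)
#   endfunction()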
if (HAVE_CUDNN)
message("cuDNN included")
file(GLOB_RECURSE CUSTOMOPS_CUDNN_SOURCES false ops/declarable/platform/cudnn/*.cu)
else()
message("cuDNN not included")
endif()
add_library(samediff_obj OBJECT ${LOOPS_SOURCES_CUDA} ${LEGACY_SOURCES}
     ${CUSTOMOPS_HELPERS_SOURCES} ${HELPERS_SOURCES} ${EXEC_SOURCES}
     ${LOOPS_SOURCES} ${ARRAY_SOURCES} ${TYPES_SOURCES}
     ${MEMORY_SOURCES} ${GRAPH_SOURCES} ${CUSTOMOPS_SOURCES} ${INDEXING_SOURCES} ${EXCEPTIONS_SOURCES} ${OPS_SOURCES} ${PERF_SOURCES} ${CUSTOMOPS_CUDNN_SOURCES} ${CUSTOMOPS_MKLDNN_SOURCES}
     ${CUSTOMOPS_ARMCOMPUTE_SOURCES} ${CUSTOMOPS_GENERIC_SOURCES})

# expose the current source directory as an include path for the object library
target_include_directories(samediff_obj PUBLIC .)
if (WIN32)
message("MSVC runtime for library: ${MSVC_RT_LIB}")
endif()
# build shared library by default or when it's explicitly requested
if(NOT SD_STATIC_LIB OR SD_SHARED_LIB)
add_library(${SD_LIBRARY_NAME} SHARED $<TARGET_OBJECTS:samediff_obj>)
endif()
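# The shared and static targets above and below are both assembled from the samediff_obj OBJECT
# library, so the sources are compiled only once. A minimal standalone sketch of this pattern
# (names here are illustrative only, not part of this build):
#
#   add_library(core_obj OBJECT a.cpp b.cpp)
#   add_library(core_shared SHARED $<TARGET_OBJECTS:core_obj>)
#   add_library(core_static STATIC $<TARGET_OBJECTS:core_obj>)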
if (SD_STATIC_LIB AND SD_SHARED_LIB)
# if both static and shared library are going to be built - static library will have special suffix
add_library(${SD_LIBRARY_NAME}static STATIC $<TARGET_OBJECTS:samediff_obj>)
set_property(TARGET ${SD_LIBRARY_NAME}static PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
install(TARGETS ${SD_LIBRARY_NAME}static DESTINATION .)
elseif(SD_STATIC_LIB)
# if we only build static library - use this name
add_library(${SD_LIBRARY_NAME} STATIC $<TARGET_OBJECTS:samediff_obj>)
set_property(TARGET ${SD_LIBRARY_NAME} PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
install(TARGETS ${SD_LIBRARY_NAME} DESTINATION .)
endif()
# On Windows we have to pick either the MT or MD runtime; since it is set on one library,
# every target must use the same runtime to avoid conflicts.
set_property(TARGET samediff_obj PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
set_property(TARGET ${SD_LIBRARY_NAME} PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
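# The per-target MSVC_RUNTIME_LIBRARY property above could alternatively be set once for the
# whole build; a sketch of that approach (assuming CMake >= 3.15 with policy CMP0091 set to NEW,
# which is not what this file currently does):
#
#   cmake_policy(SET CMP0091 NEW)
#   set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL")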
# nvcc already enables exception handling for host code by default on Windows; mirror it (plus /bigobj) for the plain C++ compiler
if(WIN32)
message("CUDA on Windows: enabling /EHsc")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /bigobj")
endif()
#target_link_libraries(${SD_LIBRARY_NAME} ${CUDA_LIBRARIES} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_cusolver_LIBRARY} ${CUDNN} ${MKLDNN})
target_link_libraries(${SD_LIBRARY_NAME} CUDA::cudart CUDA::cublas CUDA::cusolver ${CUDNN} ${MKLDNN})

set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/cuda/${SD_EXTENSION})
install(TARGETS ${SD_LIBRARY_NAME} DESTINATION .)
endif(CUDAToolkit_FOUND)
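# CUDA::cudart, CUDA::cublas and CUDA::cusolver above are imported targets provided by
# find_package(CUDAToolkit) (CMake 3.17+), which is assumed to have run earlier in this file
# (hence the endif(CUDAToolkit_FOUND) guard). A minimal usage sketch outside this build:
#
#   find_package(CUDAToolkit REQUIRED)
#   target_link_libraries(mylib PRIVATE CUDA::cudart CUDA::cublas CUDA::cusolver)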
elseif(SD_CPU)

if ("${SD_EXPERIMENTAL}" STREQUAL "yes")
message("Experimental mode ENABLED")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
endif()
file(GLOB_RECURSE PERF_SOURCES false performance/*.cpp performance/*.h)
file(GLOB_RECURSE EXCEPTIONS_SOURCES false exceptions/*.cpp exceptions/*.h)
file(GLOB_RECURSE EXEC_SOURCES false execution/*.cpp execution/*.h)
file(GLOB_RECURSE TYPES_SOURCES false types/*.cpp types/*.h)
file(GLOB_RECURSE ARRAY_SOURCES false array/*.cpp array/*.h)
file(GLOB_RECURSE MEMORY_SOURCES false memory/*.cpp memory/*.h)
file(GLOB_RECURSE GRAPH_SOURCES false graph/*.cpp graph/*.h)
file(GLOB_RECURSE CUSTOMOPS_SOURCES false ops/declarable/generic/*.cpp)
file(GLOB_RECURSE CUSTOMOPS_GENERIC_SOURCES false ops/declarable/helpers/cpu/*.cpp ops/declarable/helpers/impl/*.cpp)
file(GLOB_RECURSE OPS_SOURCES false ops/impl/*.cpp ops/declarable/impl/*.cpp ops/*.h)
file(GLOB_RECURSE INDEXING_SOURCES false indexing/*.cpp indexing/*.h)
file(GLOB_RECURSE HELPERS_SOURCES false build_info.cpp helpers/*.cpp helpers/*.h)
file(GLOB_RECURSE LEGACY_SOURCES false legacy/impl/*.cpp legacy/cpu/*.cpp ./legacy/*.h system/*.h)
file(GLOB_RECURSE LOOPS_SOURCES false loops/*.cpp loops/*.h)
file(GLOB_RECURSE COMPILATION_UNITS false ops/declarable/helpers/cpu/compilation_units/*.cpp.in
     loops/cpu/compilation_units/*.cpp.in helpers/cpu/loops/*.cpp.in
     ops/impl/compilation_units/*.cpp.in)

foreach(FL_ITEM ${COMPILATION_UNITS})
genCompilation(FL_ITEM)
endforeach()
if (SD_X86_BUILD)
# we disable platform optimizations for certain files on linux/macos
set_source_files_properties(cpu/NativeOps.cpp PROPERTIES COMPILE_FLAGS "-march=x86-64 -mtune=generic")
set_source_files_properties(../include/helpers/impl/OpTracker.cpp PROPERTIES COMPILE_FLAGS "-march=x86-64 -mtune=generic")
endif()
if(SD_CHECK_VECTORIZATION)
set(VECT_FILES cpu/NativeOps.cpp ${OPS_SOURCES} ${HELPERS_SOURCES} ${CUSTOMOPS_GENERIC_SOURCES} ${LOOPS_SOURCES})
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")

if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 9.0)
set(CHECK_VECT_FLAGS "-ftree-vectorize -fsave-optimization-record")
# processing the -fsave-optimization-record output requires our Cython helper
message("Build Auto vectorization helpers")
execute_process(COMMAND "python3" "${CMAKE_CURRENT_SOURCE_DIR}/../auto_vectorization/cython_setup.py" "build_ext" "--inplace" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../auto_vectorization/" RESULT_VARIABLE ret)
message("build='${ret}'")

# remove cases that gcc sometimes fails on when these flags are enabled
file(GLOB_RECURSE FAILURE_CASES false loops/cpu/compilation_units/reduce3*.cpp)
#message("*****${FAILURE_CASES}")
foreach(FL_ITEM ${FAILURE_CASES})
message("Removing failure cases ${FL_ITEM}")
list(REMOVE_ITEM VECT_FILES ${FL_ITEM})
endforeach()
else()
set(CHECK_VECT_FLAGS "-ftree-vectorize -fopt-info-vec-optimized-missed")
endif()

message("CHECK VECTORIZATION ${CHECK_VECT_FLAGS}")
set_source_files_properties( ${VECT_FILES} PROPERTIES COMPILE_FLAGS "${CHECK_VECT_FLAGS}" )
endif()
endif()
message("Build native CPU BLAS")
|
2019-06-06 15:21:15 +03:00
|
|
|
add_definitions(-D__CPUBLAS__=true)
|
2021-03-05 10:59:02 +09:00
|
|
|
|
2020-05-12 07:48:29 +03:00
|
|
|
add_library(samediff_obj OBJECT ${LEGACY_SOURCES}
     ${LOOPS_SOURCES} ${HELPERS_SOURCES} ${EXEC_SOURCES} ${ARRAY_SOURCES} ${TYPES_SOURCES}
     ${MEMORY_SOURCES} ${GRAPH_SOURCES} ${CUSTOMOPS_SOURCES} ${EXCEPTIONS_SOURCES} ${INDEXING_SOURCES} ${CUSTOMOPS_MKLDNN_SOURCES}
     ${CUSTOMOPS_ARMCOMPUTE_SOURCES} ${CUSTOMOPS_GENERIC_SOURCES} ${OPS_SOURCES} ${PERF_SOURCES})

#target_include_directories(samediff_obj PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(samediff_obj PUBLIC ./)
if(IOS)
message("Building static library for IOS ${SD_LIBRARY_NAME}")
add_library(${SD_LIBRARY_NAME} STATIC $<TARGET_OBJECTS:samediff_obj>)
else()
# build shared library by default or when it's explicitly requested
if(NOT SD_STATIC_LIB OR SD_SHARED_LIB)
message("Building a shared library for ${SD_LIBRARY_NAME}")
add_library(${SD_LIBRARY_NAME} SHARED $<TARGET_OBJECTS:samediff_obj>)

#set_target_properties(${SD_LIBRARY_NAME} PROPERTIES IMPORT_SUFFIX ".lib")
#target_link_libraries(${SD_LIBRARY_NAME} $<TARGET_OBJECTS:samediff_obj>)
if(ANDROID)
# See: https://www.scivision.dev/cmake-ninja-job-pool-limited-memory/
# See: https://cmake.org/cmake/help/v3.0/command/cmake_host_system_information.html
# See: https://cmake.org/cmake/help/latest/prop_gbl/JOB_POOLS.html
cmake_host_system_information(RESULT _logical_cores QUERY NUMBER_OF_LOGICAL_CORES)
if(_logical_cores LESS 4)
set_target_properties(${SD_LIBRARY_NAME} PROPERTIES JOB_POOL_COMPILE one_jobs)
endif()
endif()
endif()
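# JOB_POOL_COMPILE above refers to a Ninja job pool named "one_jobs", which is assumed to be
# declared elsewhere in this build (e.g. the top-level CMakeLists); declaring such a pool
# looks roughly like this:
#
#   set_property(GLOBAL PROPERTY JOB_POOLS one_jobs=1)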
if (SD_STATIC_LIB AND SD_SHARED_LIB)
# if both static and shared library are going to be built - static library will have special suffix
message("Adding a static library for ${SD_LIBRARY_NAME} as ${SD_LIBRARY_NAME}static")
add_library(${SD_LIBRARY_NAME}static STATIC $<TARGET_OBJECTS:samediff_obj>)
set_property(TARGET ${SD_LIBRARY_NAME}static PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
install(TARGETS ${SD_LIBRARY_NAME}static DESTINATION .)
elseif(SD_STATIC_LIB)
# if we only build static library - use this name
message("Only building a static library for ${SD_LIBRARY_NAME}")
add_library(${SD_LIBRARY_NAME} STATIC $<TARGET_OBJECTS:samediff_obj>)
set_property(TARGET ${SD_LIBRARY_NAME} PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
install(TARGETS ${SD_LIBRARY_NAME} DESTINATION .)
endif()
endif()
# we're including ${MKLDNN} here in case of building from sources; in the future that will replace ${MKLDNN_LIBRARIES}. The same applies to BLAS.
if (NOT BLAS_LIBRARIES)
set(BLAS_LIBRARIES "")
endif()
# dump all CMake variables for build diagnostics
get_cmake_property(_variableNames VARIABLES)
list (SORT _variableNames)
foreach (_variableName ${_variableNames})
message(STATUS "${_variableName}=${${_variableName}}")
endforeach()
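# If the full variable dump above is too noisy, a filtered sketch (hypothetical, not used here)
# could limit it to this build's own switches:
#
#   foreach (_variableName ${_variableNames})
#       if(_variableName MATCHES "^SD_")
#           message(STATUS "${_variableName}=${${_variableName}}")
#       endif()
#   endforeach()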
# Linking here breaks the build when running under the CLion IDE, so it is skipped in that case;
# normally you want to run the tests anyway.
if(NOT "$ENV{CLION_IDE}")
target_link_libraries(${SD_LIBRARY_NAME} ${MKLDNN} ${MKLDNN_LIBRARIES} ${ARMCOMPUTE_LIBRARIES} ${OPENBLAS_LIBRARIES} ${BLAS_LIBRARIES} ${CPU_FEATURES})
endif()
if ("${SD_ALL_OPS}" AND "${SD_BUILD_MINIFIER}")
|
2019-06-06 15:21:15 +03:00
|
|
|
message(STATUS "Building minifier...")
|
|
|
|
add_executable(minifier ../minifier/minifier.cpp ../minifier/graphopt.cpp)
|
2020-06-26 11:03:46 +04:00
|
|
|
target_link_libraries(minifier samediff_obj ${MKLDNN_LIBRARIES} ${ARMCOMPUTE_LIBRARIES} ${OPENBLAS_LIBRARIES} ${MKLDNN} ${BLAS_LIBRARIES} ${CPU_FEATURES})
|
2019-06-06 15:21:15 +03:00
|
|
|
endif()
|
|
|
|
|
|
|
|
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 4.9)
|
2021-02-09 07:44:23 +09:00
|
|
|
message(FATAL_ERROR "You need at least GCC 4.9")
|
2019-06-06 15:21:15 +03:00
|
|
|
endif()
|
|
|
|
|
|
|
|
# OpenMP works well pretty much only with GCC
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
find_package(OpenMP)
if (OPENMP_FOUND)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
endif()
endif()
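# The global-flags approach above predates CMake's imported OpenMP target; an equivalent sketch
# using the OpenMP::OpenMP_CXX target from find_package(OpenMP) (CMake >= 3.9) would be:
#
#   find_package(OpenMP)
#   if(OpenMP_CXX_FOUND)
#       target_link_libraries(${SD_LIBRARY_NAME} OpenMP::OpenMP_CXX)
#   endif()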
message("Installing ${SD_LIBRARY_NAME}")
|
2020-03-02 12:49:41 +03:00
|
|
|
install(TARGETS ${SD_LIBRARY_NAME} DESTINATION .)
|
2022-09-20 15:40:53 +02:00
|
|
|
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/cpu/${SD_EXTENSION}/)
|
|
|
|
|
2019-06-06 15:21:15 +03:00
|
|
|
endif()
|