libnd4j polishing (#273)

* initial set of include changes

Signed-off-by: raver119 <raver119@gmail.com>

* one more tweak

Signed-off-by: raver119 <raver119@gmail.com>

* few more rearrangements

Signed-off-by: raver119 <raver119@gmail.com>

* few more rearrangements

Signed-off-by: raver119 <raver119@gmail.com>

* few more rearrangements

Signed-off-by: raver119 <raver119@gmail.com>

* cuda includes rearrangements

Signed-off-by: raver119 <raver119@gmail.com>

* java update

Signed-off-by: raver119 <raver119@gmail.com>

* = namespace changed to sd
- few CMake variables renamed with SD_ prefix

Signed-off-by: raver119 <raver119@gmail.com>

* java update

Signed-off-by: raver119 <raver119@gmail.com>

* LoopKind minor fix

Signed-off-by: raver119 <raver119@gmail.com>

* few more changes

Signed-off-by: raver119 <raver119@gmail.com>

* few more changes

Signed-off-by: raver119 <raver119@gmail.com>

* few more changes

Signed-off-by: raver119 <raver119@gmail.com>

* sanitizer is optional now

Signed-off-by: raver119 <raver119@gmail.com>

* dev tests updated

Signed-off-by: raver119 <raver119@gmail.com>

* few more changes

Signed-off-by: raver119 <raver119@gmail.com>

* last update

Signed-off-by: raver119 <raver119@gmail.com>

* java update

Signed-off-by: raver119 <raver119@gmail.com>
master
raver119 2020-03-02 12:49:41 +03:00 committed by GitHub
parent 483c3d7b8c
commit 63fa3c2ef3
1524 changed files with 17887 additions and 17658 deletions
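
For anyone configuring the build by hand, the core of the change is the SD_ prefix on the CMake switches. A minimal before/after sketch of a CPU configure line, with flag names taken from the CMakeLists.txt diff below; the source directory (here "..") is a placeholder:

# Before this commit a CPU build was configured roughly like this:
#   cmake -DCPU_BLAS=true -DBUILD_TESTS=ON -DLIBND4J_ALL_OPS=true -DARCH=x86-64 -DEXTENSION=avx2 ..
# After this commit the same switches carry the SD_ prefix (CUDA_BLAS becomes SD_CUDA):
cmake -DSD_CPU=true -DSD_BUILD_TESTS=ON -DSD_ALL_OPS=true -DSD_ARCH=x86-64 -DSD_EXTENSION=avx2 ..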


@@ -1,17 +1,23 @@
 cmake_minimum_required(VERSION 3.15)
 project(libnd4j)
 set(CMAKE_VERBOSE_MAKEFILE OFF)
-option(NATIVE "Optimize for build machine (might not work on others)" OFF)
 set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
 #ensure we create lib files
 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS OFF)
-option(CHECK_VECTORIZATION "checks for vectorization" OFF)
-option(BUILD_TESTS "Build tests" OFF)
+option(SD_NATIVE "Optimize for build machine (might not work on others)" OFF)
+option(SD_CHECK_VECTORIZATION "checks for vectorization" OFF)
+option(SD_BUILD_TESTS "Build tests" OFF)
+option(SD_STATIC_LIB "Build static library" OFF)
+option(SD_SHARED_LIB "Build shared library" ON)
+option(SD_SANITIZE "Enable Address Sanitizer" ON)
 option(FLATBUFFERS_BUILD_FLATC "Enable the build of the flatbuffers compiler" OFF)
 set(FLATBUFFERS_BUILD_FLATC "OFF" CACHE STRING "Hack to disable flatc build" FORCE)
 set(CMAKE_CXX_STANDARD 11)
-if (CUDA_BLAS)
+if (SD_CUDA)
 enable_language(CUDA)
 set(CMAKE_CUDA_STANDARD 11)
@@ -23,23 +29,23 @@ endif()
 # MSVC runtime lib can be either "MultiThreaded" or "MultiThreadedDLL", /MT and /MD respectively
 set(MSVC_RT_LIB "MultiThreadedDLL")
-set(X86_BUILD false)
+set(SD_X86_BUILD false)
-if (NOT IOS_BUILD AND NOT ANDROID_BUILD AND NOT ${ARCH} MATCHES "power*" AND NOT ${ARCH} MATCHES "arm*")
+if (NOT SD_IOS_BUILD AND NOT SD_ANDROID_BUILD AND NOT ${SD_ARCH} MATCHES "power*" AND NOT ${SD_ARCH} MATCHES "arm*")
-set(X86_BUILD true)
+set(SD_X86_BUILD true)
 endif()
 # -fsanitize=address
 # -fsanitize=leak
-if (ANDROID_BUILD)
+if (SD_ANDROID_BUILD)
 set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -fPIC -Wno-braced-scalar-init -Wno-delete-non-virtual-dtor -Wno-unused-command-line-argument -Wno-dangling-else -D_RELEASE=true")
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g -fPIC -Wno-braced-scalar-init -Wno-delete-non-virtual-dtor -Wno-unused-command-line-argument -Wno-dangling-else")
 elseif (APPLE)
 set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fPIC -Wno-braced-scalar-init -Wno-delete-non-virtual-dtor -Wno-unused-command-line-argument -Wno-dangling-else -D__APPLE_OS__=true -D_RELEASE=true")
 set(CMAKE_CXX_FLAGS_DEBUG " -O0 -g -fPIC -Wno-braced-scalar-init -Wno-delete-non-virtual-dtor -Wno-unused-command-line-argument -Wno-dangling-else -D__APPLE_OS__=true")
 elseif(WIN32)
-set(X86_BUILD true)
+set(SD_X86_BUILD true)
-if (CUDA_BLAS)
+if (SD_CUDA)
 set(CMAKE_CXX_FLAGS_RELEASE "-D_RELEASE=true")
 set(CMAKE_CXX_FLAGS_DEBUG " /FS /EHsc")
 else()
@@ -50,14 +56,14 @@ else()
 set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fPIC -fmax-errors=2 -D_RELEASE=true")
 set(CMAKE_CXX_FLAGS_DEBUG " -g -O0 -fPIC -fmax-errors=2")
-if (CPU_BLAS)
+if (SD_CPU)
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address")
 endif()
 endif()
-if(NATIVE)
+if(SD_NATIVE)
 IF(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64*")
-set(X86_BUILD false)
+set(SD_X86_BUILD false)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=native")
 ELSE()
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
@@ -65,7 +71,7 @@ if(NATIVE)
 endif()
-if(NOT CUDA_BLAS)
+if(NOT SD_CUDA)
 # we need this definition to avoid global memory use within mkldnn
 add_definitions(-DDNNL_ENABLE_CONCURRENT_EXEC=true)
@@ -91,7 +97,7 @@ if(NOT CUDA_BLAS)
 endif()
 # building cpu_features
-if (X86_BUILD)
+if (SD_X86_BUILD)
 add_definitions(-DCPU_FEATURES=true)
 set(BUILD_PIC "ON" CACHE STRING "Hack to enforce fPIC mode" FORCE)
 configure_file(./CMakeLists.txt.cpu_features.in cpu_features-download/CMakeLists.txt)
@@ -153,7 +159,7 @@ endif()
 if (${HELPERS_cudnn})
-if (NOT CUDA_BLAS)
+if (NOT SD_CUDA)
 message(FATAL_ERROR "Can't build cuDNN on non-CUDA platform")
 endif()
@@ -215,12 +221,12 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}/include)
 if (NOT DEFINED ENV{CLION_IDE})
 message("NOT CLION")
-include_directories(blas/ include/ include/helpers include/loops include/graph include/execution include/ops include/types include/array include/cnpy include/exceptions)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
 add_subdirectory(blas)
-if(BUILD_TESTS)
+if(SD_BUILD_TESTS)
 # tests are always compiled with all ops included
-set(LIBND4J_ALL_OPS true)
+set(SD_ALL_OPS true)
-set(LIBND4J_BUILD_MINIFIER true)
+set(SD_BUILD_MINIFIER true)
 add_subdirectory(tests_cpu)
 endif()
 endif ()
@@ -230,7 +236,7 @@ if ($ENV{CLION_IDE})
 endif ()
 if (MSVC_DEV)
-set(LIBND4J_BUILD_MINIFIER false)
+set(SD_BUILD_MINIFIER false)
 endif ()
 set (CMAKE_INSTALL_PREFIX $ENV{ND4J_HOME}/nd4j-native-parent/nd4j-native/src/main/resources)
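
The options block above also turns the library flavours and the address sanitizer into explicit switches (SD_STATIC_LIB, SD_SHARED_LIB, SD_SANITIZE, matching the "sanitizer is optional now" commit). A hedged sketch of toggling them on a plain out-of-source build; the -S/-B paths and job count are placeholders:

cmake -DSD_CPU=true -DSD_SHARED_LIB=ON -DSD_STATIC_LIB=OFF -DSD_SANITIZE=OFF -DSD_NATIVE=ON -S . -B build
cmake --build build -j 4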


@@ -9,7 +9,7 @@
 ],
 "buildRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\build\\${name}",
 "installRoot": "${env.USERPROFILE}\\CMakeBuilds\\${workspaceHash}\\install\\${name}",
-"cmakeCommandArgs": " -DCUDA_BLAS=true -DLIBND4J_NAME=nd4jcuda -DMSVC_DEV=true -DCOMPUTE=61 -DBUILD_TESTS=true",
+"cmakeCommandArgs": " -DSD_CUDA=true -DLIBND4J_NAME=nd4jcuda -DMSVC_DEV=true -DCOMPUTE=61 -DBUILD_TESTS=true",
 "buildCommandArgs": "-v",
 "ctestCommandArgs": ""
 },
@@ -20,7 +20,7 @@
 "buildRoot": "${projectDir}\\out\\build\\${name}",
 "installRoot": "${projectDir}\\out\\install\\${name}",
 "cmakeExecutable": "/usr/bin/cmake",
-"cmakeCommandArgs": "-DLIBND4J_ALL_OPS=true -DCMAKE_BUILD_TYPE=Debug -DCPU_BLAS=true -DLIBND4J_NAME=nd4jcpu -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Debug -DOPENBLAS_PATH=/usr/lib/openblas-base/ -DEXTENSION=avx2 ",
+"cmakeCommandArgs": "-DSD_ALL_OPS=true -DCMAKE_BUILD_TYPE=Debug -DSD_CPU=true -DLIBND4J_NAME=nd4jcpu -DBUILD_TESTS=ON -DCMAKE_BUILD_TYPE=Debug -DOPENBLAS_PATH=/usr/lib/openblas-base/ -DEXTENSION=avx2 ",
 "buildCommandArgs": "-j 4",
 "ctestCommandArgs": "",
 "inheritEnvironments": [ "linux_x64" ],


@@ -29,24 +29,24 @@ if(APPLE)
 link_directories(/lib)
 endif()
-if (APPLE_BUILD)
+if (SD_APPLE_BUILD)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DAPPLE_BUILD=true -mmacosx-version-min=10.10")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_APPLE_BUILD=true -mmacosx-version-min=10.10")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DAPPLE_BUILD=true -mmacosx-version-min=10.10")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_APPLE_BUILD=true -mmacosx-version-min=10.10")
 endif()
-if (ARM_BUILD)
+if (SD_ARM_BUILD)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DARM_BUILD=true")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_ARM_BUILD=true")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DARM_BUILD=true")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_ARM_BUILD=true")
 endif()
-if (ANDROID_BUILD)
+if (SD_ANDROID_BUILD)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DANDROID_BUILD=true")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_ANDROID_BUILD=true")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DANDROID_BUILD=true")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_ANDROID_BUILD=true")
 endif()
-if (IOS_BUILD)
+if (SD_IOS_BUILD)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DIOS_BUILD=true")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_IOS_BUILD=true")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DIOS_BUILD=true")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSD_IOS_BUILD=true")
 endif()
 if(WIN32)
@@ -68,33 +68,33 @@ if(WIN32)
 SET(CMAKE_NINJA_FORCE_RESPONSE_FILE 1 CACHE INTERNAL "")
 endif()
-if ("${LIBND4J_ALL_OPS}")
+if ("${SD_ALL_OPS}")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLIBND4J_ALL_OPS=true")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSD_ALL_OPS=true")
 else()
-message("_OPS: ${LIBND4J_OPS_LIST}")
+message("_OPS: ${SD_OPS_LIST}")
-foreach(OP "${LIBND4J_OPS_LIST}")
+foreach(OP "${SD_OPS_LIST}")
 message(STATUS "${OP}")
 endforeach()
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${LIBND4J_OPS_LIST}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SD_OPS_LIST}")
 endif()
-IF(${ARCH} MATCHES "arm*")
+IF(${SD_ARCH} MATCHES "arm*")
-set(ARCH_TUNE "-march=${ARCH}")
+set(ARCH_TUNE "-march=${SD_ARCH}")
-ELSEIF(${ARCH} MATCHES "power*")
+ELSEIF(${SD_ARCH} MATCHES "power*")
-set(ARCH_TUNE "-mcpu=${ARCH} -mtune=${ARCH} -D__POWER")
+set(ARCH_TUNE "-mcpu=${SD_ARCH} -mtune=${SD_ARCH} -D__POWER")
-ELSEIF(${EXTENSION} MATCHES "avx2")
+ELSEIF(${SD_EXTENSION} MATCHES "avx2")
 message("Building AVX2 binary...")
 set(ARCH_TUNE "-mmmx -msse -msse2 -msse3 -msse4.1 -msse4.2 -mavx -mavx2 -mfma -mf16c -mprefetchwt1 -DSD_F16C=true -DF_AVX2=true")
 ELSE()
-if ("${ARCH}" STREQUAL "x86-64")
+if ("${SD_ARCH}" STREQUAL "x86-64")
 message("Building x86_64 binary...")
 set(ARCH_TYPE "generic")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DF_X64=true")
 else()
-set(ARCH_TYPE "${ARCH}")
+set(ARCH_TYPE "${SD_ARCH}")
 endif()
-IF(${EXTENSION} MATCHES "avx512")
+IF(${SD_EXTENSION} MATCHES "avx512")
 message("Building AVX512 binary...")
 # we need to set flag here, that we can use hardware f16 conversion + tell that cpu features should be tracked
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmmx -msse -msse2 -msse3 -msse4.1 -msse4.2 -mavx -mavx2 -mfma -mf16c -mavx512f -mavx512vl -mavx512bw -mavx512dq -mavx512cd -mbmi -mbmi2 -mprefetchwt1 -mclflushopt -mxsavec -mxsaves -DSD_F16C=true -DF_AVX512=true")
@@ -102,11 +102,11 @@ ELSE()
 if (NOT WIN32)
 # we don't want this definition for msvc
-set(ARCH_TUNE "-march=${ARCH} -mtune=${ARCH_TYPE}")
+set(ARCH_TUNE "-march=${SD_ARCH} -mtune=${ARCH_TYPE}")
 endif()
 ENDIF()
-if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" AND X86_BUILD)
+if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang" AND SD_X86_BUILD)
 # apple clang but not ios-arm
 SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ARCH_TUNE}")
 elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
@@ -129,10 +129,10 @@ IF(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
 include_directories("/usr/include")
 include_directories("/usr/local/include")
 ENDIF(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
-if(!CUDA_BLAS)
+if(!SD_CUDA)
-if(!CPU_BLAS)
+if(!SD_CPU)
-set(CUDA_BLAS FALSE)
+set(SD_CUDA FALSE)
-set(CPU_BLAS TRUE)
+set(SD_CPU TRUE)
 endif()
 endif()
@@ -141,7 +141,7 @@ if (HAVE_MKLDNN)
 file(GLOB_RECURSE CUSTOMOPS_MKLDNN_SOURCES false ../include/ops/declarable/platform/mkldnn/*.cpp ../include/ops/declarable/platform/mkldnn/mkldnnUtils.h)
 endif()
-if(CUDA_BLAS)
+if(SD_CUDA)
 message("Build cublas")
 find_package(CUDA)
 add_definitions(-D__CUDABLAS__=true)
@@ -154,7 +154,7 @@ if(CUDA_BLAS)
 include_directories(${CUDA_INCLUDE_DIRS})
 message("CUDA found!")
-if ("${EXPERIMENTAL}" STREQUAL "yes")
+if ("${SD_EXPERIMENTAL}" STREQUAL "yes")
 message("Experimental mode ENABLED")
 set(CMAKE_CUDA_FLAGS " ${CMAKE_CUDA_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
@@ -218,6 +218,7 @@ if(CUDA_BLAS)
 file(GLOB_RECURSE HELPERS_SOURCES false ../include/helpers/impl/*.cpp ../include/helpers/*.cu ../include/helpers/*.cupp ../include/helpers/*.h)
 file(GLOB_RECURSE INDEXING_SOURCES false ../include/indexing/*.cpp ../include/indexing/*.h)
 file(GLOB_RECURSE LOOPS_SOURCES false ../include/loops/impl/*.cpp ../include/loops/*.h)
+file(GLOB_RECURSE LEGACY_SOURCES false ../include/legacy/impl/*.cpp ../include/legacy/*.cu ../include/legacy/*.h)
 file(GLOB_RECURSE LOOPS_SOURCES_CUDA false ../include/loops/*.cu)
 if (HAVE_CUDNN)
@@ -225,43 +226,41 @@ if(CUDA_BLAS)
 file(GLOB_RECURSE CUSTOMOPS_CUDNN_SOURCES false ../include/ops/declarable/platform/cudnn/*.cu)
 endif()
-add_library(nd4jobj OBJECT cuda/NativeOps.cu cuda/NativeOpExecutioner.cu cuda/BlasVersionHelper.cu Environment.cpp ${LOOPS_SOURCES_CUDA}
+add_library(nd4jobj OBJECT ${LOOPS_SOURCES_CUDA} ${LEGACY_SOURCES}
 ${CUSTOMOPS_HELPERS_SOURCES} ${HELPERS_SOURCES} ${EXEC_SOURCES}
-../include/cnpy/cnpy.cpp ../include/nd4jmemset.h ../include/nd4jmalloc.h
-cpu/GraphExecutioner.cpp cuda/NDArray.cu cpu/NDArrayFactory.cpp
-Environment.h ${LOOPS_SOURCES} ${ARRAY_SOURCES} ${TYPES_SOURCES}
+${LOOPS_SOURCES} ${ARRAY_SOURCES} ${TYPES_SOURCES}
 ${MEMORY_SOURCES} ${GRAPH_SOURCES} ${CUSTOMOPS_SOURCES} ${INDEXING_SOURCES} ${EXCEPTIONS_SOURCES} ${OPS_SOURCES} ${PERF_SOURCES} ${CUSTOMOPS_CUDNN_SOURCES} ${CUSTOMOPS_MKLDNN_SOURCES})
-add_library(${LIBND4J_NAME} SHARED $<TARGET_OBJECTS:nd4jobj>)
+add_library(${SD_LIBRARY_NAME} SHARED $<TARGET_OBJECTS:nd4jobj>)
 if (WIN32)
 message("MSVC runtime for library: ${MSVC_RT_LIB}")
 endif()
 # static library is built only if we're going to build tests, skip otherwise
-if (BUILD_TESTS)
+if (SD_BUILD_TESTS OR SD_STATIC_LIB)
-add_library(${LIBND4J_NAME}static STATIC $<TARGET_OBJECTS:nd4jobj>)
+add_library(${SD_LIBRARY_NAME}static STATIC $<TARGET_OBJECTS:nd4jobj>)
-set_property(TARGET ${LIBND4J_NAME}static PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
+set_property(TARGET ${SD_LIBRARY_NAME}static PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
-install(TARGETS ${LIBND4J_NAME}static DESTINATION .)
+install(TARGETS ${SD_LIBRARY_NAME}static DESTINATION .)
 endif()
 # on windows we want to make sure we use MT or MD, but since we use it in one lib, we must use it everywhere to avoid conflicts
 set_property(TARGET nd4jobj PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
-set_property(TARGET ${LIBND4J_NAME} PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
+set_property(TARGET ${SD_LIBRARY_NAME} PROPERTY MSVC_RUNTIME_LIBRARY "${MSVC_RT_LIB}$<$<CONFIG:Debug>:Debug>")
 if(WIN32)
 message("CUDA on Windows: enabling /EHsc")
 SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /bigobj /std:c++14")
 endif()
-target_link_libraries(${LIBND4J_NAME} ${CUDA_LIBRARIES} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_cusolver_LIBRARY} ${CUDNN} ${MKLDNN})
+target_link_libraries(${SD_LIBRARY_NAME} ${CUDA_LIBRARIES} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_cusolver_LIBRARY} ${CUDNN} ${MKLDNN})
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/cuda)
-install(TARGETS ${LIBND4J_NAME} DESTINATION .)
+install(TARGETS ${SD_LIBRARY_NAME} DESTINATION .)
 endif(CUDA_FOUND)
-elseif(CPU_BLAS)
+elseif(SD_CPU)
-if ("${EXPERIMENTAL}" STREQUAL "yes")
+if ("${SD_EXPERIMENTAL}" STREQUAL "yes")
 message("Experimental mode ENABLED")
 set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__ND4J_EXPERIMENTAL__=true")
@@ -279,15 +278,16 @@ elseif(CPU_BLAS)
 file(GLOB_RECURSE OPS_SOURCES false ../include/ops/impl/*.cpp ../include/ops/declarable/impl/*.cpp ../include/ops/*.h)
 file(GLOB_RECURSE INDEXING_SOURCES false ../include/indexing/*.cpp ../include/indexing/*.h)
 file(GLOB_RECURSE HELPERS_SOURCES false ../include/helpers/*.cpp ../include/helpers/*.h)
+file(GLOB_RECURSE LEGACY_SOURCES false ../include/legacy/impl/*.cpp ../include/legacy/cpu/*.cpp ../include/legacy/*.h)
 file(GLOB_RECURSE LOOPS_SOURCES false ../include/loops/*.cpp ../include/loops/*.h)
-if (X86_BUILD)
+if (SD_X86_BUILD)
 # we disable platform optimizations for certains files for linux/macos
 set_source_files_properties(cpu/NativeOps.cpp PROPERTIES COMPILE_FLAGS "-march=x86-64 -mtune=generic")
 set_source_files_properties(../include/helpers/impl/OpTracker.cpp PROPERTIES COMPILE_FLAGS "-march=x86-64 -mtune=generic")
 endif()
-if(CHECK_VECTORIZATION)
+if(SD_CHECK_VECTORIZATION)
 set(VECT_FILES cpu/NativeOps.cpp ${OPS_SOURCES} ${HELPERS_SOURCES} ${CUSTOMOPS_GENERIC_SOURCES} ${LOOPS_SOURCES})
 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
@@ -315,33 +315,31 @@ elseif(CPU_BLAS)
 message("CPU BLAS")
 add_definitions(-D__CPUBLAS__=true)
-add_library(nd4jobj OBJECT cpu/NativeOps.cpp cpu/GraphExecutioner.cpp
-cpu/NativeOpExecutioner.cpp cpu/NDArray.cpp cpu/NDArrayFactory.cpp
-../include/cnpy/cnpy.cpp ../include/nd4jmemset.h ../include/nd4jmalloc.h
-Environment.cpp Environment.h ${LOOPS_SOURCES} ${HELPERS_SOURCES} ${EXEC_SOURCES} ${ARRAY_SOURCES} ${TYPES_SOURCES}
+add_library(nd4jobj OBJECT ${LEGACY_SOURCES}
+${LOOPS_SOURCES} ${HELPERS_SOURCES} ${EXEC_SOURCES} ${ARRAY_SOURCES} ${TYPES_SOURCES}
 ${MEMORY_SOURCES} ${GRAPH_SOURCES} ${CUSTOMOPS_SOURCES} ${EXCEPTIONS_SOURCES} ${INDEXING_SOURCES} ${CUSTOMOPS_MKLDNN_SOURCES} ${CUSTOMOPS_GENERIC_SOURCES}
 ${OPS_SOURCES} ${PERF_SOURCES})
 if(IOS)
-add_library(${LIBND4J_NAME} STATIC $<TARGET_OBJECTS:nd4jobj>)
+add_library(${SD_LIBRARY_NAME} STATIC $<TARGET_OBJECTS:nd4jobj>)
 else()
 # static library is built only if we're going to build tests, skip otherwise
-if (BUILD_TESTS)
+if (SD_BUILD_TESTS OR SD_STATIC_LIB)
-add_library(${LIBND4J_NAME}static STATIC $<TARGET_OBJECTS:nd4jobj>)
+add_library(${SD_LIBRARY_NAME}static STATIC $<TARGET_OBJECTS:nd4jobj>)
 endif()
-add_library(${LIBND4J_NAME} SHARED $<TARGET_OBJECTS:nd4jobj>)
+add_library(${SD_LIBRARY_NAME} SHARED $<TARGET_OBJECTS:nd4jobj>)
 endif()
 # we're including {MKLDNN} here in case of building from sources. in future that'll replace {MKLDNN_LIBRARIES}. same applies to BLAS
 if (NOT BLAS_LIBRARIES)
 set(BLAS_LIBRARIES "")
 endif()
-target_link_libraries(${LIBND4J_NAME} ${MKLDNN} ${MKLDNN_LIBRARIES} ${OPENBLAS_LIBRARIES} ${BLAS_LIBRARIES} ${CPU_FEATURES})
+target_link_libraries(${SD_LIBRARY_NAME} ${MKLDNN} ${MKLDNN_LIBRARIES} ${OPENBLAS_LIBRARIES} ${BLAS_LIBRARIES} ${CPU_FEATURES})
-if ("${LIBND4J_ALL_OPS}" AND "${LIBND4J_BUILD_MINIFIER}")
+if ("${SD_ALL_OPS}" AND "${SD_BUILD_MINIFIER}")
 message(STATUS "Building minifier...")
 add_executable(minifier ../minifier/minifier.cpp ../minifier/graphopt.cpp)
-target_link_libraries(minifier ${LIBND4J_NAME}static ${MKLDNN_LIBRARIES} ${OPENBLAS_LIBRARIES} ${MKLDNN} ${BLAS_LIBRARIES} ${CPU_FEATURES})
+target_link_libraries(minifier ${SD_LIBRARY_NAME}static ${MKLDNN_LIBRARIES} ${OPENBLAS_LIBRARIES} ${MKLDNN} ${BLAS_LIBRARIES} ${CPU_FEATURES})
 endif()
 if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS 4.9)
@@ -362,6 +360,6 @@ elseif(CPU_BLAS)
 SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -export-dynamic")
 endif()
-install(TARGETS ${LIBND4J_NAME} DESTINATION .)
+install(TARGETS ${SD_LIBRARY_NAME} DESTINATION .)
 set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/cpu)
 endif()
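
blas/CMakeLists.txt keeps the CPU/CUDA split, just under the renamed variables (SD_CPU/SD_CUDA, SD_ARCH, SD_EXTENSION, SD_LIBRARY_NAME). A hedged sketch of selecting each flavour from the top-level project; the COMPUTE value is only the example used in CMakeSettings.json above, and the build directories are placeholders:

# CPU backend tuned for AVX2:
cmake -DSD_CPU=true -DSD_ARCH=x86-64 -DSD_EXTENSION=avx2 -S . -B build-cpu
# CUDA (cuBLAS) backend; the cuDNN platform helpers additionally require a CUDA build (gated by HELPERS_cudnn):
cmake -DSD_CUDA=true -DCOMPUTE=61 -S . -B build-cuda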


@@ -1,191 +0,0 @@
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019-2020 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver119 on 2018-09-16.
// @author Oleg Semeniv <oleg.semeniv@gmail.com>
//
#ifndef DEV_TESTS_NDARRAYFACTORY_H
#define DEV_TESTS_NDARRAYFACTORY_H
#include <vector>
#include <initializer_list>
#include <NDArray.h>
//#include <memory/Workspace.h>
#include <execution/LaunchContext.h>
#include <string>
namespace nd4j {
class ND4J_EXPORT NDArrayFactory {
private:
template <typename T>
static void memcpyFromVector(void *ptr, const std::vector<T> &vector);
public:
template <typename T>
static NDArray* empty_(nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* empty_(nd4j::DataType dataType, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray empty(nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray empty(nd4j::DataType dataType, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray* valueOf(const std::initializer_list<Nd4jLong>& shape, T value, char order = 'c', nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray* valueOf(const std::vector<Nd4jLong>& shape, T value, char order = 'c', nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* valueOf(const std::vector<Nd4jLong>& shape, const NDArray& value, char order = 'c', nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray* linspace(T from, T to, Nd4jLong numElements);
template <typename T>
static NDArray* create_(const T value, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* create_(nd4j::DataType dtype, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(const T value, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray create(nd4j::DataType dtype, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(DataType type, const T scalar, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray* vector(Nd4jLong length, T startingValue = (T) 0, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray* create_(char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* create_( char order, const std::vector<Nd4jLong> &shape, nd4j::DataType dataType, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray* create_(char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray create(char order, const std::vector<Nd4jLong> &shape, nd4j::DataType dtype, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(const std::vector<T> &values, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
#ifndef __JAVACPP_HACK__
// this method only available out of javacpp
/**
* This constructor creates vector of T
*
* @param values
*/
template <typename T>
static NDArray create(char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(T* buffer, char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<T>& data, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
/**
* This method creates NDArray from .npy file
* @param fileName
* @return
*/
static NDArray fromNpyFile(const char *fileName);
/**
* This factory create array from utf8 string
* @return NDArray default dataType UTF8
*/
static NDArray string(const char *string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* string_(const char *string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* string_(const std::string &string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray string(const std::string& string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
/**
* This factory create array from utf16 string
* @return NDArray default dataType UTF16
*/
static NDArray string(const char16_t* u16string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_(const char16_t* u16string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_(const std::u16string& u16string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string(const std::u16string& u16string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
/**
* This factory create array from utf32 string
* @return NDArray default dataType UTF32
*/
static NDArray string(const char32_t* u32string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_(const char32_t* u32string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_(const std::u32string& u32string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string(const std::u32string& u32string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
/**
* This factory create array from vector of utf8 strings
* @return NDArray default dataType UTF8
*/
static NDArray string( const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray string( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray string( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray string( const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
/**
* This factory create array from vector of utf16 strings
* @return NDArray default dataType UTF16
*/
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
/**
* This factory create array from vector of utf32 strings
* @return NDArray default dataType UTF32
*/
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext());
static ResultSet createSetOfArrs(const Nd4jLong numOfArrs, const void* buffer, const Nd4jLong* shapeInfo, const Nd4jLong* offsets, nd4j::LaunchContext * context = nd4j::LaunchContext ::defaultContext());
#endif
};
}
#endif //DEV_TESTS_NDARRAYFACTORY_H


@@ -1,148 +0,0 @@
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
#ifndef NDARRAY_MACRO
#define NDARRAY_MACRO
#include <op_boilerplate.h>
//NDArray<T> *other, T *extraParams
BUILD_CALL_1(template void NDArray<float>::template applyPairwiseTransform, float, (NDArray<float>* other, float* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void NDArray<float16>::applyPairwiseTransform, float16, (NDArray<float16>* other, float16* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void NDArray<double>::applyPairwiseTransform, double, (NDArray<double>* other, double* extraParams), PAIRWISE_TRANSFORM_OPS)
// NDArray<T> *other, NDArray<T> *target, T *extraParams
BUILD_CALL_1(template void nd4j::NDArray<float>::applyPairwiseTransform, float, (NDArray<float>* other, NDArray<float>* target, float* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyPairwiseTransform, float16, (NDArray<float16>* other, NDArray<float16>* target, float16* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyPairwiseTransform, double, (NDArray<double>* other, NDArray<double>* target, double* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyScalar, float16, (NDArray<float16>& scalar, NDArray<float16>* target, float16 *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyScalar, float16, (float16 scalar, NDArray<float16>* target, float16 *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyScalar, float, (NDArray<float>& scalar, NDArray<float>* target, float *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyScalar, float, (float scalar, NDArray<float>* target, float *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyScalar, double, (NDArray<double>& scalar, NDArray<double>* target, double *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyScalar, double, (double scalar, NDArray<double>* target, double *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template float16 nd4j::NDArray<float16>::reduceNumber, float16, (float16 *extraParams) const, REDUCE_OPS)
BUILD_CALL_1(template float nd4j::NDArray<float>::reduceNumber, float, (float *extraParams) const, REDUCE_OPS)
BUILD_CALL_1(template double nd4j::NDArray<double>::reduceNumber, double, (double *extraParams) const, REDUCE_OPS)
BUILD_CALL_1(template Nd4jLong nd4j::NDArray<float16>::indexReduceNumber, float16, (float16 *extraParams), INDEX_REDUCE_OPS)
BUILD_CALL_1(template Nd4jLong nd4j::NDArray<float>::indexReduceNumber, float, (float *extraParams), INDEX_REDUCE_OPS)
BUILD_CALL_1(template Nd4jLong nd4j::NDArray<double>::indexReduceNumber, double, (double *extraParams), INDEX_REDUCE_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyBroadcast, float16, (std::initializer_list<int> list, const nd4j::NDArray<float16>* a, nd4j::NDArray<float16>* b, float16* c), BROADCAST_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyBroadcast, float, (std::initializer_list<int> list, const nd4j::NDArray<float>* a, nd4j::NDArray<float>* b, float* c), BROADCAST_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyBroadcast, double, (std::initializer_list<int> list, const nd4j::NDArray<double>* a, nd4j::NDArray<double>* b, double* c), BROADCAST_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyTrueBroadcast, float16,(const nd4j::NDArray<float16>* a, nd4j::NDArray<float16>* target, const bool checkTargetShape, float16* c) const, BROADCAST_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyTrueBroadcast, float, (const nd4j::NDArray<float>* a, nd4j::NDArray<float>* target, const bool checkTargetShape, float* c) const, BROADCAST_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyTrueBroadcast, double, (const nd4j::NDArray<double>* a, nd4j::NDArray<double>* target, const bool checkTargetShape, double* c) const, BROADCAST_OPS)
BUILD_CALL_1(template nd4j::NDArray<float16>* nd4j::NDArray<float16>::applyTrueBroadcast, float16, (const nd4j::NDArray<float16>* a, float16* c) const, BROADCAST_OPS)
BUILD_CALL_1(template nd4j::NDArray<float>* nd4j::NDArray<float>::applyTrueBroadcast, float, (const nd4j::NDArray<float>* a, float* c) const, BROADCAST_OPS)
BUILD_CALL_1(template nd4j::NDArray<double>* nd4j::NDArray<double>::applyTrueBroadcast, double, (const nd4j::NDArray<double>* a, double* c) const, BROADCAST_OPS)
BUILD_CALL_1(template nd4j::NDArray<float16> nd4j::NDArray<float16>::applyTrueBroadcast, float16, (const nd4j::NDArray<float16>& a, float16* c) const, BROADCAST_OPS)
BUILD_CALL_1(template nd4j::NDArray<float> nd4j::NDArray<float>::applyTrueBroadcast, float, (const nd4j::NDArray<float>& a, float* c) const, BROADCAST_OPS)
BUILD_CALL_1(template nd4j::NDArray<double> nd4j::NDArray<double>::applyTrueBroadcast, double, (const nd4j::NDArray<double>& a, double* c) const, BROADCAST_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyTransform, float16, (NDArray<float16>* target, float16* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyTransform, float, (NDArray<float>* target, float* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyTransform, double, (NDArray<double>* target, double* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyTransform, float16, (float16* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyTransform, float, (float* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyTransform, double, (double* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::applyRandom, float16, (nd4j::random::RandomBuffer *buffer, NDArray<float16>* y, NDArray<float16>* z, float16* extraParams), RANDOM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::applyRandom, float, (nd4j::random::RandomBuffer *buffer, NDArray<float>* y, NDArray<float>* z, float* extraParams), RANDOM_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::applyRandom, double, (nd4j::random::RandomBuffer *buffer, NDArray<double>* y, NDArray<double>* z, double* extraParams), RANDOM_OPS)
BUILD_CALL_1(template NDArray<float16> nd4j::NDArray<float16>::transform, float16, (float16* extraParams) const, TRANSFORM_OPS)
BUILD_CALL_1(template NDArray<float> nd4j::NDArray<float>::transform, float, (float* extraParams) const, TRANSFORM_OPS)
BUILD_CALL_1(template NDArray<double> nd4j::NDArray<double>::transform, double, (double* extraParams) const, TRANSFORM_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template reduceAlongDimension, float, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template reduceAlongDimension, float16, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template reduceAlongDimension, double, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> nd4j::NDArray<float>::template reduceAlongDims, float, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> nd4j::NDArray<float16>::template reduceAlongDims, float16, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> nd4j::NDArray<double>::template reduceAlongDims, double, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template reduceAlongDimension, float, (const std::initializer_list<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template reduceAlongDimension, float16, (const std::initializer_list<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template reduceAlongDimension, double, (const std::initializer_list<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::template reduceAlongDimension, float, (NDArray<float>* target, const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes, float * extras) const, REDUCE_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::template reduceAlongDimension, float16, (NDArray<float16>* target, const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes, float16 * extras) const, REDUCE_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::template reduceAlongDimension, double, (NDArray<double>* target, const std::vector<int>& dimension, const bool keepDims, const bool supportOldShapes, double * extras) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template varianceAlongDimension, float, (const bool biasCorrected, const std::initializer_list<int>& dimensions) const, SUMMARY_STATS_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template varianceAlongDimension, float16, (const bool biasCorrected, const std::initializer_list<int>& dimensions) const, SUMMARY_STATS_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template varianceAlongDimension, double, (const bool biasCorrected, const std::initializer_list<int>& dimensions) const, SUMMARY_STATS_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::template varianceAlongDimension, float, (const NDArray<float> *target, const bool biasCorrected, const std::initializer_list<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::template varianceAlongDimension, float16, (const NDArray<float16> *target,const bool biasCorrected, const std::initializer_list<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::template varianceAlongDimension, double, (const NDArray<double> *target, const bool biasCorrected, const std::initializer_list<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::template varianceAlongDimension, float, (const NDArray<float> *target, const bool biasCorrected, const std::vector<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::template varianceAlongDimension, float16, (const NDArray<float16> *target,const bool biasCorrected, const std::vector<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::template varianceAlongDimension, double, (const NDArray<double> *target, const bool biasCorrected, const std::vector<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template float nd4j::NDArray<float>::template varianceNumber, float, (bool biasCorrected), SUMMARY_STATS_OPS)
BUILD_CALL_1(template float16 nd4j::NDArray<float16>::template varianceNumber, float16, (bool biasCorrected), SUMMARY_STATS_OPS)
BUILD_CALL_1(template double nd4j::NDArray<double>::template varianceNumber, double, (bool biasCorrected), SUMMARY_STATS_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template applyReduce3, float, (const NDArray<float>* other, const float* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template applyReduce3, float16, (const NDArray<float16>* other, const float16* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template applyReduce3, double, (const NDArray<double>* other, const double* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template applyReduce3, float, (const NDArray<float>* other, const std::vector<int> &dims, const float* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template applyReduce3, float16, (const NDArray<float16>* other, const std::vector<int> &dims, const float16* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template applyReduce3, double, (const NDArray<double>* other, const std::vector<int> &dims, const double* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float>::template applyIndexReduce, float, (const NDArray<float>* target, const std::vector<int> & alpha, const float* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template void nd4j::NDArray<float16>::template applyIndexReduce, float16, (const NDArray<float16>* target, const std::vector<int> & alpha, const float16* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template void nd4j::NDArray<double>::template applyIndexReduce, double, (const NDArray<double>* target, const std::vector<int> & alpha, const double* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template applyIndexReduce, float, (const std::vector<int> & alpha, const float* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template applyIndexReduce, float16, (const std::vector<int> & alpha, const float16* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template applyIndexReduce, double, (const std::vector<int> & alpha, const double* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *nd4j::NDArray<float>::template applyAllReduce3, float, (const nd4j::NDArray<float>* alpha, const std::vector<int> & beta, float const* gamma) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float16> *nd4j::NDArray<float16>::template applyAllReduce3, float16, (const nd4j::NDArray<float16>* alpha, const std::vector<int> & beta, float16 const* gamma) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<double> *nd4j::NDArray<double>::template applyAllReduce3, double, (const nd4j::NDArray<double>* alpha, const std::vector<int> & beta, double const* gamma) const, REDUCE3_OPS)
template NDArray<float> mmul(const NDArray<float>& left, const NDArray<float>& right);
template NDArray<float16> mmul(const NDArray<float16>& left, const NDArray<float16>& right);
template NDArray<double> mmul(const NDArray<double>& left, const NDArray<double>& right);
// template NDArray<float> operator-(const float, const NDArray<float>&);
// template NDArray<float16> operator-(const float16, const NDArray<float16>&);
// template NDArray<double> operator-(const double, const NDArray<double>&);
// template NDArray<float> operator+(const float, const NDArray<float>&);
// template NDArray<float16> operator+(const float16, const NDArray<float16>&);
// template NDArray<double> operator+(const double, const NDArray<double>&);
#endif


@@ -173,7 +173,7 @@ fi
 case "$OS" in
 linux-armhf)
 export RPI_BIN=$RPI_HOME/tools/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/bin/arm-linux-gnueabihf
-export CMAKE_COMMAND="$CMAKE_COMMAND -D CMAKE_TOOLCHAIN_FILE=cmake/rpi.cmake -DARM_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -D CMAKE_TOOLCHAIN_FILE=cmake/rpi.cmake -DSD_ARM_BUILD=true"
 if [ -z "$ARCH" ]; then
 ARCH="armv7-r"
 fi
@@ -183,7 +183,7 @@ case "$OS" in
 if [ -z "$ARCH" ]; then
 ARCH="armv8-a"
 fi
-export CMAKE_COMMAND="$CMAKE_COMMAND -DARM_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -DSD_ARM_BUILD=true"
 ;;
 android-arm)
@@ -194,7 +194,7 @@ case "$OS" in
 export ANDROID_CPP="$ANDROID_NDK/sources/cxx-stl/llvm-libc++/"
 export ANDROID_CC="$ANDROID_NDK/toolchains/llvm/prebuilt/$KERNEL/bin/clang"
 export ANDROID_ROOT="$ANDROID_NDK/platforms/android-21/arch-arm/"
-export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-arm.cmake -DANDROID_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-arm.cmake -DSD_ANDROID_BUILD=true"
 ;;
 android-arm64)
@@ -205,7 +205,7 @@ case "$OS" in
 export ANDROID_CPP="$ANDROID_NDK/sources/cxx-stl/llvm-libc++/"
 export ANDROID_CC="$ANDROID_NDK/toolchains/llvm/prebuilt/$KERNEL/bin/clang"
 export ANDROID_ROOT="$ANDROID_NDK/platforms/android-21/arch-arm64/"
-export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-arm64.cmake -DANDROID_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-arm64.cmake -DSD_ANDROID_BUILD=true"
 ;;
 android-x86)
@@ -216,7 +216,7 @@ case "$OS" in
 export ANDROID_CPP="$ANDROID_NDK/sources/cxx-stl/llvm-libc++/"
 export ANDROID_CC="$ANDROID_NDK/toolchains/llvm/prebuilt/$KERNEL/bin/clang"
 export ANDROID_ROOT="$ANDROID_NDK/platforms/android-21/arch-x86/"
-export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-x86.cmake -DANDROID_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-x86.cmake -DSD_ANDROID_BUILD=true"
 ;;
 android-x86_64)
@@ -227,7 +227,7 @@ case "$OS" in
 export ANDROID_CPP="$ANDROID_NDK/sources/cxx-stl/llvm-libc++/"
 export ANDROID_CC="$ANDROID_NDK/toolchains/llvm/prebuilt/$KERNEL/bin/clang"
 export ANDROID_ROOT="$ANDROID_NDK/platforms/android-21/arch-x86_64/"
-export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-x86_64.cmake -DANDROID_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/android-x86_64.cmake -DSD_ANDROID_BUILD=true"
 ;;
 ios-x86_64)
@@ -240,7 +240,7 @@ case "$OS" in
 fi
 XCODE_PATH="$(xcode-select --print-path)"
 export IOS_SDK="$XCODE_PATH/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$IOS_VERSION.sdk"
-export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-x86_64.cmake --debug-trycompile -DIOS_BUILD=true"
+export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-x86_64.cmake --debug-trycompile -DSD_IOS_BUILD=true"
;; ;;
ios-x86) ios-x86)
@ -253,7 +253,7 @@ case "$OS" in
fi fi
XCODE_PATH="$(xcode-select --print-path)" XCODE_PATH="$(xcode-select --print-path)"
export IOS_SDK="$XCODE_PATH/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$IOS_VERSION.sdk" export IOS_SDK="$XCODE_PATH/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$IOS_VERSION.sdk"
export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-x86.cmake --debug-trycompile -DIOS_BUILD=true" export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-x86.cmake --debug-trycompile -DSD_IOS_BUILD=true"
;; ;;
ios-arm64) ios-arm64)
@ -266,7 +266,7 @@ case "$OS" in
fi fi
XCODE_PATH="$(xcode-select --print-path)" XCODE_PATH="$(xcode-select --print-path)"
export IOS_SDK="$XCODE_PATH/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$IOS_VERSION.sdk" export IOS_SDK="$XCODE_PATH/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$IOS_VERSION.sdk"
export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-arm64.cmake --debug-trycompile -DIOS_BUILD=true" export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-arm64.cmake --debug-trycompile -DSD_IOS_BUILD=true"
;; ;;
ios-arm) ios-arm)
@ -279,7 +279,7 @@ case "$OS" in
fi fi
XCODE_PATH="$(xcode-select --print-path)" XCODE_PATH="$(xcode-select --print-path)"
export IOS_SDK="$XCODE_PATH/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$IOS_VERSION.sdk" export IOS_SDK="$XCODE_PATH/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$IOS_VERSION.sdk"
export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-arm.cmake --debug-trycompile -DIOS_BUILD=true" export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-arm.cmake --debug-trycompile -DSD_IOS_BUILD=true"
;; ;;
ios-armv7) ios-armv7)
@ -289,7 +289,7 @@ case "$OS" in
LIBTYPE="static" LIBTYPE="static"
ARCH="armv7" ARCH="armv7"
export IOS_SDK="/Applications/Xcode.app/Contents/Developer/Platforms/${iPhoneOS}.platform/Developer/SDKs/${iPhoneOS}${IOS_VERSION}.sdk" export IOS_SDK="/Applications/Xcode.app/Contents/Developer/Platforms/${iPhoneOS}.platform/Developer/SDKs/${iPhoneOS}${IOS_VERSION}.sdk"
export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-armv7.cmake --debug-trycompile -DIOS_BUILD=true" export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_TOOLCHAIN_FILE=cmake/ios-armv7.cmake --debug-trycompile -DSD_IOS_BUILD=true"
;; ;;
linux*) linux*)
@ -299,7 +299,7 @@ case "$OS" in
export CC=clang export CC=clang
export CXX=clang++ export CXX=clang++
PARALLEL="true" PARALLEL="true"
export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_MACOSX_RPATH=ON -DAPPLE_BUILD=true" export CMAKE_COMMAND="$CMAKE_COMMAND -DCMAKE_MACOSX_RPATH=ON -DSD_APPLE_BUILD=true"
;; ;;
windows*) windows*)
@ -376,7 +376,7 @@ fi
OPERATIONS_ARG= OPERATIONS_ARG=
if [ -z "$OPERATIONS" ]; then if [ -z "$OPERATIONS" ]; then
OPERATIONS_ARG="-DLIBND4J_ALL_OPS=true" OPERATIONS_ARG="-DSD_ALL_OPS=true"
else else
OPERATIONS_ARG=$OPERATIONS OPERATIONS_ARG=$OPERATIONS
fi fi
@ -386,9 +386,9 @@ if [ -z "$EXPERIMENTAL" ]; then
fi fi
if [ "$CHIP" == "cpu" ]; then if [ "$CHIP" == "cpu" ]; then
BLAS_ARG="-DCPU_BLAS=true -DBLAS=TRUE" BLAS_ARG="-DSD_CPU=true -DBLAS=TRUE"
else else
BLAS_ARG="-DCUDA_BLAS=true -DBLAS=TRUE" BLAS_ARG="-DSD_CUDA=true -DBLAS=TRUE"
fi fi
if [ -z "$NAME" ]; then if [ -z "$NAME" ]; then
@ -400,9 +400,9 @@ if [ -z "$NAME" ]; then
fi fi
if [ "$LIBTYPE" == "dynamic" ]; then if [ "$LIBTYPE" == "dynamic" ]; then
SHARED_LIBS_ARG="-DBUILD_SHARED_LIBS=OFF" SHARED_LIBS_ARG="-DSD_SHARED_LIB=OFF"
else else
SHARED_LIBS_ARG="-DBUILD_SHARED_LIBS=ON" SHARED_LIBS_ARG="-DSD_SHARED_LIB=ON"
fi fi
if [ "$BUILD" == "release" ]; then if [ "$BUILD" == "release" ]; then
@ -429,24 +429,24 @@ if [ "$PACKAGING" == "msi" ]; then
fi fi
EXPERIMENTAL_ARG=""; EXPERIMENTAL_ARG="";
MINIFIER_ARG="-DLIBND4J_BUILD_MINIFIER=false" MINIFIER_ARG="-DSD_BUILD_MINIFIER=false"
TESTS_ARG="-DBUILD_TESTS=OFF" TESTS_ARG="-DSD_BUILD_TESTS=OFF"
NAME_ARG="-DLIBND4J_NAME=$NAME" NAME_ARG="-DSD_LIBRARY_NAME=$NAME"
if [ "$EXPERIMENTAL" == "yes" ]; then if [ "$EXPERIMENTAL" == "yes" ]; then
EXPERIMENTAL_ARG="-DEXPERIMENTAL=yes" EXPERIMENTAL_ARG="-DSD_EXPERIMENTAL=yes"
fi fi
if [ "$MINIFIER" == "true" ]; then if [ "$MINIFIER" == "true" ]; then
MINIFIER_ARG="-DLIBND4J_BUILD_MINIFIER=true" MINIFIER_ARG="-DSD_BUILD_MINIFIER=true"
fi fi
if [ "$TESTS" == "true" ]; then if [ "$TESTS" == "true" ]; then
MINIFIER_ARG="-DLIBND4J_BUILD_MINIFIER=true" MINIFIER_ARG="-DSD_BUILD_MINIFIER=true"
TESTS_ARG="-DBUILD_TESTS=ON" TESTS_ARG="-DSD_BUILD_TESTS=ON"
fi fi
ARCH_ARG="-DARCH=$ARCH -DEXTENSION=$CHIP_EXTENSION" ARCH_ARG="-DSD_ARCH=$ARCH -DSD_EXTENSION=$CHIP_EXTENSION"
CUDA_COMPUTE="-DCOMPUTE=$COMPUTE" CUDA_COMPUTE="-DCOMPUTE=$COMPUTE"
@ -537,7 +537,7 @@ echo CHECK_VECTORIZATION = "$CHECK_VECTORIZATION"
echo HELPERS = "$HELPERS" echo HELPERS = "$HELPERS"
mkbuilddir mkbuilddir
pwd pwd
eval $CMAKE_COMMAND "$BLAS_ARG" "$ARCH_ARG" "$NAME_ARG" -DCHECK_VECTORIZATION="${CHECK_VECTORIZATION}" $HELPERS "$SHARED_LIBS_ARG" "$MINIFIER_ARG" "$OPERATIONS_ARG" "$BUILD_TYPE" "$PACKAGING_ARG" "$EXPERIMENTAL_ARG" "$TESTS_ARG" "$CUDA_COMPUTE" -DOPENBLAS_PATH="$OPENBLAS_PATH" -DDEV=FALSE -DCMAKE_NEED_RESPONSE=YES -DMKL_MULTI_THREADED=TRUE ../.. eval $CMAKE_COMMAND "$BLAS_ARG" "$ARCH_ARG" "$NAME_ARG" -DSD_CHECK_VECTORIZATION="${CHECK_VECTORIZATION}" $HELPERS "$SHARED_LIBS_ARG" "$MINIFIER_ARG" "$OPERATIONS_ARG" "$BUILD_TYPE" "$PACKAGING_ARG" "$EXPERIMENTAL_ARG" "$TESTS_ARG" "$CUDA_COMPUTE" -DOPENBLAS_PATH="$OPENBLAS_PATH" -DDEV=FALSE -DCMAKE_NEED_RESPONSE=YES -DMKL_MULTI_THREADED=TRUE ../..
if [ "$PARALLEL" == "true" ]; then if [ "$PARALLEL" == "true" ]; then
MAKE_ARGUMENTS="$MAKE_ARGUMENTS -j $MAKEJ" MAKE_ARGUMENTS="$MAKE_ARGUMENTS -j $MAKEJ"

View File

@ -21,9 +21,9 @@
#ifndef ND4J_ARRAY_OPTIONS_H #ifndef ND4J_ARRAY_OPTIONS_H
#define ND4J_ARRAY_OPTIONS_H #define ND4J_ARRAY_OPTIONS_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
#include <array/DataType.h> #include <array/DataType.h>
#include <array/ArrayType.h> #include <array/ArrayType.h>
#include <array/SpaceType.h> #include <array/SpaceType.h>
@ -87,7 +87,7 @@
#define ARRAY_UNSIGNED 8388608 #define ARRAY_UNSIGNED 8388608
namespace nd4j { namespace sd {
class ND4J_EXPORT ArrayOptions { class ND4J_EXPORT ArrayOptions {
private: private:
@ -104,7 +104,7 @@ namespace nd4j {
static FORCEINLINE _CUDA_HD bool isSparseArray(Nd4jLong *shapeInfo); static FORCEINLINE _CUDA_HD bool isSparseArray(Nd4jLong *shapeInfo);
static FORCEINLINE _CUDA_HD bool isUnsigned(Nd4jLong *shapeInfo); static FORCEINLINE _CUDA_HD bool isUnsigned(Nd4jLong *shapeInfo);
static FORCEINLINE _CUDA_HD nd4j::DataType dataType(const Nd4jLong *shapeInfo); static FORCEINLINE _CUDA_HD sd::DataType dataType(const Nd4jLong *shapeInfo);
static FORCEINLINE _CUDA_HD SpaceType spaceType(Nd4jLong *shapeInfo); static FORCEINLINE _CUDA_HD SpaceType spaceType(Nd4jLong *shapeInfo);
static FORCEINLINE _CUDA_HD SpaceType spaceType(const Nd4jLong *shapeInfo); static FORCEINLINE _CUDA_HD SpaceType spaceType(const Nd4jLong *shapeInfo);
@ -119,7 +119,7 @@ namespace nd4j {
static FORCEINLINE _CUDA_HD void resetDataType(Nd4jLong *shapeInfo); static FORCEINLINE _CUDA_HD void resetDataType(Nd4jLong *shapeInfo);
static FORCEINLINE _CUDA_HD void setDataType(Nd4jLong *shapeInfo, const nd4j::DataType dataType); static FORCEINLINE _CUDA_HD void setDataType(Nd4jLong *shapeInfo, const sd::DataType dataType);
static FORCEINLINE _CUDA_HD void copyDataType(Nd4jLong* to, const Nd4jLong* from); static FORCEINLINE _CUDA_HD void copyDataType(Nd4jLong* to, const Nd4jLong* from);
}; };
@ -155,34 +155,34 @@ namespace nd4j {
return hasPropertyBitSet(shapeInfo, ARRAY_UNSIGNED); return hasPropertyBitSet(shapeInfo, ARRAY_UNSIGNED);
} }
FORCEINLINE _CUDA_HD nd4j::DataType ArrayOptions::dataType(const Nd4jLong *shapeInfo) { FORCEINLINE _CUDA_HD sd::DataType ArrayOptions::dataType(const Nd4jLong *shapeInfo) {
/*if (hasPropertyBitSet(shapeInfo, ARRAY_QUANTIZED)) /*if (hasPropertyBitSet(shapeInfo, ARRAY_QUANTIZED))
return nd4j::DataType::QINT8; return sd::DataType::QINT8;
else */if (hasPropertyBitSet(shapeInfo, ARRAY_FLOAT)) else */if (hasPropertyBitSet(shapeInfo, ARRAY_FLOAT))
return nd4j::DataType::FLOAT32; return sd::DataType::FLOAT32;
else if (hasPropertyBitSet(shapeInfo, ARRAY_DOUBLE)) else if (hasPropertyBitSet(shapeInfo, ARRAY_DOUBLE))
return nd4j::DataType::DOUBLE; return sd::DataType::DOUBLE;
else if (hasPropertyBitSet(shapeInfo, ARRAY_HALF)) else if (hasPropertyBitSet(shapeInfo, ARRAY_HALF))
return nd4j::DataType::HALF; return sd::DataType::HALF;
else if (hasPropertyBitSet(shapeInfo, ARRAY_BHALF)) else if (hasPropertyBitSet(shapeInfo, ARRAY_BHALF))
return nd4j::DataType::BFLOAT16; return sd::DataType::BFLOAT16;
else if (hasPropertyBitSet(shapeInfo, ARRAY_BOOL)) else if (hasPropertyBitSet(shapeInfo, ARRAY_BOOL))
return nd4j::DataType ::BOOL; return sd::DataType ::BOOL;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UNSIGNED)) { else if (hasPropertyBitSet(shapeInfo, ARRAY_UNSIGNED)) {
if (hasPropertyBitSet(shapeInfo, ARRAY_CHAR)) if (hasPropertyBitSet(shapeInfo, ARRAY_CHAR))
return nd4j::DataType ::UINT8; return sd::DataType ::UINT8;
else if (hasPropertyBitSet(shapeInfo, ARRAY_SHORT)) else if (hasPropertyBitSet(shapeInfo, ARRAY_SHORT))
return nd4j::DataType ::UINT16; return sd::DataType ::UINT16;
else if (hasPropertyBitSet(shapeInfo, ARRAY_INT)) else if (hasPropertyBitSet(shapeInfo, ARRAY_INT))
return nd4j::DataType ::UINT32; return sd::DataType ::UINT32;
else if (hasPropertyBitSet(shapeInfo, ARRAY_LONG)) else if (hasPropertyBitSet(shapeInfo, ARRAY_LONG))
return nd4j::DataType ::UINT64; return sd::DataType ::UINT64;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF8)) else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF8))
return nd4j::DataType ::UTF8; return sd::DataType ::UTF8;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF16)) else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF16))
return nd4j::DataType ::UTF16; return sd::DataType ::UTF16;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF32)) else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF32))
return nd4j::DataType ::UTF32; return sd::DataType ::UTF32;
else { else {
//shape::printShapeInfoLinear("Bad unsigned datatype (not)stored in shape", const_cast<Nd4jLong*>(shapeInfo)); //shape::printShapeInfoLinear("Bad unsigned datatype (not)stored in shape", const_cast<Nd4jLong*>(shapeInfo));
#ifndef __CUDA_ARCH__ #ifndef __CUDA_ARCH__
@ -191,19 +191,19 @@ namespace nd4j {
} }
} }
else if (hasPropertyBitSet(shapeInfo, ARRAY_CHAR)) else if (hasPropertyBitSet(shapeInfo, ARRAY_CHAR))
return nd4j::DataType::INT8; return sd::DataType::INT8;
else if (hasPropertyBitSet(shapeInfo, ARRAY_SHORT)) else if (hasPropertyBitSet(shapeInfo, ARRAY_SHORT))
return nd4j::DataType::INT16; return sd::DataType::INT16;
else if (hasPropertyBitSet(shapeInfo, ARRAY_INT)) else if (hasPropertyBitSet(shapeInfo, ARRAY_INT))
return nd4j::DataType::INT32; return sd::DataType::INT32;
else if (hasPropertyBitSet(shapeInfo, ARRAY_LONG)) else if (hasPropertyBitSet(shapeInfo, ARRAY_LONG))
return nd4j::DataType::INT64; return sd::DataType::INT64;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF8)) else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF8))
return nd4j::DataType::UTF8; return sd::DataType::UTF8;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF16)) else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF16))
return nd4j::DataType::UTF16; return sd::DataType::UTF16;
else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF32)) else if (hasPropertyBitSet(shapeInfo, ARRAY_UTF32))
return nd4j::DataType::UTF32; return sd::DataType::UTF32;
else { else {
//shape::printShapeInfoLinear("Bad signed datatype (not)stored in shape", const_cast<Nd4jLong*>(shapeInfo)); //shape::printShapeInfoLinear("Bad signed datatype (not)stored in shape", const_cast<Nd4jLong*>(shapeInfo));
#ifndef __CUDA_ARCH__ #ifndef __CUDA_ARCH__
@ -296,63 +296,63 @@ namespace nd4j {
unsetPropertyBit(shapeInfo, ARRAY_UNSIGNED); unsetPropertyBit(shapeInfo, ARRAY_UNSIGNED);
} }
FORCEINLINE _CUDA_HD void ArrayOptions::setDataType(Nd4jLong *shapeInfo, const nd4j::DataType dataType) { FORCEINLINE _CUDA_HD void ArrayOptions::setDataType(Nd4jLong *shapeInfo, const sd::DataType dataType) {
resetDataType(shapeInfo); resetDataType(shapeInfo);
if (dataType == nd4j::DataType::UINT8 || if (dataType == sd::DataType::UINT8 ||
dataType == nd4j::DataType::UINT16 || dataType == sd::DataType::UINT16 ||
dataType == nd4j::DataType::UINT32 || dataType == sd::DataType::UINT32 ||
dataType == nd4j::DataType::UINT64) { dataType == sd::DataType::UINT64) {
setPropertyBit(shapeInfo, ARRAY_UNSIGNED); setPropertyBit(shapeInfo, ARRAY_UNSIGNED);
} }
switch (dataType) { switch (dataType) {
case nd4j::DataType::BOOL: case sd::DataType::BOOL:
setPropertyBit(shapeInfo, ARRAY_BOOL); setPropertyBit(shapeInfo, ARRAY_BOOL);
break; break;
case nd4j::DataType::HALF: case sd::DataType::HALF:
setPropertyBit(shapeInfo, ARRAY_HALF); setPropertyBit(shapeInfo, ARRAY_HALF);
break; break;
case nd4j::DataType::BFLOAT16: case sd::DataType::BFLOAT16:
setPropertyBit(shapeInfo, ARRAY_BHALF); setPropertyBit(shapeInfo, ARRAY_BHALF);
break; break;
case nd4j::DataType::FLOAT32: case sd::DataType::FLOAT32:
setPropertyBit(shapeInfo, ARRAY_FLOAT); setPropertyBit(shapeInfo, ARRAY_FLOAT);
break; break;
case nd4j::DataType::DOUBLE: case sd::DataType::DOUBLE:
setPropertyBit(shapeInfo, ARRAY_DOUBLE); setPropertyBit(shapeInfo, ARRAY_DOUBLE);
break; break;
case nd4j::DataType::INT8: case sd::DataType::INT8:
setPropertyBit(shapeInfo, ARRAY_CHAR); setPropertyBit(shapeInfo, ARRAY_CHAR);
break; break;
case nd4j::DataType::INT16: case sd::DataType::INT16:
setPropertyBit(shapeInfo, ARRAY_SHORT); setPropertyBit(shapeInfo, ARRAY_SHORT);
break; break;
case nd4j::DataType::INT32: case sd::DataType::INT32:
setPropertyBit(shapeInfo, ARRAY_INT); setPropertyBit(shapeInfo, ARRAY_INT);
break; break;
case nd4j::DataType::INT64: case sd::DataType::INT64:
setPropertyBit(shapeInfo, ARRAY_LONG); setPropertyBit(shapeInfo, ARRAY_LONG);
break; break;
case nd4j::DataType::UINT8: case sd::DataType::UINT8:
setPropertyBit(shapeInfo, ARRAY_CHAR); setPropertyBit(shapeInfo, ARRAY_CHAR);
break; break;
case nd4j::DataType::UINT16: case sd::DataType::UINT16:
setPropertyBit(shapeInfo, ARRAY_SHORT); setPropertyBit(shapeInfo, ARRAY_SHORT);
break; break;
case nd4j::DataType::UINT32: case sd::DataType::UINT32:
setPropertyBit(shapeInfo, ARRAY_INT); setPropertyBit(shapeInfo, ARRAY_INT);
break; break;
case nd4j::DataType::UINT64: case sd::DataType::UINT64:
setPropertyBit(shapeInfo, ARRAY_LONG); setPropertyBit(shapeInfo, ARRAY_LONG);
break; break;
case nd4j::DataType::UTF8: case sd::DataType::UTF8:
setPropertyBit(shapeInfo, ARRAY_UTF8); setPropertyBit(shapeInfo, ARRAY_UTF8);
break; break;
case nd4j::DataType::UTF16: case sd::DataType::UTF16:
setPropertyBit(shapeInfo, ARRAY_UTF16); setPropertyBit(shapeInfo, ARRAY_UTF16);
break; break;
case nd4j::DataType::UTF32: case sd::DataType::UTF32:
setPropertyBit(shapeInfo, ARRAY_UTF32); setPropertyBit(shapeInfo, ARRAY_UTF32);
break; break;
default: default:
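
Taken together, the accessors above round-trip the datatype through the property bits of a shape-info buffer: setDataType() writes the bits, dataType() decodes them. A minimal sketch, assuming shapeInfo is a valid shape-info buffer produced by the library (helper names are illustrative):

    // write the HALF property bit into an existing shape-info buffer
    void tagAsHalf(Nd4jLong* shapeInfo) {
        sd::ArrayOptions::setDataType(shapeInfo, sd::DataType::HALF);
    }

    // read the datatype bits back
    bool isHalf(const Nd4jLong* shapeInfo) {
        return sd::ArrayOptions::dataType(shapeInfo) == sd::DataType::HALF;
    }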

View File

@ -21,7 +21,7 @@
#ifndef ND4J_ARRAY_TYPE_H #ifndef ND4J_ARRAY_TYPE_H
#define ND4J_ARRAY_TYPE_H #define ND4J_ARRAY_TYPE_H
namespace nd4j { namespace sd {
enum ArrayType { enum ArrayType {
DENSE = 1, DENSE = 1,
SPARSE = 2, SPARSE = 2,

View File

@ -21,7 +21,7 @@
#ifndef LIBND4J_BYTEORDER_H #ifndef LIBND4J_BYTEORDER_H
#define LIBND4J_BYTEORDER_H #define LIBND4J_BYTEORDER_H
namespace nd4j { namespace sd {
enum ByteOrder { enum ByteOrder {
LE = 0, LE = 0,
BE = 1, BE = 1,

View File

@ -23,12 +23,12 @@
#include <graph/generated/array_generated.h> #include <graph/generated/array_generated.h>
#include "ByteOrder.h" #include "ByteOrder.h"
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT ByteOrderUtils { class ND4J_EXPORT ByteOrderUtils {
public: public:
static ByteOrder fromFlatByteOrder(nd4j::graph::ByteOrder order); static ByteOrder fromFlatByteOrder(sd::graph::ByteOrder order);
}; };
} }

View File

@ -20,11 +20,11 @@
#ifndef LIBND4J_CONSTANTDATABUFFER_H #ifndef LIBND4J_CONSTANTDATABUFFER_H
#define LIBND4J_CONSTANTDATABUFFER_H #define LIBND4J_CONSTANTDATABUFFER_H
#include <dll.h> #include <system/dll.h>
#include <pointercast.h> #include <system/pointercast.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT ConstantDataBuffer { class ND4J_EXPORT ConstantDataBuffer {
private: private:
Nd4jPointer _primaryBuffer = nullptr; Nd4jPointer _primaryBuffer = nullptr;

View File

@ -24,11 +24,11 @@
#include <array/DataType.h> #include <array/DataType.h>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
#include <array/ConstantDataBuffer.h> #include <array/ConstantDataBuffer.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT ConstantDescriptor { class ND4J_EXPORT ConstantDescriptor {
private: private:
std::vector<Nd4jLong> _integerValues; std::vector<Nd4jLong> _integerValues;
@ -63,9 +63,9 @@ namespace nd4j {
namespace std { namespace std {
template<> template<>
class ND4J_EXPORT hash<nd4j::ConstantDescriptor> { class ND4J_EXPORT hash<sd::ConstantDescriptor> {
public: public:
size_t operator()(const nd4j::ConstantDescriptor &k) const; size_t operator()(const sd::ConstantDescriptor &k) const;
}; };
} }
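
The std::hash specialization above lets ConstantDescriptor act as a key in hash-based caches. A minimal sketch (the helper name is illustrative and desc is assumed to be built elsewhere):

    // invokes the operator() declared in the specialization above
    size_t descriptorHash(const sd::ConstantDescriptor& desc) {
        return std::hash<sd::ConstantDescriptor>()(desc);
    }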

View File

@ -27,13 +27,13 @@
#include <array/ConstantDataBuffer.h> #include <array/ConstantDataBuffer.h>
#include <mutex> #include <mutex>
namespace nd4j { namespace sd {
class ConstantHolder { class ConstantHolder {
private: private:
int _deviceId = 0; int _deviceId = 0;
std::mutex _mutex; std::mutex _mutex;
std::map<nd4j::DataType, ConstantDataBuffer> _buffers; std::map<sd::DataType, ConstantDataBuffer> _buffers;
public: public:
ConstantHolder(const ConstantHolder& other); ConstantHolder(const ConstantHolder& other);
ConstantHolder() = default; ConstantHolder() = default;
@ -42,17 +42,17 @@ namespace nd4j {
ConstantHolder& operator=(const ConstantHolder& other) = default; ConstantHolder& operator=(const ConstantHolder& other) = default;
ConstantHolder& operator=(ConstantHolder&& other) = default; ConstantHolder& operator=(ConstantHolder&& other) = default;
bool hasBuffer(nd4j::DataType dataType); bool hasBuffer(sd::DataType dataType);
template <typename T> template <typename T>
bool hasBuffer(); bool hasBuffer();
void addBuffer(ConstantDataBuffer &pointer, nd4j::DataType dataType); void addBuffer(ConstantDataBuffer &pointer, sd::DataType dataType);
template <typename T> template <typename T>
void addBuffer(ConstantDataBuffer &pointer); void addBuffer(ConstantDataBuffer &pointer);
ConstantDataBuffer* getConstantDataBuffer(nd4j::DataType dataType); ConstantDataBuffer* getConstantDataBuffer(sd::DataType dataType);
template <typename T> template <typename T>
ConstantDataBuffer* getConstantDataBuffer(); ConstantDataBuffer* getConstantDataBuffer();
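
A minimal sketch of the holder API above, assuming holder is an internal cache instance obtained from the library (the helper name is illustrative):

    // returns the cached buffer for the given datatype, or nullptr if none was added yet
    sd::ConstantDataBuffer* cachedBuffer(sd::ConstantHolder& holder, sd::DataType dt) {
        return holder.hasBuffer(dt) ? holder.getConstantDataBuffer(dt) : nullptr;
    }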

View File

@ -23,14 +23,14 @@
#define DEV_TESTS_DATABUFFER_H #define DEV_TESTS_DATABUFFER_H
#include <cstring> #include <cstring>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <dll.h> #include <system/dll.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <array/DataType.h> #include <array/DataType.h>
#include <memory/Workspace.h> #include <memory/Workspace.h>
#include <execution/LaunchContext.h> #include <execution/LaunchContext.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT DataBuffer { class ND4J_EXPORT DataBuffer {

View File

@ -21,7 +21,7 @@
#ifndef ND4J_DATATYPE_H #ifndef ND4J_DATATYPE_H
#define ND4J_DATATYPE_H #define ND4J_DATATYPE_H
namespace nd4j { namespace sd {
enum DataType { enum DataType {
INHERIT = 0, INHERIT = 0,
BOOL = 1, BOOL = 1,

View File

@ -21,17 +21,17 @@
#ifndef LIBND4J_DATATYPECONVERSIONS_H #ifndef LIBND4J_DATATYPECONVERSIONS_H
#define LIBND4J_DATATYPECONVERSIONS_H #define LIBND4J_DATATYPECONVERSIONS_H
#include <pointercast.h> #include <system/pointercast.h>
#include <helpers/logger.h> #include <helpers/logger.h>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <array/DataType.h> #include <array/DataType.h>
#include <types/float16.h> #include <types/float16.h>
#include <helpers/BitwiseUtils.h> #include <helpers/BitwiseUtils.h>
#include <loops/type_conversions.h> #include <loops/type_conversions.h>
#include <dll.h> #include <system/dll.h>
#include <execution/Threads.h> #include <execution/Threads.h>
namespace nd4j { namespace sd {
template <typename T> template <typename T>
class ND4J_EXPORT DataTypeConversions { class ND4J_EXPORT DataTypeConversions {
private: private:

View File

@ -26,20 +26,20 @@
#include <types/bfloat16.h> #include <types/bfloat16.h>
#include <array/DataType.h> #include <array/DataType.h>
#include <graph/generated/array_generated.h> #include <graph/generated/array_generated.h>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <dll.h> #include <system/dll.h>
#include <Environment.h> #include <system/Environment.h>
#include <ArrayOptions.h> #include <array/ArrayOptions.h>
//#include <templatemath.h> //#include <templatemath.h>
//#include <shape.h> //#include <helpers/shape.h>
#include <helpers/logger.h> #include <helpers/logger.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT DataTypeUtils { class ND4J_EXPORT DataTypeUtils {
public: public:
static int asInt(DataType type); static int asInt(DataType type);
static DataType fromInt(int dtype); static DataType fromInt(int dtype);
static DataType fromFlatDataType(nd4j::graph::DType dtype); static DataType fromFlatDataType(sd::graph::DType dtype);
FORCEINLINE static std::string asString(DataType dataType); FORCEINLINE static std::string asString(DataType dataType);
template <typename T> template <typename T>
@ -70,21 +70,21 @@ namespace nd4j {
FORCEINLINE static _CUDA_HD size_t sizeOf(DataType type); FORCEINLINE static _CUDA_HD size_t sizeOf(DataType type);
FORCEINLINE static _CUDA_HD size_t sizeOf(const Nd4jLong* shapeInfo); FORCEINLINE static _CUDA_HD size_t sizeOf(const Nd4jLong* shapeInfo);
FORCEINLINE static _CUDA_HD bool isR(nd4j::DataType dataType); FORCEINLINE static _CUDA_HD bool isR(sd::DataType dataType);
FORCEINLINE static _CUDA_HD bool isZ(nd4j::DataType dataType); FORCEINLINE static _CUDA_HD bool isZ(sd::DataType dataType);
FORCEINLINE static _CUDA_HD bool isB(nd4j::DataType dataType); FORCEINLINE static _CUDA_HD bool isB(sd::DataType dataType);
FORCEINLINE static _CUDA_HD bool isU(nd4j::DataType dataType); FORCEINLINE static _CUDA_HD bool isU(sd::DataType dataType);
FORCEINLINE static _CUDA_HD bool isS(nd4j::DataType dataType); FORCEINLINE static _CUDA_HD bool isS(sd::DataType dataType);
FORCEINLINE static nd4j::DataType pickPairwiseResultType(nd4j::DataType typeX, nd4j::DataType typeY); FORCEINLINE static sd::DataType pickPairwiseResultType(sd::DataType typeX, sd::DataType typeY);
FORCEINLINE static nd4j::DataType pickPairwiseResultType(const Nd4jLong* shapeInfo1, const Nd4jLong* shapeInfo2); FORCEINLINE static sd::DataType pickPairwiseResultType(const Nd4jLong* shapeInfo1, const Nd4jLong* shapeInfo2);
FORCEINLINE static nd4j::DataType pickFloatingType(nd4j::DataType typeX); FORCEINLINE static sd::DataType pickFloatingType(sd::DataType typeX);
template <typename T1, typename T2> template <typename T1, typename T2>
FORCEINLINE static std::vector<T2> convertVector(const std::vector<T1> &vector); FORCEINLINE static std::vector<T2> convertVector(const std::vector<T1> &vector);
@ -106,38 +106,38 @@ namespace nd4j {
///// IMPLEMENTATION OF INLINE METHODS ///// ///// IMPLEMENTATION OF INLINE METHODS /////
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
FORCEINLINE nd4j::DataType DataTypeUtils::pickFloatingType(nd4j::DataType typeX) { FORCEINLINE sd::DataType DataTypeUtils::pickFloatingType(sd::DataType typeX) {
// if proposed dataType is already floating point - return it // if proposed dataType is already floating point - return it
if (isR(typeX)) if (isR(typeX))
return typeX; return typeX;
return Environment::getInstance()->defaultFloatDataType(); return Environment::getInstance()->defaultFloatDataType();
} }
FORCEINLINE bool DataTypeUtils::isR(nd4j::DataType dataType) { FORCEINLINE bool DataTypeUtils::isR(sd::DataType dataType) {
return dataType == nd4j::DataType::FLOAT32 || dataType == nd4j::DataType::BFLOAT16 || dataType == nd4j::DataType::HALF || dataType == nd4j::DataType::DOUBLE; return dataType == sd::DataType::FLOAT32 || dataType == sd::DataType::BFLOAT16 || dataType == sd::DataType::HALF || dataType == sd::DataType::DOUBLE;
} }
FORCEINLINE bool DataTypeUtils::isB(nd4j::DataType dataType) { FORCEINLINE bool DataTypeUtils::isB(sd::DataType dataType) {
return dataType == nd4j::DataType::BOOL; return dataType == sd::DataType::BOOL;
} }
FORCEINLINE bool DataTypeUtils::isS(nd4j::DataType dataType) { FORCEINLINE bool DataTypeUtils::isS(sd::DataType dataType) {
return dataType == nd4j::DataType::UTF8 || dataType == nd4j::DataType::UTF16 || dataType == nd4j::DataType::UTF32; return dataType == sd::DataType::UTF8 || dataType == sd::DataType::UTF16 || dataType == sd::DataType::UTF32;
} }
FORCEINLINE bool DataTypeUtils::isZ(nd4j::DataType dataType) { FORCEINLINE bool DataTypeUtils::isZ(sd::DataType dataType) {
return !isR(dataType) && !isB(dataType) && !isS(dataType); return !isR(dataType) && !isB(dataType) && !isS(dataType);
} }
FORCEINLINE bool DataTypeUtils::isU(nd4j::DataType dataType) { FORCEINLINE bool DataTypeUtils::isU(sd::DataType dataType) {
return dataType == nd4j::DataType::UINT8 || dataType == nd4j::DataType::UINT16 || dataType == nd4j::DataType::UINT32 || dataType == nd4j::DataType::UINT64; return dataType == sd::DataType::UINT8 || dataType == sd::DataType::UINT16 || dataType == sd::DataType::UINT32 || dataType == sd::DataType::UINT64;
} }
FORCEINLINE nd4j::DataType DataTypeUtils::pickPairwiseResultType(nd4j::DataType typeX, nd4j::DataType typeY) { FORCEINLINE sd::DataType DataTypeUtils::pickPairwiseResultType(sd::DataType typeX, sd::DataType typeY) {
// if both dtypes are the same - just return it // if both dtypes are the same - just return it
if (typeX == typeY) if (typeX == typeY)
return typeX; return typeX;
auto nd4j_max = [](nd4j::DataType typeX, nd4j::DataType typeY) { auto nd4j_max = [](sd::DataType typeX, sd::DataType typeY) {
return typeX > typeY?typeX:typeY; return typeX > typeY?typeX:typeY;
}; };
auto rX = isR(typeX); auto rX = isR(typeX);
@ -154,7 +154,7 @@ namespace nd4j {
// if both data types are float - return biggest one // if both data types are float - return biggest one
if (rX && rY) { if (rX && rY) {
// if we allow precision boost, then we pick bigger data type // if we allow precision boost, then we pick bigger data type
if (nd4j::Environment::getInstance()->precisionBoostAllowed()) { if (sd::Environment::getInstance()->precisionBoostAllowed()) {
return nd4j_max(typeX, typeY); return nd4j_max(typeX, typeY);
} else { } else {
// and we return first operand otherwise // and we return first operand otherwise
@ -165,7 +165,7 @@ namespace nd4j {
// if that's not real type, we apply same rules // if that's not real type, we apply same rules
if (!rX && !rY) { if (!rX && !rY) {
if (nd4j::Environment::getInstance()->precisionBoostAllowed()) { if (sd::Environment::getInstance()->precisionBoostAllowed()) {
return nd4j_max(typeX, typeY); return nd4j_max(typeX, typeY);
} else { } else {
// and we return first operand otherwise // and we return first operand otherwise
@ -177,7 +177,7 @@ namespace nd4j {
} }
/////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////
FORCEINLINE nd4j::DataType DataTypeUtils::pickPairwiseResultType(const Nd4jLong* shapeInfo1, const Nd4jLong* shapeInfo2) { FORCEINLINE sd::DataType DataTypeUtils::pickPairwiseResultType(const Nd4jLong* shapeInfo1, const Nd4jLong* shapeInfo2) {
return pickPairwiseResultType(ArrayOptions::dataType(shapeInfo1), ArrayOptions::dataType(shapeInfo2)); return pickPairwiseResultType(ArrayOptions::dataType(shapeInfo1), ArrayOptions::dataType(shapeInfo2));
} }
@ -420,31 +420,31 @@ FORCEINLINE _CUDA_HD T DataTypeUtils::eps() {
return result; return result;
} }
FORCEINLINE _CUDA_HD size_t DataTypeUtils::sizeOfElement(nd4j::DataType type) { FORCEINLINE _CUDA_HD size_t DataTypeUtils::sizeOfElement(sd::DataType type) {
switch (type) { switch (type) {
case nd4j::DataType::UINT8: case sd::DataType::UINT8:
case nd4j::DataType::INT8: case sd::DataType::INT8:
case nd4j::DataType::FLOAT8: case sd::DataType::FLOAT8:
case nd4j::DataType::QINT8: case sd::DataType::QINT8:
case nd4j::DataType::BOOL: return (size_t) 1; case sd::DataType::BOOL: return (size_t) 1;
case nd4j::DataType::BFLOAT16: case sd::DataType::BFLOAT16:
case nd4j::DataType::HALF: case sd::DataType::HALF:
case nd4j::DataType::INT16: case sd::DataType::INT16:
case nd4j::DataType::QINT16: case sd::DataType::QINT16:
case nd4j::DataType::UINT16: return (size_t) 2; case sd::DataType::UINT16: return (size_t) 2;
case nd4j::DataType::UTF8: case sd::DataType::UTF8:
case nd4j::DataType::UTF16: case sd::DataType::UTF16:
case nd4j::DataType::UTF32: case sd::DataType::UTF32:
case nd4j::DataType::INT32: case sd::DataType::INT32:
case nd4j::DataType::UINT32: case sd::DataType::UINT32:
case nd4j::DataType::HALF2: case sd::DataType::HALF2:
case nd4j::DataType::FLOAT32: return (size_t) 4; case sd::DataType::FLOAT32: return (size_t) 4;
case nd4j::DataType::UINT64: case sd::DataType::UINT64:
case nd4j::DataType::INT64: case sd::DataType::INT64:
case nd4j::DataType::DOUBLE: return (size_t) 8; case sd::DataType::DOUBLE: return (size_t) 8;
default: { default: {
nd4j_printf("Unknown DataType used: [%i]\n", asInt(type)); nd4j_printf("Unknown DataType used: [%i]\n", asInt(type));
@ -456,41 +456,41 @@ FORCEINLINE _CUDA_HD T DataTypeUtils::eps() {
} }
template <typename T> template <typename T>
FORCEINLINE _CUDA_HD nd4j::DataType nd4j::DataTypeUtils::fromT() { FORCEINLINE _CUDA_HD sd::DataType sd::DataTypeUtils::fromT() {
if (std::is_same<T, bool>::value) { if (std::is_same<T, bool>::value) {
return nd4j::DataType::BOOL; return sd::DataType::BOOL;
} else if (std::is_same<T, std::string>::value) { } else if (std::is_same<T, std::string>::value) {
return nd4j::DataType::UTF8; return sd::DataType::UTF8;
} else if (std::is_same<T, std::u16string>::value) { } else if (std::is_same<T, std::u16string>::value) {
return nd4j::DataType::UTF16; return sd::DataType::UTF16;
} else if (std::is_same<T, std::u32string>::value) { } else if (std::is_same<T, std::u32string>::value) {
return nd4j::DataType::UTF32; return sd::DataType::UTF32;
} else if (std::is_same<T, float>::value) { } else if (std::is_same<T, float>::value) {
return nd4j::DataType::FLOAT32; return sd::DataType::FLOAT32;
} else if (std::is_same<T, float16>::value) { } else if (std::is_same<T, float16>::value) {
return nd4j::DataType::HALF; return sd::DataType::HALF;
} else if (std::is_same<T, bfloat16>::value) { } else if (std::is_same<T, bfloat16>::value) {
return nd4j::DataType::BFLOAT16; return sd::DataType::BFLOAT16;
} else if (std::is_same<T, double>::value) { } else if (std::is_same<T, double>::value) {
return nd4j::DataType::DOUBLE; return sd::DataType::DOUBLE;
} else if (std::is_same<T, int8_t >::value) { } else if (std::is_same<T, int8_t >::value) {
return nd4j::DataType::INT8; return sd::DataType::INT8;
} else if (std::is_same<T, int16_t >::value) { } else if (std::is_same<T, int16_t >::value) {
return nd4j::DataType::INT16; return sd::DataType::INT16;
} else if (std::is_same<T, int>::value) { } else if (std::is_same<T, int>::value) {
return nd4j::DataType::INT32; return sd::DataType::INT32;
} else if (std::is_same<T, Nd4jLong>::value) { } else if (std::is_same<T, Nd4jLong>::value) {
return nd4j::DataType::INT64; return sd::DataType::INT64;
} else if (std::is_same<T, uint8_t>::value) { } else if (std::is_same<T, uint8_t>::value) {
return nd4j::DataType::UINT8; return sd::DataType::UINT8;
} else if (std::is_same<T, uint16_t>::value) { } else if (std::is_same<T, uint16_t>::value) {
return nd4j::DataType::UINT16; return sd::DataType::UINT16;
} else if (std::is_same<T, uint32_t>::value) { } else if (std::is_same<T, uint32_t>::value) {
return nd4j::DataType::UINT32; return sd::DataType::UINT32;
} else if (std::is_same<T, Nd4jULong>::value) { } else if (std::is_same<T, Nd4jULong>::value) {
return nd4j::DataType::UINT64; return sd::DataType::UINT64;
} else { } else {
return nd4j::DataType::INHERIT; return sd::DataType::INHERIT;
} }
} }
} }
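
A minimal sketch of the helpers above, assuming the DataTypeUtils header is included; the expected values follow from the code shown, except for the mixed real/integer promotion, whose branch sits between the hunks:

    void dataTypeSketch() {
        auto f = sd::DataTypeUtils::fromT<float>();                          // sd::DataType::FLOAT32, per fromT() above
        bool real = sd::DataTypeUtils::isR(f);                               // true: FLOAT32 is a floating type
        auto p = sd::DataTypeUtils::pickPairwiseResultType(f, sd::DataType::INT32);
        // expected FLOAT32: a real type is kept when paired with an integer type
        size_t bytes = sd::DataTypeUtils::sizeOfElement(sd::DataType::HALF); // 2, per the switch above
    }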

View File

@ -21,14 +21,14 @@
#ifndef DEV_TESTS_EXTRAARGUMENTS_H #ifndef DEV_TESTS_EXTRAARGUMENTS_H
#define DEV_TESTS_EXTRAARGUMENTS_H #define DEV_TESTS_EXTRAARGUMENTS_H
#include <dll.h> #include <system/dll.h>
#include <initializer_list> #include <initializer_list>
#include <vector> #include <vector>
#include <array/DataType.h> #include <array/DataType.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdlib.h> #include <stdlib.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT ExtraArguments { class ND4J_EXPORT ExtraArguments {
private: private:
std::vector<double> _fpArgs; std::vector<double> _fpArgs;
@ -54,7 +54,7 @@ namespace nd4j {
template <typename T> template <typename T>
void* argumentsAsT(Nd4jLong offset = 0); void* argumentsAsT(Nd4jLong offset = 0);
void* argumentsAsT(nd4j::DataType dataType, Nd4jLong offset = 0); void* argumentsAsT(sd::DataType dataType, Nd4jLong offset = 0);
size_t length(); size_t length();
}; };

View File

@ -18,7 +18,7 @@
// @author raver119@gmail.com // @author raver119@gmail.com
// //
#include <dll.h> #include <system/dll.h>
#include <array/DataBuffer.h> #include <array/DataBuffer.h>
#include <array/DataType.h> #include <array/DataType.h>
#include <memory> #include <memory>
@ -26,7 +26,7 @@
#ifndef LIBND4J_INTEROPDATABUFFER_H #ifndef LIBND4J_INTEROPDATABUFFER_H
#define LIBND4J_INTEROPDATABUFFER_H #define LIBND4J_INTEROPDATABUFFER_H
namespace nd4j { namespace sd {
/** /**
* This class is a wrapper for DataBuffer, suitable for sharing DataBuffer between front-end and back-end languages * This class is a wrapper for DataBuffer, suitable for sharing DataBuffer between front-end and back-end languages
*/ */
@ -37,7 +37,7 @@ namespace nd4j {
public: public:
InteropDataBuffer(InteropDataBuffer &dataBuffer, uint64_t length, uint64_t offset); InteropDataBuffer(InteropDataBuffer &dataBuffer, uint64_t length, uint64_t offset);
InteropDataBuffer(std::shared_ptr<DataBuffer> databuffer); InteropDataBuffer(std::shared_ptr<DataBuffer> databuffer);
InteropDataBuffer(size_t elements, nd4j::DataType dtype, bool allocateBoth); InteropDataBuffer(size_t elements, sd::DataType dtype, bool allocateBoth);
~InteropDataBuffer() = default; ~InteropDataBuffer() = default;
#ifndef __JAVACPP_HACK__ #ifndef __JAVACPP_HACK__
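
A minimal sketch of the element-count constructor declared above; the size and the flag value are illustrative, and allocateBoth is presumed to control whether both host and device buffers are allocated:

    void interopBufferSketch() {
        // 256 float elements shared between the front-end and back-end sides
        sd::InteropDataBuffer ib(256, sd::DataType::FLOAT32, /*allocateBoth=*/true);
    }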

View File

@ -17,11 +17,11 @@
#ifndef NDARRAY_H #ifndef NDARRAY_H
#define NDARRAY_H #define NDARRAY_H
#include <dll.h> #include <system/dll.h>
#include <initializer_list> #include <initializer_list>
#include <functional> #include <functional>
#include <shape.h> #include <helpers/shape.h>
#include "NativeOpExecutioner.h" #include "legacy/NativeOpExecutioner.h"
#include <indexing/NDIndex.h> #include <indexing/NDIndex.h>
#include <indexing/IndicesList.h> #include <indexing/IndicesList.h>
#include <graph/Intervals.h> #include <graph/Intervals.h>
@ -32,13 +32,13 @@
#include <array/ArrayType.h> #include <array/ArrayType.h>
#include <array/ResultSet.h> #include <array/ResultSet.h>
#include <helpers/ShapeBuilders.h> #include <helpers/ShapeBuilders.h>
#include <op_enums.h> #include <system/op_enums.h>
#include <ops/BroadcastOpsTuple.h> #include <ops/BroadcastOpsTuple.h>
#include <ops/BroadcastBoolOpsTuple.h> #include <ops/BroadcastBoolOpsTuple.h>
#include <ops/BroadcastIntOpsTuple.h> #include <ops/BroadcastIntOpsTuple.h>
#include <array/ExtraArguments.h> #include <array/ExtraArguments.h>
#include <Status.h> #include <graph/Status.h>
#include <ShapeDescriptor.h> #include <array/ShapeDescriptor.h>
#include <helpers/ConstantShapeHelper.h> #include <helpers/ConstantShapeHelper.h>
#include <array/DataBuffer.h> #include <array/DataBuffer.h>
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
@ -47,7 +47,7 @@
#include <memory/MemoryCounter.h> #include <memory/MemoryCounter.h>
namespace nd4j { namespace sd {
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type> template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator+(const NDArray& arr, const T& scalar); ND4J_EXPORT NDArray operator+(const NDArray& arr, const T& scalar);
@ -116,7 +116,7 @@ namespace nd4j {
void templatedSet(void *buffer, const Nd4jLong xOffset, const void *value); void templatedSet(void *buffer, const Nd4jLong xOffset, const void *value);
template <typename T> template <typename T>
void templatedSet(void *buffer, const Nd4jLong xOfsset, nd4j::DataType dtype, const void *value); void templatedSet(void *buffer, const Nd4jLong xOfsset, sd::DataType dtype, const void *value);
template <typename T> template <typename T>
void templatedAssign(void *xBuffer, const Nd4jLong xOffset, const void *yBuffer, const Nd4jLong yOffset) const; void templatedAssign(void *xBuffer, const Nd4jLong xOffset, const void *yBuffer, const Nd4jLong yOffset) const;
@ -161,7 +161,7 @@ namespace nd4j {
/** /**
* pointer to the device launch context (with all data needed there). * pointer to the device launch context (with all data needed there).
*/ */
nd4j::LaunchContext * _context = nd4j::LaunchContext::defaultContext(); sd::LaunchContext * _context = sd::LaunchContext::defaultContext();
// indicates if array's buffer is within workspace // indicates if array's buffer is within workspace
bool _isAttached = false; bool _isAttached = false;
@ -174,7 +174,7 @@ namespace nd4j {
/** /**
* type of array elements * type of array elements
*/ */
nd4j::DataType _dataType = FLOAT32; sd::DataType _dataType = FLOAT32;
/** /**
* deviceID where this NDArray belongs to * deviceID where this NDArray belongs to
@ -191,72 +191,72 @@ namespace nd4j {
* do not allocate memory, memory for array is passed from outside * do not allocate memory, memory for array is passed from outside
*/ */
#ifndef __JAVACPP_HACK__ #ifndef __JAVACPP_HACK__
NDArray(std::shared_ptr<DataBuffer> buffer, const ShapeDescriptor& descriptor, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext(), const Nd4jLong offset = 0); NDArray(std::shared_ptr<DataBuffer> buffer, const ShapeDescriptor& descriptor, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const Nd4jLong offset = 0);
NDArray(std::shared_ptr<DataBuffer> buffer, const char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(std::shared_ptr<DataBuffer> buffer, const char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates a scalar array containing a utf8 string * This constructor creates a scalar array containing a utf8 string
* *
*/ */
NDArray(const char* str, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()) NDArray(const char* str, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext())
: NDArray(std::string(str), dtype, context) { : NDArray(std::string(str), dtype, context) {
} }
NDArray(const std::string& string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::string& string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates a scalar array containing a utf16 string * This constructor creates a scalar array containing a utf16 string
* *
*/ */
NDArray(const char16_t* u16string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()) NDArray(const char16_t* u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext())
: NDArray(std::u16string(u16string), dtype, context) { : NDArray(std::u16string(u16string), dtype, context) {
} }
NDArray(const std::u16string& u16string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::u16string& u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates a scalar array containing a utf32 string * This constructor creates a scalar array containing a utf32 string
* *
*/ */
NDArray(const char32_t* u32string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()) NDArray(const char32_t* u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext())
: NDArray(std::u32string(u32string), dtype, context) { : NDArray(std::u32string(u32string), dtype, context) {
} }
NDArray(const std::u32string& u32string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::u32string& u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates an array from a vector of utf8 strings * This constructor creates an array from a vector of utf8 strings
* *
*/ */
NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char*>& strings, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::string>& string, nd4j::DataType dtype = nd4j::DataType::UTF8, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::string>& string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates an array from a vector of utf16 strings * This constructor creates an array from a vector of utf16 strings
* *
*/ */
NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, nd4j::DataType dtype = nd4j::DataType::UTF16, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates an array from a vector of utf32 strings * This constructor creates an array from a vector of utf32 strings
* *
*/ */
NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, nd4j::DataType dtype = nd4j::DataType::UTF32, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
#endif #endif
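
A minimal sketch of the string constructors declared above (scalar utf8 string plus a vector of utf8 strings); names and values are illustrative:

    void stringArraySketch() {
        sd::NDArray scalarStr("hello");                                     // scalar UTF8 array
        sd::NDArray vecStr(std::vector<Nd4jLong>{2},
                           std::vector<std::string>{"foo", "bar"});         // 1-D UTF8 array of two strings
    }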
/** /**
* do not allocate memory, memory for array is passed from outside * do not allocate memory, memory for array is passed from outside
*/ */
NDArray(void *buffer, Nd4jLong* shapeInfo, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext(), const bool isBuffAlloc = false); NDArray(void *buffer, Nd4jLong* shapeInfo, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const bool isBuffAlloc = false);
/** /**
* do not allocate memory, memory for array is passed from outside * do not allocate memory, memory for array is passed from outside
* we suppose the content of both (device and host) buffers is identical * we suppose the content of both (device and host) buffers is identical
*/ */
NDArray(void *buffer, void *bufferD, Nd4jLong* shapeInfo, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext(), const bool isBuffAlloc = false, const bool isBuffDAlloc = false); NDArray(void *buffer, void *bufferD, Nd4jLong* shapeInfo, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const bool isBuffAlloc = false, const bool isBuffDAlloc = false);
/** /**
* copy constructor * copy constructor
@ -271,34 +271,34 @@ namespace nd4j {
/** /**
* constructor, create array stored at given workspace * constructor, create array stored at given workspace
*/ */
NDArray(nd4j::LaunchContext * context); NDArray(sd::LaunchContext * context);
/** /**
* constructor creates a new NDArray using shape information from "shapeInfo"; sets all elements of the new array to zero; if copyStrides is true, stride values from "shapeInfo" are used, otherwise strides are calculated independently * constructor creates a new NDArray using shape information from "shapeInfo"; sets all elements of the new array to zero; if copyStrides is true, stride values from "shapeInfo" are used, otherwise strides are calculated independently
*/ */
NDArray(Nd4jLong* shapeInfo, const bool copyStrides = false, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(Nd4jLong* shapeInfo, const bool copyStrides = false, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* constructor creates a new NDArray using shape information from "shapeInfo"; sets all elements of the new array to zero; if copyStrides is true, stride values from "shapeInfo" are used, otherwise strides are calculated independently * constructor creates a new NDArray using shape information from "shapeInfo"; sets all elements of the new array to zero; if copyStrides is true, stride values from "shapeInfo" are used, otherwise strides are calculated independently
* set dtype as array type * set dtype as array type
*/ */
NDArray(Nd4jLong* shapeInfo, const nd4j::DataType dtype, const bool copyStrides = false, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(Nd4jLong* shapeInfo, const sd::DataType dtype, const bool copyStrides = false, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* this constructor creates new array using shape information contained in vector argument * this constructor creates new array using shape information contained in vector argument
*/ */
NDArray(const char order, const std::vector<Nd4jLong> &shape, nd4j::DataType dtype = DOUBLE, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const char order, const std::vector<Nd4jLong> &shape, sd::DataType dtype = DOUBLE, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* This constructor creates new array with elements copied from data and using shape information stored in shape, elements from data will be casted to dtype * This constructor creates new array with elements copied from data and using shape information stored in shape, elements from data will be casted to dtype
*/ */
NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<double>& data, nd4j::DataType dtype = DOUBLE, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext()); NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<double>& data, sd::DataType dtype = DOUBLE, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/** /**
* this constructor creates new array using given buffer (without memory allocation) and shape information stored in shape * this constructor creates new array using given buffer (without memory allocation) and shape information stored in shape
*/ */
NDArray(void *buffer, const char order, const std::vector<Nd4jLong> &shape, nd4j::DataType dtype, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext(), const bool isBuffAlloc = false); NDArray(void *buffer, const char order, const std::vector<Nd4jLong> &shape, sd::DataType dtype, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const bool isBuffAlloc = false);
/** /**
* This method returns new array with the same shape & data type * This method returns new array with the same shape & data type
@ -317,12 +317,12 @@ namespace nd4j {
* this constructor creates new NDArray with shape matching "other" array, * this constructor creates new NDArray with shape matching "other" array,
* doesn't copy "other" elements into new array !!! * doesn't copy "other" elements into new array !!!
*/ */
explicit NDArray(const NDArray* other, const bool copyStrides = false, nd4j::LaunchContext* context = nd4j::LaunchContext ::defaultContext()); explicit NDArray(const NDArray* other, const bool copyStrides = false, sd::LaunchContext* context = sd::LaunchContext ::defaultContext());
/** /**
* this constructor creates a scalar (and sets its value = 0) or an empty array, depending on the bool argument isScalar * this constructor creates a scalar (and sets its value = 0) or an empty array, depending on the bool argument isScalar
*/ */
NDArray(nd4j::DataType dtype, nd4j::LaunchContext* context = nd4j::LaunchContext::defaultContext(), const bool isScalar = true); NDArray(sd::DataType dtype, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const bool isScalar = true);
/** /**
* This method blocks until asynchronous operation finishes * This method blocks until asynchronous operation finishes
@ -392,7 +392,7 @@ namespace nd4j {
void operator delete(void* p); void operator delete(void* p);
void setContext(nd4j::LaunchContext * context); void setContext(sd::LaunchContext * context);
/** /**
* create a new array by replicating current array by repeats times along given dimension * create a new array by replicating current array by repeats times along given dimension
@ -438,7 +438,7 @@ namespace nd4j {
/** /**
* returns _context * returns _context
*/ */
nd4j::LaunchContext * getContext() const { sd::LaunchContext * getContext() const {
return _context; return _context;
} }
@ -626,17 +626,17 @@ namespace nd4j {
* keepDims - if true then put unities in place of reduced dimensions * keepDims - if true then put unities in place of reduced dimensions
*/ */
NDArray reduceAlongDimension(nd4j::reduce::FloatOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::FloatOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::FloatOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::FloatOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::SameOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::SameOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::SameOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::SameOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::BoolOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::BoolOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::BoolOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::BoolOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::LongOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::LongOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
NDArray reduceAlongDimension(nd4j::reduce::LongOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; NDArray reduceAlongDimension(sd::reduce::LongOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
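For illustration, a minimal usage sketch of the dimension-wise reductions declared above (not part of the header itself); the enum constants sd::reduce::Mean (FloatOps) and sd::reduce::Sum (SameOps) and the element accessor NDArray::e<T>() are assumed from the existing API:

```cpp
// Minimal sketch: reduce a 2x3 array along a dimension.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void reduceAlongDimensionSketch() {
    auto x = sd::NDArrayFactory::create<float>('c', {2, 3}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});

    // FloatOps overload: mean over dimension 0 -> shape [3]
    auto colMeans = x.reduceAlongDimension(sd::reduce::Mean, {0});

    // SameOps overload with keepDims = true: sum over dimension 1 -> shape [2, 1]
    auto rowSums = x.reduceAlongDimension(sd::reduce::Sum, {1}, true);

    float m0 = colMeans.e<float>(0);   // expected 2.5f ((1 + 4) / 2)
    float s0 = rowSums.e<float>(0);    // expected 6.0f  (1 + 2 + 3)
}
```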
/** /**
* method reduces array by excluding its shapes along dimensions present in given dimensions vector * method reduces array by excluding its shapes along dimensions present in given dimensions vector
@ -645,37 +645,37 @@ namespace nd4j {
* keepDims - if true then put unities in place of reduced dimensions * keepDims - if true then put unities in place of reduced dimensions
* extras - extra parameters * extras - extra parameters
*/ */
void reduceAlongDimension(nd4j::reduce::FloatOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const; void reduceAlongDimension(sd::reduce::FloatOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
void reduceAlongDimension(nd4j::reduce::SameOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const; void reduceAlongDimension(sd::reduce::SameOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
void reduceAlongDimension(nd4j::reduce::BoolOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const; void reduceAlongDimension(sd::reduce::BoolOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
void reduceAlongDimension(nd4j::reduce::LongOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const; void reduceAlongDimension(sd::reduce::LongOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
/** /**
* return variance of array elements set * return variance of array elements set
* biasCorrected - if true bias correction will be applied * biasCorrected - if true bias correction will be applied
*/ */
NDArray varianceNumber(nd4j::variance::Ops op, bool biasCorrected = true); NDArray varianceNumber(sd::variance::Ops op, bool biasCorrected = true);
/** /**
* apply scalar operation to array * apply scalar operation to array
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
* returns scalar array * returns scalar array
*/ */
NDArray reduceNumber(nd4j::reduce::FloatOps ops, void *extraParams = nullptr) const; NDArray reduceNumber(sd::reduce::FloatOps ops, void *extraParams = nullptr) const;
NDArray reduceNumber(nd4j::reduce::SameOps ops, void *extraParams = nullptr) const; NDArray reduceNumber(sd::reduce::SameOps ops, void *extraParams = nullptr) const;
NDArray reduceNumber(nd4j::reduce::BoolOps ops, void *extraParams = nullptr) const; NDArray reduceNumber(sd::reduce::BoolOps ops, void *extraParams = nullptr) const;
NDArray reduceNumber(nd4j::reduce::LongOps ops, void *extraParams = nullptr) const; NDArray reduceNumber(sd::reduce::LongOps ops, void *extraParams = nullptr) const;
void reduceNumber(nd4j::reduce::FloatOps ops, NDArray& target, void *extraParams = nullptr) const; void reduceNumber(sd::reduce::FloatOps ops, NDArray& target, void *extraParams = nullptr) const;
void reduceNumber(nd4j::reduce::SameOps ops, NDArray& target, void *extraParams = nullptr) const; void reduceNumber(sd::reduce::SameOps ops, NDArray& target, void *extraParams = nullptr) const;
void reduceNumber(nd4j::reduce::BoolOps ops, NDArray& target, void *extraParams = nullptr) const; void reduceNumber(sd::reduce::BoolOps ops, NDArray& target, void *extraParams = nullptr) const;
void reduceNumber(nd4j::reduce::LongOps ops, NDArray& target, void *extraParams = nullptr) const; void reduceNumber(sd::reduce::LongOps ops, NDArray& target, void *extraParams = nullptr) const;
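A minimal sketch of the whole-array reductions above, assuming the sd::reduce::Mean (FloatOps) and sd::reduce::Max (SameOps) enum values and NDArray::e<T>() from the existing API:

```cpp
// Minimal sketch: reduce an entire array to a scalar NDArray.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void reduceNumberSketch() {
    auto x = sd::NDArrayFactory::create<float>('c', {4}, {1.f, 2.f, 3.f, 4.f});

    auto mean = x.reduceNumber(sd::reduce::Mean);   // scalar NDArray, value 2.5
    auto max  = x.reduceNumber(sd::reduce::Max);    // scalar NDArray, value 4.0

    float m = mean.e<float>(0);
    float M = max.e<float>(0);
}
```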
/** /**
* returns element index which corresponds to some condition imposed by operation * returns element index which corresponds to some condition imposed by operation
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
NDArray indexReduceNumber(nd4j::indexreduce::Ops op, ExtraArguments *extraParams = nullptr); NDArray indexReduceNumber(sd::indexreduce::Ops op, ExtraArguments *extraParams = nullptr);
/** /**
* returns index of max element in a given array (optionally: along given dimension(s)) * returns index of max element in a given array (optionally: along given dimension(s))
@ -687,31 +687,31 @@ namespace nd4j {
void makeBothActual() const { syncToDevice(); syncToHost(); } void makeBothActual() const { syncToDevice(); syncToHost(); }
void applyTransform(nd4j::transform::FloatOps op, NDArray& target, ExtraArguments *extraParams = nullptr); void applyTransform(sd::transform::FloatOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
void applyTransform(nd4j::transform::SameOps op, NDArray& target, ExtraArguments *extraParams = nullptr); void applyTransform(sd::transform::SameOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
void applyTransform(nd4j::transform::AnyOps op, NDArray& target, ExtraArguments *extraParams = nullptr); void applyTransform(sd::transform::AnyOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
void applyTransform(nd4j::transform::BoolOps op, NDArray& target, ExtraArguments *extraParams = nullptr); void applyTransform(sd::transform::BoolOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
void applyTransform(nd4j::transform::StrictOps op, NDArray& target, ExtraArguments *extraParams = nullptr); void applyTransform(sd::transform::StrictOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
/** /**
* apply OpName transformation to this array and store result in new array to be returned * apply OpName transformation to this array and store result in new array to be returned
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
NDArray transform(nd4j::transform::FloatOps op, void *extraParams = nullptr) const &; NDArray transform(sd::transform::FloatOps op, void *extraParams = nullptr) const &;
NDArray transform(nd4j::transform::SameOps op, void *extraParams = nullptr) const &; NDArray transform(sd::transform::SameOps op, void *extraParams = nullptr) const &;
NDArray transform(nd4j::transform::BoolOps op, void *extraParams = nullptr) const &; NDArray transform(sd::transform::BoolOps op, void *extraParams = nullptr) const &;
NDArray transform(nd4j::transform::StrictOps op, void *extraParams = nullptr) const &; NDArray transform(sd::transform::StrictOps op, void *extraParams = nullptr) const &;
NDArray transform(nd4j::transform::FloatOps op, void *extraParams = nullptr) &&; NDArray transform(sd::transform::FloatOps op, void *extraParams = nullptr) &&;
NDArray transform(nd4j::transform::SameOps op, void *extraParams = nullptr) &&; NDArray transform(sd::transform::SameOps op, void *extraParams = nullptr) &&;
NDArray transform(nd4j::transform::BoolOps op, void *extraParams = nullptr) &&; NDArray transform(sd::transform::BoolOps op, void *extraParams = nullptr) &&;
NDArray transform(nd4j::transform::StrictOps op, void *extraParams = nullptr) &&; NDArray transform(sd::transform::StrictOps op, void *extraParams = nullptr) &&;
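A minimal sketch of the out-of-place transform() versus the in-place applyTransform() declared above; the sd::transform::Abs and sd::transform::Exp enum values are assumed from the existing op lists:

```cpp
// Minimal sketch: element-wise transforms.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void transformSketch() {
    auto x = sd::NDArrayFactory::create<float>('c', {3}, {-1.f, 0.f, 2.f});

    // returns a new array holding |x|
    auto absX = x.transform(sd::transform::Abs);

    // writes exp(x) into a preallocated target of matching shape and type
    auto target = sd::NDArrayFactory::create<float>('c', {3});
    x.applyTransform(sd::transform::Exp, target);
}
```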
/** /**
* apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in this array * apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in this array
* other - second array necessary for pairwise operation * other - second array necessary for pairwise operation
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
void applyPairwiseTransform(nd4j::pairwise::Ops op, const NDArray& other, ExtraArguments *extraParams = nullptr); void applyPairwiseTransform(sd::pairwise::Ops op, const NDArray& other, ExtraArguments *extraParams = nullptr);
/** /**
* apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in target array * apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in target array
@ -719,11 +719,11 @@ namespace nd4j {
* target - where to store result * target - where to store result
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
void applyPairwiseTransform(nd4j::pairwise::Ops op, const NDArray& other, NDArray& target, ExtraArguments *extraParams = nullptr) const; void applyPairwiseTransform(sd::pairwise::Ops op, const NDArray& other, NDArray& target, ExtraArguments *extraParams = nullptr) const;
void applyPairwiseTransform(nd4j::pairwise::BoolOps op, const NDArray& other, NDArray& target, ExtraArguments *extraParams = nullptr) const; void applyPairwiseTransform(sd::pairwise::BoolOps op, const NDArray& other, NDArray& target, ExtraArguments *extraParams = nullptr) const;
void applyPairwiseTransform(nd4j::pairwise::IntOps op, const NDArray& other, NDArray&target, ExtraArguments *extraParams = nullptr) const; void applyPairwiseTransform(sd::pairwise::IntOps op, const NDArray& other, NDArray&target, ExtraArguments *extraParams = nullptr) const;
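A minimal sketch of both pairwise overloads, assuming the sd::pairwise::Add and sd::pairwise::Multiply enum values:

```cpp
// Minimal sketch: element-wise pairwise ops, in-place and into a separate target.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void pairwiseSketch() {
    auto a = sd::NDArrayFactory::create<float>('c', {2, 2}, {1.f, 2.f, 3.f, 4.f});
    auto b = sd::NDArrayFactory::create<float>('c', {2, 2}, {10.f, 20.f, 30.f, 40.f});

    // in-place: a = a + b
    a.applyPairwiseTransform(sd::pairwise::Add, b);

    // into a target: c = a * b
    auto c = sd::NDArrayFactory::create<float>('c', {2, 2});
    a.applyPairwiseTransform(sd::pairwise::Multiply, b, c);
}
```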
/** /**
* apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this) * apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this)
@ -732,23 +732,23 @@ namespace nd4j {
* target - where to store result * target - where to store result
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
void applyBroadcast(nd4j::broadcast::Ops op, const std::initializer_list<int> dimensions, const NDArray& tad, NDArray& target, ExtraArguments* extraArgs = nullptr); void applyBroadcast(sd::broadcast::Ops op, const std::initializer_list<int> dimensions, const NDArray& tad, NDArray& target, ExtraArguments* extraArgs = nullptr);
void applyBroadcast(nd4j::broadcast::Ops op, const std::vector<int> &dimensions, const NDArray &tad, NDArray &target, ExtraArguments *extraArgs = nullptr); void applyBroadcast(sd::broadcast::Ops op, const std::vector<int> &dimensions, const NDArray &tad, NDArray &target, ExtraArguments *extraArgs = nullptr);
void applyBroadcast(nd4j::broadcast::BoolOps op, const std::vector<int> &dimensions, const NDArray &tad, NDArray &target, ExtraArguments *extraArgs = nullptr); void applyBroadcast(sd::broadcast::BoolOps op, const std::vector<int> &dimensions, const NDArray &tad, NDArray &target, ExtraArguments *extraArgs = nullptr);
void applyBroadcast(nd4j::broadcast::IntOps op, const std::vector<int> &dimensions, const NDArray& tad, NDArray &target, ExtraArguments *extraArgs = nullptr); void applyBroadcast(sd::broadcast::IntOps op, const std::vector<int> &dimensions, const NDArray& tad, NDArray &target, ExtraArguments *extraArgs = nullptr);
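A minimal sketch of applyBroadcast(), adding a row vector to every row of a matrix; the sd::broadcast::Add enum value is assumed:

```cpp
// Minimal sketch: broadcast a row vector across a matrix along dimension 1.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void applyBroadcastSketch() {
    auto matrix = sd::NDArrayFactory::create<float>('c', {2, 3}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    auto row    = sd::NDArrayFactory::create<float>('c', {3},    {10.f, 20.f, 30.f});
    auto result = sd::NDArrayFactory::create<float>('c', {2, 3});

    // dimension {1}: the length-3 vector lines up with the second axis of the matrix
    matrix.applyBroadcast(sd::broadcast::Add, {1}, row, result);
}
```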
/** /**
* apply operation which requires broadcasting: broadcast one tensor along another; this method also checks whether broadcasting is possible * apply operation which requires broadcasting: broadcast one tensor along another; this method also checks whether broadcasting is possible
* other - input array * other - input array
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
NDArray applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray& other, ExtraArguments *extraArgs = nullptr) const &; NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray& other, ExtraArguments *extraArgs = nullptr) const &;
NDArray applyTrueBroadcast(nd4j::BroadcastOpsTuple op, NDArray&& other, ExtraArguments *extraArgs = nullptr) const &; NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, NDArray&& other, ExtraArguments *extraArgs = nullptr) const &;
NDArray applyTrueBroadcast(nd4j::BroadcastOpsTuple op, NDArray&& other, ExtraArguments *extraArgs = nullptr) &&; NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, NDArray&& other, ExtraArguments *extraArgs = nullptr) &&;
NDArray applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray& other, ExtraArguments *extraArgs = nullptr) &&; NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray& other, ExtraArguments *extraArgs = nullptr) &&;
/** /**
* apply operation which requires broadcasting: broadcast one tensor along another; this method also checks whether broadcasting is possible * apply operation which requires broadcasting: broadcast one tensor along another; this method also checks whether broadcasting is possible
@ -757,11 +757,11 @@ namespace nd4j {
* checkTargetShape - if true check whether target shape is suitable for broadcasting * checkTargetShape - if true check whether target shape is suitable for broadcasting
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
void applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const; void applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const;
void applyTrueBroadcast(nd4j::BroadcastBoolOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const; void applyTrueBroadcast(sd::BroadcastBoolOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const;
void applyTrueBroadcast(nd4j::BroadcastIntOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const; void applyTrueBroadcast(sd::BroadcastIntOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const;
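A minimal sketch of applyTrueBroadcast() between differently shaped operands; the static helper sd::BroadcastOpsTuple::Add() is assumed from the existing API:

```cpp
// Minimal sketch: true broadcast of a [2, 1] column against a [2, 3] matrix.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void trueBroadcastSketch() {
    auto matrix = sd::NDArrayFactory::create<float>('c', {2, 3}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    auto column = sd::NDArrayFactory::create<float>('c', {2, 1}, {10.f, 20.f});

    // returns a new [2, 3] array; shape compatibility is validated before execution
    auto sum = matrix.applyTrueBroadcast(sd::BroadcastOpsTuple::Add(), column);
}
```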
/** /**
@ -771,13 +771,13 @@ namespace nd4j {
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
template <typename T> template <typename T>
void applyScalar(nd4j::scalar::Ops op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr); void applyScalar(sd::scalar::Ops op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr);
template <typename T> template <typename T>
void applyScalar(nd4j::scalar::BoolOps op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const; void applyScalar(sd::scalar::BoolOps op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
template <typename T> template <typename T>
void applyScalar(nd4j::scalar::IntOps op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const; void applyScalar(sd::scalar::IntOps op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
/** /**
* apply a scalar operation to an array * apply a scalar operation to an array
@ -785,11 +785,11 @@ namespace nd4j {
* target - where to store result * target - where to store result
* extraParams - extra parameters for operation * extraParams - extra parameters for operation
*/ */
void applyScalarArr(nd4j::scalar::Ops op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr); void applyScalarArr(sd::scalar::Ops op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr);
void applyScalarArr(nd4j::scalar::BoolOps op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const; void applyScalarArr(sd::scalar::BoolOps op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
void applyScalarArr(nd4j::scalar::IntOps op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const; void applyScalarArr(sd::scalar::IntOps op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
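A minimal sketch contrasting applyScalar() (plain C++ scalar) with applyScalarArr() (scalar wrapped in an NDArray); the sd::scalar::Multiply and sd::scalar::Add enum values are assumed:

```cpp
// Minimal sketch: scalar operations with a raw value and with a scalar NDArray.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void scalarSketch() {
    auto x      = sd::NDArrayFactory::create<float>('c', {3}, {1.f, 2.f, 3.f});
    auto target = sd::NDArrayFactory::create<float>('c', {3});

    // plain C++ scalar: target = x * 2
    x.applyScalar(sd::scalar::Multiply, 2.f, target);

    // scalar wrapped in an NDArray: target = x + 5
    auto five = sd::NDArrayFactory::create<float>(5.f);
    x.applyScalarArr(sd::scalar::Add, five, target);
}
```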
#if defined(__CUDABLAS__) //&& defined(BUILD_TESTS) #if defined(__CUDABLAS__) //&& defined(BUILD_TESTS)
template <typename Lambda> template <typename Lambda>
@ -840,7 +840,7 @@ namespace nd4j {
* dimensions - vector of dimensions to reduce along * dimensions - vector of dimensions to reduce along
* extraArgs - extra parameters for operation * extraArgs - extra parameters for operation
*/ */
NDArray applyIndexReduce(nd4j::indexreduce::Ops op, const std::vector<int>& dimensions, const ExtraArguments *extraParams = nullptr) const; NDArray applyIndexReduce(sd::indexreduce::Ops op, const std::vector<int>& dimensions, const ExtraArguments *extraParams = nullptr) const;
/** /**
* reduces dimensions in array relying on index operation OpName * reduces dimensions in array relying on index operation OpName
@ -848,14 +848,14 @@ namespace nd4j {
* dimensions - vector of dimensions to reduce along * dimensions - vector of dimensions to reduce along
* extraArgs - extra parameters for operation * extraArgs - extra parameters for operation
*/ */
void applyIndexReduce(nd4j::indexreduce::Ops op, NDArray& target, const std::vector<int>& dimensions, const ExtraArguments *extraParams = nullptr) const; void applyIndexReduce(sd::indexreduce::Ops op, NDArray& target, const std::vector<int>& dimensions, const ExtraArguments *extraParams = nullptr) const;
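A minimal sketch of applyIndexReduce() as an argmax along a dimension; the sd::indexreduce::IndexMax enum value and NDArray::e<T>() are assumed:

```cpp
// Minimal sketch: per-row argmax of a 2x3 array.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void indexReduceSketch() {
    auto x = sd::NDArrayFactory::create<float>('c', {2, 3}, {1.f, 9.f, 3.f, 7.f, 5.f, 6.f});

    // index of the maximum within each row -> shape [2]
    auto argMax = x.applyIndexReduce(sd::indexreduce::IndexMax, {1});

    auto row0 = argMax.e<Nd4jLong>(0);   // expected 1 (the 9.f)
    auto row1 = argMax.e<Nd4jLong>(1);   // expected 0 (the 7.f)
}
```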
/** /**
* apply reduce3 operation OpName to this and other array, return result in new output array * apply reduce3 operation OpName to this and other array, return result in new output array
* other - input array * other - input array
* extraArgs - extra parameters for operation * extraArgs - extra parameters for operation
*/ */
NDArray applyReduce3(nd4j::reduce3::Ops op, const NDArray& other, const ExtraArguments* extraParams = nullptr) const; NDArray applyReduce3(sd::reduce3::Ops op, const NDArray& other, const ExtraArguments* extraParams = nullptr) const;
/** /**
* apply reduce3 operation OpName to this and other array, return result in new output array * apply reduce3 operation OpName to this and other array, return result in new output array
@ -863,7 +863,7 @@ namespace nd4j {
* dimensions - vector of dimensions to reduce along (tads not axis) * dimensions - vector of dimensions to reduce along (tads not axis)
* extraArgs - extra parameters for operation * extraArgs - extra parameters for operation
*/ */
NDArray applyAllReduce3(nd4j::reduce3::Ops op, const NDArray& other, const std::vector<int>& dimensions, const ExtraArguments* extraParams = nullptr) const; NDArray applyAllReduce3(sd::reduce3::Ops op, const NDArray& other, const std::vector<int>& dimensions, const ExtraArguments* extraParams = nullptr) const;
/** /**
* apply reduce3 (exec) operation OpName to this and other array, return result in new output array * apply reduce3 (exec) operation OpName to this and other array, return result in new output array
@ -871,18 +871,18 @@ namespace nd4j {
* dimensions - vector of dimensions to reduce along (same as reduceAlongDimension) * dimensions - vector of dimensions to reduce along (same as reduceAlongDimension)
* extraArgs - extra parameters for operation * extraArgs - extra parameters for operation
*/ */
NDArray applyReduce3(nd4j::reduce3::Ops op, const NDArray& other, const std::vector<int>& dimensions, const ExtraArguments* extraParams = nullptr) const; NDArray applyReduce3(sd::reduce3::Ops op, const NDArray& other, const std::vector<int>& dimensions, const ExtraArguments* extraParams = nullptr) const;
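A minimal sketch of the reduce3 family, which combines two arrays into a single statistic; the sd::reduce3::Dot and sd::reduce3::EuclideanDistance enum values and NDArray::e<T>() are assumed:

```cpp
// Minimal sketch: dot product and distance between two vectors.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void reduce3Sketch() {
    auto x = sd::NDArrayFactory::create<float>('c', {3}, {1.f, 2.f, 3.f});
    auto y = sd::NDArrayFactory::create<float>('c', {3}, {4.f, 5.f, 6.f});

    // scalar result: dot(x, y) = 1*4 + 2*5 + 3*6 = 32
    auto dot = x.applyReduce3(sd::reduce3::Dot, y);

    // same operation with an explicit dimension list; for 1-D inputs this is still a scalar
    auto dist = x.applyReduce3(sd::reduce3::EuclideanDistance, y, {0});

    float d = dot.e<float>(0);
}
```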
/** /**
* returns variance along given dimensions * returns variance along given dimensions
* biasCorrected - if true bias correction will be applied * biasCorrected - if true bias correction will be applied
* dimensions - vector of dimensions to calculate variance along * dimensions - vector of dimensions to calculate variance along
*/ */
NDArray varianceAlongDimension(nd4j::variance::Ops op, const bool biasCorrected, const std::vector<int>& dimensions) const; NDArray varianceAlongDimension(sd::variance::Ops op, const bool biasCorrected, const std::vector<int>& dimensions) const;
NDArray varianceAlongDimension(nd4j::variance::Ops op, const bool biasCorrected, const std::initializer_list<int>& dimensions) const; NDArray varianceAlongDimension(sd::variance::Ops op, const bool biasCorrected, const std::initializer_list<int>& dimensions) const;
void varianceAlongDimension(nd4j::variance::Ops op, NDArray& target, const bool biasCorrected, const std::vector<int>& dimensions) const; void varianceAlongDimension(sd::variance::Ops op, NDArray& target, const bool biasCorrected, const std::vector<int>& dimensions) const;
void varianceAlongDimension(nd4j::variance::Ops op, NDArray& target, const bool biasCorrected, const std::initializer_list<int>& dimensions) const; void varianceAlongDimension(sd::variance::Ops op, NDArray& target, const bool biasCorrected, const std::initializer_list<int>& dimensions) const;
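A minimal sketch of the variance reductions above; the sd::variance::SummaryStatsVariance and sd::variance::SummaryStatsStandardDeviation enum values are assumed from the existing op lists:

```cpp
// Minimal sketch: whole-array variance and per-dimension standard deviation.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void varianceSketch() {
    auto x = sd::NDArrayFactory::create<float>('c', {2, 3}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});

    // scalar, bias-corrected (sample) variance of all elements
    auto var = x.varianceNumber(sd::variance::SummaryStatsVariance, true);

    // per-column standard deviation (biasCorrected = false) -> shape [3]
    auto stdDev = x.varianceAlongDimension(sd::variance::SummaryStatsStandardDeviation, false, {0});
}
```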
#endif #endif
@ -1224,7 +1224,7 @@ namespace nd4j {
* set _shapeInfo * set _shapeInfo
*/ */
void setShapeInfo(const Nd4jLong *shapeInfo); void setShapeInfo(const Nd4jLong *shapeInfo);
void setShapeInfo(const Nd4jLong *shapeInfo, const nd4j::DataType dtype); void setShapeInfo(const Nd4jLong *shapeInfo, const sd::DataType dtype);
void setShapeInfo(const ShapeDescriptor& descriptor); void setShapeInfo(const ShapeDescriptor& descriptor);
void setShapeInfo(const ConstantDataBuffer& shapeBuffer); void setShapeInfo(const ConstantDataBuffer& shapeBuffer);
@ -1271,7 +1271,7 @@ namespace nd4j {
* set _shapeInfo * set _shapeInfo
*/ */
FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo); FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo);
FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo, const nd4j::DataType dtype); FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo, const sd::DataType dtype);
/** /**
* returns the value of "dim" dimension * returns the value of "dim" dimension
@ -1537,13 +1537,13 @@ void NDArray::setShapeInfo(Nd4jLong *shapeInfo) {
_length = shape::length(_shapeInfo); _length = shape::length(_shapeInfo);
} }
else { else {
_dataType = nd4j::DataType::INHERIT; _dataType = sd::DataType::INHERIT;
_length = 0; _length = 0;
} }
} }
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
void NDArray::setShapeInfo(Nd4jLong *shapeInfo, const nd4j::DataType dtype) { void NDArray::setShapeInfo(Nd4jLong *shapeInfo, const sd::DataType dtype) {
auto buffer = ConstantShapeHelper::getInstance()->bufferForShapeInfo(shapeInfo); auto buffer = ConstantShapeHelper::getInstance()->bufferForShapeInfo(shapeInfo);
_shapeInfo = buffer.primaryAsT<Nd4jLong>(); _shapeInfo = buffer.primaryAsT<Nd4jLong>();
_shapeInfoD = buffer.specialAsT<Nd4jLong>(); _shapeInfoD = buffer.specialAsT<Nd4jLong>();
@ -1556,7 +1556,7 @@ void NDArray::setShapeInfo(Nd4jLong *shapeInfo, const nd4j::DataType dtype) {
_length = shape::length(_shapeInfo); _length = shape::length(_shapeInfo);
} }
else { else {
_dataType = nd4j::DataType::INHERIT; _dataType = sd::DataType::INHERIT;
_length = 0; _length = 0;
} }
} }
@ -1981,7 +1981,7 @@ Nd4jLong* NDArray::getSpecialShapeInfo() const{
#if defined(__CUDACC__) //&& defined(BUILD_TESTS) #if defined(__CUDACC__) //&& defined(BUILD_TESTS)
// for CUDA we still need this stuff inline // for CUDA we still need this stuff inline
#include "cuda/NDArrayLambda.hpp" #include <array/NDArrayLambda.hXX>
#endif #endif
} }


@ -0,0 +1,191 @@
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019-2020 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver119 on 2018-09-16.
// @author Oleg Semeniv <oleg.semeniv@gmail.com>
//
#ifndef DEV_TESTS_NDARRAYFACTORY_H
#define DEV_TESTS_NDARRAYFACTORY_H
#include <vector>
#include <initializer_list>
#include <array/NDArray.h>
//#include <memory/Workspace.h>
#include <execution/LaunchContext.h>
#include <string>
namespace sd {
class ND4J_EXPORT NDArrayFactory {
private:
template <typename T>
static void memcpyFromVector(void *ptr, const std::vector<T> &vector);
public:
template <typename T>
static NDArray* empty_(sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* empty_(sd::DataType dataType, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray empty(sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray empty(sd::DataType dataType, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray* valueOf(const std::initializer_list<Nd4jLong>& shape, T value, char order = 'c', sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray* valueOf(const std::vector<Nd4jLong>& shape, T value, char order = 'c', sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* valueOf(const std::vector<Nd4jLong>& shape, const NDArray& value, char order = 'c', sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray* linspace(T from, T to, Nd4jLong numElements);
template <typename T>
static NDArray* create_(const T value, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* create_(sd::DataType dtype, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(const T value, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray create(sd::DataType dtype, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(DataType type, const T scalar, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray* vector(Nd4jLong length, T startingValue = (T) 0, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray* create_(char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* create_( char order, const std::vector<Nd4jLong> &shape, sd::DataType dataType, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray* create_(char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray create(char order, const std::vector<Nd4jLong> &shape, sd::DataType dtype, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(const std::vector<T> &values, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
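A minimal sketch of the factory entry points declared above; NDArray::lengthOf() is assumed from the existing API, and the heap-allocating variants (trailing underscore) return pointers the caller owns:

```cpp
// Minimal sketch: common NDArrayFactory creation paths.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>

void factorySketch() {
    // 2x3 'c'-ordered float array from an explicit data vector
    auto a = sd::NDArrayFactory::create<float>('c', {2, 3}, {1.f, 2.f, 3.f, 4.f, 5.f, 6.f});

    // scalar array holding a single int value
    auto s = sd::NDArrayFactory::create<int>(42);

    // heap-allocated results must be deleted by the caller
    auto v = sd::NDArrayFactory::vector<float>(5, 1.f);         // length-5 vector filled with 1
    auto l = sd::NDArrayFactory::linspace<float>(0.f, 1.f, 11); // 11 evenly spaced points

    auto n = a.lengthOf();   // 6

    delete v;
    delete l;
}
```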
#ifndef __JAVACPP_HACK__
// this method is only available outside of javacpp
/**
* These factory methods create a vector of T
*
* @param values
*/
template <typename T>
static NDArray create(char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(T* buffer, char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
template <typename T>
static NDArray create(char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<T>& data, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
/**
* This method creates an NDArray from a .npy file
* @param fileName
* @return
*/
static NDArray fromNpyFile(const char *fileName);
/**
* This factory creates an array from a utf8 string
* @return NDArray default dataType UTF8
*/
static NDArray string(const char *string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* string_(const char *string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* string_(const std::string &string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray string(const std::string& string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
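A minimal sketch of the utf8 string factories above; NDArray::isS() (string-type check) is assumed from the existing API:

```cpp
// Minimal sketch: scalar UTF-8 string arrays.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <string>

void stringFactorySketch() {
    // value-returning variant
    auto scalar = sd::NDArrayFactory::string("alpha");

    // heap-allocating variant: the caller owns the pointer
    auto scalarPtr = sd::NDArrayFactory::string_(std::string("beta"));

    bool isString = scalar.isS();   // true for UTF8/UTF16/UTF32 arrays

    delete scalarPtr;
}
```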
/**
* This factory creates an array from a utf16 string
* @return NDArray default dataType UTF16
*/
static NDArray string(const char16_t* u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_(const char16_t* u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_(const std::u16string& u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string(const std::u16string& u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/**
* This factory creates an array from a utf32 string
* @return NDArray default dataType UTF32
*/
static NDArray string(const char32_t* u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_(const char32_t* u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_(const std::u32string& u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string(const std::u32string& u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/**
* This factory creates an array from a vector of utf8 strings
* @return NDArray default dataType UTF8
*/
static NDArray string( const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray string( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray string( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray string( const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
/**
* This factory creates an array from a vector of utf16 strings
* @return NDArray default dataType UTF16
*/
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
/**
* This factory creates an array from a vector of utf32 strings
* @return NDArray default dataType UTF32
*/
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray string( const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static NDArray* string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
static ResultSet createSetOfArrs(const Nd4jLong numOfArrs, const void* buffer, const Nd4jLong* shapeInfo, const Nd4jLong* offsets, sd::LaunchContext * context = sd::LaunchContext ::defaultContext());
#endif
};
}
#endif //DEV_TESTS_NDARRAYFACTORY_H


@ -17,8 +17,8 @@
#ifndef CUDA_LAMBDA_HELPER #ifndef CUDA_LAMBDA_HELPER
#define CUDA_LAMBDA_HELPER #define CUDA_LAMBDA_HELPER
#include <pointercast.h> #include <system/pointercast.h>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <helpers/shape.h> #include <helpers/shape.h>
#include <cuda.h> #include <cuda.h>
#include <cuda_runtime.h> #include <cuda_runtime.h>


@ -26,25 +26,25 @@
#include <string> #include <string>
#include <atomic> #include <atomic>
#include <unordered_map> #include <unordered_map>
#include <NDArray.h> #include <array/NDArray.h>
#include <memory/Workspace.h> #include <memory/Workspace.h>
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT NDArrayList { class ND4J_EXPORT NDArrayList {
private: private:
// workspace where chunks belong to // workspace where chunks belong to
//nd4j::memory::Workspace* _workspace = nullptr; //sd::memory::Workspace* _workspace = nullptr;
nd4j::LaunchContext * _context = nd4j::LaunchContext ::defaultContext(); sd::LaunchContext * _context = sd::LaunchContext ::defaultContext();
// numeric and symbolic ids of this list // numeric and symbolic ids of this list
std::pair<int, int> _id; std::pair<int, int> _id;
std::string _name; std::string _name;
nd4j::DataType _dtype; sd::DataType _dtype;
// stored chunks // stored chunks
MAP_IMPL<int, nd4j::NDArray*> _chunks; MAP_IMPL<int, sd::NDArray*> _chunks;
// just a counter, for stored elements // just a counter, for stored elements
std::atomic<int> _elements; std::atomic<int> _elements;
@ -65,7 +65,7 @@ namespace nd4j {
NDArrayList(int height, bool expandable = false); NDArrayList(int height, bool expandable = false);
~NDArrayList(); ~NDArrayList();
nd4j::DataType dataType(); sd::DataType dataType();
NDArray* read(int idx); NDArray* read(int idx);
NDArray* readRaw(int idx); NDArray* readRaw(int idx);
@ -82,8 +82,8 @@ namespace nd4j {
std::pair<int,int>& id(); std::pair<int,int>& id();
std::string& name(); std::string& name();
//nd4j::memory::Workspace* workspace(); //sd::memory::Workspace* workspace();
nd4j::LaunchContext * context(); sd::LaunchContext * context();
NDArrayList* clone(); NDArrayList* clone();
bool equals(NDArrayList& other); bool equals(NDArrayList& other);


@ -19,7 +19,7 @@
// //
// PLEASE NOTE: It will delete all stored NDArrays upon destructor call // PLEASE NOTE: It will delete all stored NDArrays upon destructor call
// //
// Created by raver119 on 07.09.17. // @author raver119@gmail.com
// //
#ifndef LIBND4J_RESULTSET_H #ifndef LIBND4J_RESULTSET_H
@ -27,22 +27,25 @@
#include <vector> #include <vector>
#include <graph/generated/result_generated.h> #include <graph/generated/result_generated.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
class NDArray; // forward declaration of template class NDArray class NDArray; // forward declaration of template class NDArray
class ND4J_EXPORT ResultSet { class ND4J_EXPORT ResultSet {
private: private:
std::vector<nd4j::NDArray *> _content; std::vector<sd::NDArray *> _content;
Nd4jStatus _status = ND4J_STATUS_OK; Nd4jStatus _status = ND4J_STATUS_OK;
bool _removable = true; bool _removable = true;
public: public:
// default constructor explicit ResultSet();
ResultSet(const nd4j::graph::FlatResult* result = nullptr);
#ifndef __JAVACPP_HACK__
ResultSet(const sd::graph::FlatResult* result);
#endif
ResultSet(const ResultSet& other) noexcept; ResultSet(const ResultSet& other) noexcept;
@ -57,9 +60,9 @@ namespace nd4j {
~ResultSet(); ~ResultSet();
int size(); int size();
nd4j::NDArray* at(const unsigned long idx) const; sd::NDArray* at(const unsigned long idx) const;
nd4j::NDArray* operator[](const unsigned long idx) const; sd::NDArray* operator[](const unsigned long idx) const;
void push_back(nd4j::NDArray* array); void push_back(sd::NDArray* array);
Nd4jStatus status(); Nd4jStatus status();
void setStatus(Nd4jStatus status); void setStatus(Nd4jStatus status);
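A minimal sketch of typical ResultSet usage based on the declarations above; the header path array/ResultSet.h is an assumption about the new layout. Because _removable defaults to true, the set deletes its stored NDArrays in its destructor, so no manual cleanup is needed here:

```cpp
// Minimal sketch: collecting heap-allocated arrays in a ResultSet.
#include <array/NDArray.h>
#include <array/NDArrayFactory.h>
#include <array/ResultSet.h>   // assumed location of ResultSet.h after this reorganization

void resultSetSketch() {
    sd::ResultSet results;

    results.push_back(sd::NDArrayFactory::create_<float>('c', {2}, {1.f, 2.f}));
    results.push_back(sd::NDArrayFactory::create_<float>('c', {2}, {3.f, 4.f}));

    for (int i = 0; i < results.size(); i++) {
        sd::NDArray* arr = results.at(i);
        // arr->lengthOf() == 2 for both entries
    }
}
```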


@ -23,12 +23,12 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include <dll.h> #include <system/dll.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <DataType.h> #include <array/DataType.h>
#include <initializer_list> #include <initializer_list>
namespace nd4j { namespace sd {
class ND4J_EXPORT ShapeDescriptor { class ND4J_EXPORT ShapeDescriptor {
@ -44,7 +44,7 @@ class ND4J_EXPORT ShapeDescriptor {
public: public:
ShapeDescriptor(const ShapeDescriptor &other); ShapeDescriptor(const ShapeDescriptor &other);
ShapeDescriptor(const Nd4jLong *shapeInfo, bool inheritDtype = true); ShapeDescriptor(const Nd4jLong *shapeInfo, bool inheritDtype = true);
explicit ShapeDescriptor(const Nd4jLong *shapeInfo, const nd4j::DataType dtypeOverride); explicit ShapeDescriptor(const Nd4jLong *shapeInfo, const sd::DataType dtypeOverride);
explicit ShapeDescriptor(const Nd4jLong *shapeInfo, const Nd4jLong *dtypeOverride); explicit ShapeDescriptor(const Nd4jLong *shapeInfo, const Nd4jLong *dtypeOverride);
explicit ShapeDescriptor(const Nd4jLong *shapeInfo, const Nd4jLong *dtypeOverride, const Nd4jLong *orderOverride); explicit ShapeDescriptor(const Nd4jLong *shapeInfo, const Nd4jLong *dtypeOverride, const Nd4jLong *orderOverride);
explicit ShapeDescriptor(const DataType type, const Nd4jLong length); explicit ShapeDescriptor(const DataType type, const Nd4jLong length);
@ -91,9 +91,9 @@ class ND4J_EXPORT ShapeDescriptor {
namespace std { namespace std {
template<> template<>
class ND4J_EXPORT hash<nd4j::ShapeDescriptor> { class ND4J_EXPORT hash<sd::ShapeDescriptor> {
public: public:
size_t operator()(const nd4j::ShapeDescriptor &k) const; size_t operator()(const sd::ShapeDescriptor &k) const;
}; };
} }


@ -22,10 +22,10 @@
#define LIBND4J_SHAPELIST_H #define LIBND4J_SHAPELIST_H
#include <vector> #include <vector>
#include <shape.h> #include <helpers/shape.h>
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT ShapeList { class ND4J_EXPORT ShapeList {
protected: protected:
std::vector<Nd4jLong*> _shapes; std::vector<Nd4jLong*> _shapes;


@ -21,7 +21,7 @@
#ifndef ND4J_SPACE_TYPE_H #ifndef ND4J_SPACE_TYPE_H
#define ND4J_SPACE_TYPE_H #define ND4J_SPACE_TYPE_H
namespace nd4j { namespace sd {
enum SpaceType { enum SpaceType {
CONTINUOUS = 1, CONTINUOUS = 1,
COMPLEX = 2, COMPLEX = 2,


@ -21,7 +21,7 @@
#ifndef LIBND4J_SPARSETYPE_H #ifndef LIBND4J_SPARSETYPE_H
#define LIBND4J_SPARSETYPE_H #define LIBND4J_SPARSETYPE_H
namespace nd4j { namespace sd {
enum SparseType { enum SparseType {
CSR = 1, CSR = 1,
CSC = 2, CSC = 2,


@ -22,9 +22,9 @@
#define DEV_TESTS_TADDESCRIPTOR_H #define DEV_TESTS_TADDESCRIPTOR_H
#include "ShapeDescriptor.h" #include "ShapeDescriptor.h"
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT TadDescriptor { class ND4J_EXPORT TadDescriptor {
private: private:
ShapeDescriptor _originalShape; ShapeDescriptor _originalShape;
@ -62,9 +62,9 @@ namespace nd4j {
namespace std { namespace std {
template<> template<>
class ND4J_EXPORT hash<nd4j::TadDescriptor> { class ND4J_EXPORT hash<sd::TadDescriptor> {
public: public:
size_t operator()(const nd4j::TadDescriptor &k) const; size_t operator()(const sd::TadDescriptor &k) const;
}; };
} }


@ -23,7 +23,7 @@
#include "ConstantDataBuffer.h" #include "ConstantDataBuffer.h"
namespace nd4j { namespace sd {
class ND4J_EXPORT TadPack { class ND4J_EXPORT TadPack {
private: private:
ConstantDataBuffer _tadShape; ConstantDataBuffer _tadShape;


@ -19,10 +19,10 @@
// @author Yurii Shyrma (iuriish@yahoo.com) // @author Yurii Shyrma (iuriish@yahoo.com)
// //
#include "../DataBuffer.h" #include <array/DataBuffer.h>
#include <DataTypeUtils.h> #include <array/DataTypeUtils.h>
namespace nd4j { namespace sd {
void DataBuffer::expand(const uint64_t size) { void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) { if (size > _lenInBytes) {
// allocate new buffer // allocate new buffer


@ -17,15 +17,15 @@
#ifndef NDARRAY_CPP #ifndef NDARRAY_CPP
#define NDARRAY_CPP #define NDARRAY_CPP
#include "../NDArray.h" #include <array/NDArray.h>
#include "../NDArrayFactory.h" #include <array/NDArrayFactory.h>
#include "NativeOpExecutioner.h" #include <legacy/NativeOpExecutioner.h>
#include <BroadcastPairwiseConverter.h> #include <loops/BroadcastPairwiseConverter.h>
#include <memory/Workspace.h> #include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h> #include <memory/MemoryRegistrator.h>
#include <ops.h> #include <ops/ops.h>
#include <ops/gemm.h> #include <ops/gemm.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdexcept> #include <stdexcept>
#include <memory> #include <memory>
#include <helpers/logger.h> #include <helpers/logger.h>
@ -38,16 +38,16 @@
#include <helpers/ShapeUtils.h> #include <helpers/ShapeUtils.h>
#include <sstream> #include <sstream>
#include <helpers/ArrayUtils.h> #include <helpers/ArrayUtils.h>
#include <MmulHelper.h> #include <helpers/MmulHelper.h>
#include <helpers/threshold.h> #include <helpers/threshold.h>
#include <exceptions/datatype_exception.h> #include <exceptions/datatype_exception.h>
#include <exceptions/allocation_exception.h> #include <exceptions/allocation_exception.h>
#include <helpers/ConstantTadHelper.h> #include <helpers/ConstantTadHelper.h>
#include <NDArray.hpp> #include <array/NDArray.hXX>
namespace nd4j { namespace sd {
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////


@ -0,0 +1,148 @@
################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
#ifndef NDARRAY_MACRO
#define NDARRAY_MACRO
#include <op_boilerplate.h>
//NDArray<T> *other, T *extraParams
BUILD_CALL_1(template void NDArray<float>::template applyPairwiseTransform, float, (NDArray<float>* other, float* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void NDArray<float16>::applyPairwiseTransform, float16, (NDArray<float16>* other, float16* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void NDArray<double>::applyPairwiseTransform, double, (NDArray<double>* other, double* extraParams), PAIRWISE_TRANSFORM_OPS)
// NDArray<T> *other, NDArray<T> *target, T *extraParams
BUILD_CALL_1(template void sd::NDArray<float>::applyPairwiseTransform, float, (NDArray<float>* other, NDArray<float>* target, float* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyPairwiseTransform, float16, (NDArray<float16>* other, NDArray<float16>* target, float16* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyPairwiseTransform, double, (NDArray<double>* other, NDArray<double>* target, double* extraParams), PAIRWISE_TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyScalar, float16, (NDArray<float16>& scalar, NDArray<float16>* target, float16 *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyScalar, float16, (float16 scalar, NDArray<float16>* target, float16 *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyScalar, float, (NDArray<float>& scalar, NDArray<float>* target, float *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyScalar, float, (float scalar, NDArray<float>* target, float *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyScalar, double, (NDArray<double>& scalar, NDArray<double>* target, double *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyScalar, double, (double scalar, NDArray<double>* target, double *extraParams) const, SCALAR_OPS)
BUILD_CALL_1(template float16 sd::NDArray<float16>::reduceNumber, float16, (float16 *extraParams) const, REDUCE_OPS)
BUILD_CALL_1(template float sd::NDArray<float>::reduceNumber, float, (float *extraParams) const, REDUCE_OPS)
BUILD_CALL_1(template double sd::NDArray<double>::reduceNumber, double, (double *extraParams) const, REDUCE_OPS)
BUILD_CALL_1(template Nd4jLong sd::NDArray<float16>::indexReduceNumber, float16, (float16 *extraParams), INDEX_REDUCE_OPS)
BUILD_CALL_1(template Nd4jLong sd::NDArray<float>::indexReduceNumber, float, (float *extraParams), INDEX_REDUCE_OPS)
BUILD_CALL_1(template Nd4jLong sd::NDArray<double>::indexReduceNumber, double, (double *extraParams), INDEX_REDUCE_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyBroadcast, float16, (std::initializer_list<int> list, const sd::NDArray<float16>* a, sd::NDArray<float16>* b, float16* c), BROADCAST_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyBroadcast, float, (std::initializer_list<int> list, const sd::NDArray<float>* a, sd::NDArray<float>* b, float* c), BROADCAST_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyBroadcast, double, (std::initializer_list<int> list, const sd::NDArray<double>* a, sd::NDArray<double>* b, double* c), BROADCAST_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyTrueBroadcast, float16,(const sd::NDArray<float16>* a, sd::NDArray<float16>* target, const bool checkTargetShape, float16* c) const, BROADCAST_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyTrueBroadcast, float, (const sd::NDArray<float>* a, sd::NDArray<float>* target, const bool checkTargetShape, float* c) const, BROADCAST_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyTrueBroadcast, double, (const sd::NDArray<double>* a, sd::NDArray<double>* target, const bool checkTargetShape, double* c) const, BROADCAST_OPS)
BUILD_CALL_1(template sd::NDArray<float16>* sd::NDArray<float16>::applyTrueBroadcast, float16, (const sd::NDArray<float16>* a, float16* c) const, BROADCAST_OPS)
BUILD_CALL_1(template sd::NDArray<float>* sd::NDArray<float>::applyTrueBroadcast, float, (const sd::NDArray<float>* a, float* c) const, BROADCAST_OPS)
BUILD_CALL_1(template sd::NDArray<double>* sd::NDArray<double>::applyTrueBroadcast, double, (const sd::NDArray<double>* a, double* c) const, BROADCAST_OPS)
BUILD_CALL_1(template sd::NDArray<float16> sd::NDArray<float16>::applyTrueBroadcast, float16, (const sd::NDArray<float16>& a, float16* c) const, BROADCAST_OPS)
BUILD_CALL_1(template sd::NDArray<float> sd::NDArray<float>::applyTrueBroadcast, float, (const sd::NDArray<float>& a, float* c) const, BROADCAST_OPS)
BUILD_CALL_1(template sd::NDArray<double> sd::NDArray<double>::applyTrueBroadcast, double, (const sd::NDArray<double>& a, double* c) const, BROADCAST_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyTransform, float16, (NDArray<float16>* target, float16* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyTransform, float, (NDArray<float>* target, float* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyTransform, double, (NDArray<double>* target, double* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyTransform, float16, (float16* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyTransform, float, (float* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyTransform, double, (double* extraParams), TRANSFORM_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::applyRandom, float16, (sd::random::RandomBuffer *buffer, NDArray<float16>* y, NDArray<float16>* z, float16* extraParams), RANDOM_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::applyRandom, float, (sd::random::RandomBuffer *buffer, NDArray<float>* y, NDArray<float>* z, float* extraParams), RANDOM_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::applyRandom, double, (sd::random::RandomBuffer *buffer, NDArray<double>* y, NDArray<double>* z, double* extraParams), RANDOM_OPS)
BUILD_CALL_1(template NDArray<float16> sd::NDArray<float16>::transform, float16, (float16* extraParams) const, TRANSFORM_OPS)
BUILD_CALL_1(template NDArray<float> sd::NDArray<float>::transform, float, (float* extraParams) const, TRANSFORM_OPS)
BUILD_CALL_1(template NDArray<double> sd::NDArray<double>::transform, double, (double* extraParams) const, TRANSFORM_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template reduceAlongDimension, float, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template reduceAlongDimension, float16, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template reduceAlongDimension, double, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> sd::NDArray<float>::template reduceAlongDims, float, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> sd::NDArray<float16>::template reduceAlongDims, float16, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> sd::NDArray<double>::template reduceAlongDims, double, (const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template reduceAlongDimension, float, (const std::initializer_list<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template reduceAlongDimension, float16, (const std::initializer_list<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template reduceAlongDimension, double, (const std::initializer_list<int>& dimensions, const bool keepDims, const bool supportOldShapes) const, REDUCE_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::template reduceAlongDimension, float, (NDArray<float>* target, const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes, float * extras) const, REDUCE_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::template reduceAlongDimension, float16, (NDArray<float16>* target, const std::vector<int>& dimensions, const bool keepDims, const bool supportOldShapes, float16 * extras) const, REDUCE_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::template reduceAlongDimension, double, (NDArray<double>* target, const std::vector<int>& dimension, const bool keepDims, const bool supportOldShapes, double * extras) const, REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template varianceAlongDimension, float, (const bool biasCorrected, const std::initializer_list<int>& dimensions) const, SUMMARY_STATS_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template varianceAlongDimension, float16, (const bool biasCorrected, const std::initializer_list<int>& dimensions) const, SUMMARY_STATS_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template varianceAlongDimension, double, (const bool biasCorrected, const std::initializer_list<int>& dimensions) const, SUMMARY_STATS_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::template varianceAlongDimension, float, (const NDArray<float> *target, const bool biasCorrected, const std::initializer_list<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::template varianceAlongDimension, float16, (const NDArray<float16> *target,const bool biasCorrected, const std::initializer_list<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::template varianceAlongDimension, double, (const NDArray<double> *target, const bool biasCorrected, const std::initializer_list<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::template varianceAlongDimension, float, (const NDArray<float> *target, const bool biasCorrected, const std::vector<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::template varianceAlongDimension, float16, (const NDArray<float16> *target,const bool biasCorrected, const std::vector<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::template varianceAlongDimension, double, (const NDArray<double> *target, const bool biasCorrected, const std::vector<int>& dimensions), SUMMARY_STATS_OPS)
BUILD_CALL_1(template float sd::NDArray<float>::template varianceNumber, float, (bool biasCorrected), SUMMARY_STATS_OPS)
BUILD_CALL_1(template float16 sd::NDArray<float16>::template varianceNumber, float16, (bool biasCorrected), SUMMARY_STATS_OPS)
BUILD_CALL_1(template double sd::NDArray<double>::template varianceNumber, double, (bool biasCorrected), SUMMARY_STATS_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template applyReduce3, float, (const NDArray<float>* other, const float* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template applyReduce3, float16, (const NDArray<float16>* other, const float16* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template applyReduce3, double, (const NDArray<double>* other, const double* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template applyReduce3, float, (const NDArray<float>* other, const std::vector<int> &dims, const float* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template applyReduce3, float16, (const NDArray<float16>* other, const std::vector<int> &dims, const float16* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template applyReduce3, double, (const NDArray<double>* other, const std::vector<int> &dims, const double* extraParams) const, REDUCE3_OPS)
BUILD_CALL_1(template void sd::NDArray<float>::template applyIndexReduce, float, (const NDArray<float>* target, const std::vector<int> & alpha, const float* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template void sd::NDArray<float16>::template applyIndexReduce, float16, (const NDArray<float16>* target, const std::vector<int> & alpha, const float16* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template void sd::NDArray<double>::template applyIndexReduce, double, (const NDArray<double>* target, const std::vector<int> & alpha, const double* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template applyIndexReduce, float, (const std::vector<int> & alpha, const float* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template applyIndexReduce, float16, (const std::vector<int> & alpha, const float16* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template applyIndexReduce, double, (const std::vector<int> & alpha, const double* beta) const, INDEX_REDUCE_OPS)
BUILD_CALL_1(template NDArray<float> *sd::NDArray<float>::template applyAllReduce3, float, (const sd::NDArray<float>* alpha, const std::vector<int> & beta, float const* gamma) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<float16> *sd::NDArray<float16>::template applyAllReduce3, float16, (const sd::NDArray<float16>* alpha, const std::vector<int> & beta, float16 const* gamma) const, REDUCE3_OPS)
BUILD_CALL_1(template NDArray<double> *sd::NDArray<double>::template applyAllReduce3, double, (const sd::NDArray<double>* alpha, const std::vector<int> & beta, double const* gamma) const, REDUCE3_OPS)
template NDArray<float> mmul(const NDArray<float>& left, const NDArray<float>& right);
template NDArray<float16> mmul(const NDArray<float16>& left, const NDArray<float16>& right);
template NDArray<double> mmul(const NDArray<double>& left, const NDArray<double>& right);
// template NDArray<float> operator-(const float, const NDArray<float>&);
// template NDArray<float16> operator-(const float16, const NDArray<float16>&);
// template NDArray<double> operator-(const double, const NDArray<double>&);
// template NDArray<float> operator+(const float, const NDArray<float>&);
// template NDArray<float16> operator+(const float16, const NDArray<float16>&);
// template NDArray<double> operator+(const double, const NDArray<double>&);
#endif

View File

@ -20,14 +20,14 @@
// //
#include "../DataBuffer.h" #include "../DataBuffer.h"
#include <DataTypeUtils.h> #include <array/DataTypeUtils.h>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h> #include <memory/MemoryCounter.h>
#include <exceptions/allocation_exception.h> #include <exceptions/allocation_exception.h>
namespace nd4j { namespace sd {
void DataBuffer::expand(const uint64_t size) { void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) { if (size > _lenInBytes) {
// allocate new buffer // allocate new buffer
@ -67,19 +67,19 @@ namespace nd4j {
void DataBuffer::allocateSpecial() { void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) { if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = nd4j::AffinityManager::currentDeviceId(); auto deviceId = sd::AffinityManager::currentDeviceId();
if (_workspace == nullptr) if (_workspace == nullptr)
if (!nd4j::memory::MemoryCounter::getInstance()->validate(getLenInBytes())) if (!sd::memory::MemoryCounter::getInstance()->validate(getLenInBytes()))
throw nd4j::allocation_exception::build("Requested amount exceeds device limits", nd4j::memory::MemoryCounter::getInstance()->deviceLimit(deviceId), getLenInBytes()); throw sd::allocation_exception::build("Requested amount exceeds device limits", sd::memory::MemoryCounter::getInstance()->deviceLimit(deviceId), getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t); ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true; _isOwnerSpecial = true;
if (_workspace == nullptr) { if (_workspace == nullptr) {
nd4j::memory::MemoryCounter::getInstance()->countIn(deviceId, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countIn(deviceId, getLenInBytes());
nd4j::memory::MemoryCounter::getInstance()->countIn(nd4j::memory::MemoryType::DEVICE, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
} }
} }
} }
@ -135,8 +135,8 @@ void DataBuffer::deleteSpecial() {
// count out towards DataBuffer device, only if we're not in workspace // count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) { if (_workspace == nullptr) {
nd4j::memory::MemoryCounter::getInstance()->countOut(_deviceId, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countOut(_deviceId, getLenInBytes());
nd4j::memory::MemoryCounter::getInstance()->countOut(nd4j::memory::MemoryType::DEVICE, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
} }
} }
} }

View File

@ -17,14 +17,14 @@
#ifndef NDARRAY_CPP #ifndef NDARRAY_CPP
#define NDARRAY_CPP #define NDARRAY_CPP
#include "../NDArray.h" #include <array/NDArray.h>
#include "../NDArrayFactory.h" #include <array/NDArrayFactory.h>
#include "NativeOpExecutioner.h" #include <legacy/NativeOpExecutioner.h>
#include <memory/Workspace.h> #include <memory/Workspace.h>
#include <memory/MemoryRegistrator.h> #include <memory/MemoryRegistrator.h>
#include <ops.h> #include <ops/ops.h>
#include <ops/gemm.h> #include <ops/gemm.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdexcept> #include <stdexcept>
#include <memory> #include <memory>
#include <helpers/logger.h> #include <helpers/logger.h>
@ -37,17 +37,17 @@
#include <helpers/ShapeUtils.h> #include <helpers/ShapeUtils.h>
#include <sstream> #include <sstream>
#include <helpers/ArrayUtils.h> #include <helpers/ArrayUtils.h>
#include <MmulHelper.h> #include <helpers/MmulHelper.h>
#include <helpers/threshold.h> #include <helpers/threshold.h>
#include <exceptions/datatype_exception.h> #include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <specials_cuda.h> #include <ops/specials_cuda.h>
#include <loops/special_kernels.h> #include <loops/special_kernels.h>
#include <PointersManager.h> #include <helpers/PointersManager.h>
#include "../NDArray.hpp" #include <array/NDArray.hXX>
#include <ConstantShapeHelper.h> #include <helpers/ConstantShapeHelper.h>
namespace nd4j { namespace sd {
void* NDArray::platformBuffer() { return specialBuffer(); } void* NDArray::platformBuffer() { return specialBuffer(); }
void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); } void* NDArray::getPlatformBuffer() const { return getSpecialBuffer(); }
@ -569,6 +569,6 @@ template void NDArray::printCurrentBuffer<double>(const bool host, const char* m
#endif #endif
} // end namespace nd4j } // end namespace sd
#endif #endif

View File

@ -21,8 +21,8 @@
#include <array/ByteOrderUtils.h> #include <array/ByteOrderUtils.h>
namespace nd4j { namespace sd {
ByteOrder ByteOrderUtils::fromFlatByteOrder(nd4j::graph::ByteOrder order) { ByteOrder ByteOrderUtils::fromFlatByteOrder(sd::graph::ByteOrder order) {
return (ByteOrder) order; return (ByteOrder) order;
} }
} }

View File

@ -20,7 +20,7 @@
#include "../ConstantDataBuffer.h" #include "../ConstantDataBuffer.h"
namespace nd4j { namespace sd {
ConstantDataBuffer::ConstantDataBuffer(Nd4jPointer primary, Nd4jPointer special, Nd4jLong numEelements, Nd4jLong sizeOf) { ConstantDataBuffer::ConstantDataBuffer(Nd4jPointer primary, Nd4jPointer special, Nd4jLong numEelements, Nd4jLong sizeOf) {
_primaryBuffer = primary; _primaryBuffer = primary;
_specialBuffer = special; _specialBuffer = special;

View File

@ -19,10 +19,10 @@
// //
#include <array/ConstantDescriptor.h> #include <array/ConstantDescriptor.h>
#include <DataTypeUtils.h> #include <array/DataTypeUtils.h>
#include <stdexcept> #include <stdexcept>
namespace nd4j { namespace sd {
ConstantDescriptor::ConstantDescriptor(double* values, int length) { ConstantDescriptor::ConstantDescriptor(double* values, int length) {
for (int e = 0; e < length; e++) for (int e = 0; e < length; e++)
_floatValues.emplace_back(values[e]); _floatValues.emplace_back(values[e]);
@ -77,7 +77,7 @@ namespace nd4j {
} }
namespace std { namespace std {
size_t hash<nd4j::ConstantDescriptor>::operator()(const nd4j::ConstantDescriptor &k) const { size_t hash<sd::ConstantDescriptor>::operator()(const sd::ConstantDescriptor &k) const {
using std::hash; using std::hash;
// Compute individual hash values for first, // Compute individual hash values for first,
// second and third and combine them using XOR // second and third and combine them using XOR

View File

@ -18,17 +18,17 @@
// Created by raver on 5/17/2019. // Created by raver on 5/17/2019.
// //
#include <DataTypeUtils.h> #include <array/DataTypeUtils.h>
#include <array/ConstantHolder.h> #include <array/ConstantHolder.h>
#include <shape.h> #include <helpers/shape.h>
namespace nd4j { namespace sd {
ConstantHolder::ConstantHolder(const ConstantHolder& other) { ConstantHolder::ConstantHolder(const ConstantHolder& other) {
_buffers = other._buffers; _buffers = other._buffers;
_deviceId = other._deviceId; _deviceId = other._deviceId;
} }
bool ConstantHolder::hasBuffer(nd4j::DataType dataType) { bool ConstantHolder::hasBuffer(sd::DataType dataType) {
return _buffers.count(dataType) > 0; return _buffers.count(dataType) > 0;
} }
@ -42,7 +42,7 @@ namespace nd4j {
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT bool ConstantHolder::hasBuffer, (void), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT bool ConstantHolder::hasBuffer, (void), LIBND4J_TYPES);
void ConstantHolder::addBuffer(ConstantDataBuffer &pointer, nd4j::DataType dataType) { void ConstantHolder::addBuffer(ConstantDataBuffer &pointer, sd::DataType dataType) {
_buffers[dataType] = pointer; _buffers[dataType] = pointer;
} }
@ -52,7 +52,7 @@ namespace nd4j {
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void ConstantHolder::addBuffer, (ConstantDataBuffer& cb), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void ConstantHolder::addBuffer, (ConstantDataBuffer& cb), LIBND4J_TYPES);
ConstantDataBuffer* ConstantHolder::getConstantDataBuffer(nd4j::DataType dataType) { ConstantDataBuffer* ConstantHolder::getConstantDataBuffer(sd::DataType dataType) {
if (!hasBuffer(dataType)) if (!hasBuffer(dataType))
throw std::runtime_error("Requested dataType is absent in storage"); throw std::runtime_error("Requested dataType is absent in storage");

View File

@ -26,7 +26,7 @@
#include <memory/MemoryCounter.h> #include <memory/MemoryCounter.h>
#include <exceptions/allocation_exception.h> #include <exceptions/allocation_exception.h>
namespace nd4j { namespace sd {
///// IMLEMENTATION OF COMMON METHODS ///// ///// IMLEMENTATION OF COMMON METHODS /////
@ -41,7 +41,7 @@ namespace nd4j {
_workspace = nullptr; _workspace = nullptr;
_isOwnerPrimary = false; _isOwnerPrimary = false;
_isOwnerSpecial = false; _isOwnerSpecial = false;
_deviceId = nd4j::AffinityManager::currentDeviceId(); _deviceId = sd::AffinityManager::currentDeviceId();
setCountersToZero(); setCountersToZero();
} }
@ -83,7 +83,7 @@ namespace nd4j {
_workspace = workspace; _workspace = workspace;
_isOwnerPrimary = isOwnerPrimary; _isOwnerPrimary = isOwnerPrimary;
_isOwnerSpecial = isOwnerSpecial; _isOwnerSpecial = isOwnerSpecial;
_deviceId = nd4j::AffinityManager::currentDeviceId(); _deviceId = sd::AffinityManager::currentDeviceId();
setCountersToZero(); setCountersToZero();
@ -115,7 +115,7 @@ namespace nd4j {
_dataType = dataType; _dataType = dataType;
_workspace = workspace; _workspace = workspace;
_deviceId = nd4j::AffinityManager::currentDeviceId(); _deviceId = sd::AffinityManager::currentDeviceId();
setCountersToZero(); setCountersToZero();
@ -134,7 +134,7 @@ namespace nd4j {
_primaryBuffer = nullptr; _primaryBuffer = nullptr;
_specialBuffer = nullptr; _specialBuffer = nullptr;
_deviceId = nd4j::AffinityManager::currentDeviceId(); _deviceId = sd::AffinityManager::currentDeviceId();
setCountersToZero(); setCountersToZero();
@ -234,17 +234,17 @@ namespace nd4j {
void DataBuffer::allocatePrimary() { void DataBuffer::allocatePrimary() {
if (_primaryBuffer == nullptr && getLenInBytes() > 0) { if (_primaryBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = nd4j::AffinityManager::currentDeviceId(); auto deviceId = sd::AffinityManager::currentDeviceId();
// check if this allocation won't bring us above limit // check if this allocation won't bring us above limit
if (_workspace == nullptr) { if (_workspace == nullptr) {
if (Environment::getInstance()->isCPU()) { if (Environment::getInstance()->isCPU()) {
// on cpu backend we validate against device 0 for now // on cpu backend we validate against device 0 for now
if (!nd4j::memory::MemoryCounter::getInstance()->validate(getLenInBytes())) if (!sd::memory::MemoryCounter::getInstance()->validate(getLenInBytes()))
throw nd4j::allocation_exception::build("Requested amount exceeds HOST device limits", nd4j::memory::MemoryCounter::getInstance()->deviceLimit(deviceId), getLenInBytes()); throw sd::allocation_exception::build("Requested amount exceeds HOST device limits", sd::memory::MemoryCounter::getInstance()->deviceLimit(deviceId), getLenInBytes());
} else { } else {
// in heterogenous mode we valdate against device group // in heterogenous mode we valdate against device group
if (!nd4j::memory::MemoryCounter::getInstance()->validateGroup(nd4j::memory::MemoryType::HOST, getLenInBytes())) if (!sd::memory::MemoryCounter::getInstance()->validateGroup(sd::memory::MemoryType::HOST, getLenInBytes()))
throw nd4j::allocation_exception::build("Requested amount exceeds HOST group limits", nd4j::memory::MemoryCounter::getInstance()->groupLimit(nd4j::memory::MemoryType::HOST), getLenInBytes()); throw sd::allocation_exception::build("Requested amount exceeds HOST group limits", sd::memory::MemoryCounter::getInstance()->groupLimit(sd::memory::MemoryType::HOST), getLenInBytes());
} }
} }
@ -254,9 +254,9 @@ namespace nd4j {
// count in towards current deviceId if we're not in workspace mode // count in towards current deviceId if we're not in workspace mode
if (_workspace == nullptr) { if (_workspace == nullptr) {
if (Environment::getInstance()->isCPU()) // we don't want this counter to be added to CUDA device if (Environment::getInstance()->isCPU()) // we don't want this counter to be added to CUDA device
nd4j::memory::MemoryCounter::getInstance()->countIn(deviceId, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countIn(deviceId, getLenInBytes());
nd4j::memory::MemoryCounter::getInstance()->countIn(nd4j::memory::MemoryType::HOST, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countIn(sd::memory::MemoryType::HOST, getLenInBytes());
} }
} }
} }
@ -280,9 +280,9 @@ namespace nd4j {
// count out towards DataBuffer device, only if we're not in workspace // count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) { if (_workspace == nullptr) {
if (Environment::getInstance()->isCPU()) if (Environment::getInstance()->isCPU())
nd4j::memory::MemoryCounter::getInstance()->countOut(_deviceId, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countOut(_deviceId, getLenInBytes());
nd4j::memory::MemoryCounter::getInstance()->countOut(nd4j::memory::MemoryType::HOST, getLenInBytes()); sd::memory::MemoryCounter::getInstance()->countOut(sd::memory::MemoryType::HOST, getLenInBytes());
} }
} }
} }

View File

@ -22,12 +22,12 @@
#include <array/DataTypeUtils.h> #include <array/DataTypeUtils.h>
#include <types/float16.h> #include <types/float16.h>
namespace nd4j { namespace sd {
DataType DataTypeUtils::fromInt(int val) { DataType DataTypeUtils::fromInt(int val) {
return (DataType) val; return (DataType) val;
} }
DataType DataTypeUtils::fromFlatDataType(nd4j::graph::DType dtype) { DataType DataTypeUtils::fromFlatDataType(sd::graph::DType dtype) {
return (DataType) dtype; return (DataType) dtype;
} }

View File

@ -29,7 +29,7 @@
#include <cuda_runtime.h> #include <cuda_runtime.h>
#endif #endif
namespace nd4j { namespace sd {
ExtraArguments::ExtraArguments(std::initializer_list<double> arguments) { ExtraArguments::ExtraArguments(std::initializer_list<double> arguments) {
_fpArgs = arguments; _fpArgs = arguments;
} }
@ -122,7 +122,7 @@ namespace nd4j {
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void *ExtraArguments::argumentsAsT, (Nd4jLong offset), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT void *ExtraArguments::argumentsAsT, (Nd4jLong offset), LIBND4J_TYPES);
void* ExtraArguments::argumentsAsT(nd4j::DataType dataType, Nd4jLong offset) { void* ExtraArguments::argumentsAsT(sd::DataType dataType, Nd4jLong offset) {
if (_fpArgs.empty() && _intArgs.empty()) if (_fpArgs.empty() && _intArgs.empty())
return nullptr; return nullptr;

View File

@ -23,7 +23,7 @@
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
#include <helpers/logger.h> #include <helpers/logger.h>
namespace nd4j { namespace sd {
InteropDataBuffer::InteropDataBuffer(InteropDataBuffer &dataBuffer, uint64_t length, uint64_t offset) { InteropDataBuffer::InteropDataBuffer(InteropDataBuffer &dataBuffer, uint64_t length, uint64_t offset) {
_dataBuffer = dataBuffer.getDataBuffer(); _dataBuffer = dataBuffer.getDataBuffer();
@ -39,7 +39,7 @@ namespace nd4j {
_dataBuffer = databuffer; _dataBuffer = databuffer;
} }
InteropDataBuffer::InteropDataBuffer(size_t elements, nd4j::DataType dtype, bool allocateBoth) { InteropDataBuffer::InteropDataBuffer(size_t elements, sd::DataType dtype, bool allocateBoth) {
if (elements == 0) { if (elements == 0) {
_dataBuffer = std::make_shared<DataBuffer>(); _dataBuffer = std::make_shared<DataBuffer>();
_dataBuffer->setDataType(dtype); _dataBuffer->setDataType(dtype);
@ -95,7 +95,7 @@ namespace nd4j {
} }
void InteropDataBuffer::prepareSpecialUse(const std::vector<const InteropDataBuffer*>& writeList, const std::vector<const InteropDataBuffer*>& readList, bool synchronizeWritables) { void InteropDataBuffer::prepareSpecialUse(const std::vector<const InteropDataBuffer*>& writeList, const std::vector<const InteropDataBuffer*>& readList, bool synchronizeWritables) {
auto currentDeviceId = nd4j::AffinityManager::currentDeviceId(); auto currentDeviceId = sd::AffinityManager::currentDeviceId();
for (const auto &v:readList) { for (const auto &v:readList) {
if (v == nullptr) if (v == nullptr)
continue; continue;

View File

@ -20,30 +20,30 @@
// @author Oleg Semeniv <oleg.semeniv@gmail.com> // @author Oleg Semeniv <oleg.semeniv@gmail.com>
// //
#include <NDArrayFactory.h> #include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <ConstantHelper.h> #include <helpers/ConstantHelper.h>
#include <ConstantShapeHelper.h> #include <helpers/ConstantShapeHelper.h>
#include <GraphExecutioner.h> #include <graph/GraphExecutioner.h>
#include <ShapeUtils.h> #include <helpers/ShapeUtils.h>
#include <type_traits> #include <type_traits>
#include <StringUtils.h> #include <helpers/StringUtils.h>
#include <NativeOps.h> #include <legacy/NativeOps.h>
namespace nd4j { namespace sd {
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <> template <>
ND4J_EXPORT NDArray NDArrayFactory::create<bool>(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bool> &data, nd4j::LaunchContext * context) { ND4J_EXPORT NDArray NDArrayFactory::create<bool>(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bool> &data, sd::LaunchContext * context) {
if ((int) shape.size() > MAX_RANK) if ((int) shape.size() > MAX_RANK)
throw std::invalid_argument("NDArrayFactory::create: rank of NDArray can't exceed 32 !"); throw std::invalid_argument("NDArrayFactory::create: rank of NDArray can't exceed 32 !");
ShapeDescriptor descriptor(nd4j::DataType::BOOL, order, shape); ShapeDescriptor descriptor(sd::DataType::BOOL, order, shape);
if (descriptor.arrLength() != data.size()) { if (descriptor.arrLength() != data.size()) {
nd4j_printf("NDArrayFactory::create: data size [%i] doesn't match shape length [%lld]\n", data.size(), descriptor.arrLength()); nd4j_printf("NDArrayFactory::create: data size [%i] doesn't match shape length [%lld]\n", data.size(), descriptor.arrLength());
@ -54,7 +54,7 @@ namespace nd4j {
ALLOCATE(hostBuffer, context->getWorkspace(), data.size(), bool); ALLOCATE(hostBuffer, context->getWorkspace(), data.size(), bool);
std::copy(data.begin(), data.end(), hostBuffer); std::copy(data.begin(), data.end(), hostBuffer);
std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(hostBuffer, data.size() * sizeof(bool), nd4j::DataType::BOOL, true, context->getWorkspace()); std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(hostBuffer, data.size() * sizeof(bool), sd::DataType::BOOL, true, context->getWorkspace());
NDArray result(buffer, descriptor, context); NDArray result(buffer, descriptor, context);
@ -63,7 +63,7 @@ namespace nd4j {
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, sd::LaunchContext * context) {
if ((int) shape.size() > MAX_RANK) if ((int) shape.size() > MAX_RANK)
throw std::invalid_argument("NDArrayFactory::create: rank of NDArray can't exceed 32 !"); throw std::invalid_argument("NDArrayFactory::create: rank of NDArray can't exceed 32 !");
@ -81,25 +81,25 @@ namespace nd4j {
return result; return result;
} }
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<double>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<double>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float16>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float16>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bfloat16>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bfloat16>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<Nd4jLong>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<Nd4jLong>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint64_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint64_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<unsigned int>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<unsigned int>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int16_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int16_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int8_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int8_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint8_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint8_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bool>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bool>& data, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template<typename T> template<typename T>
NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext * context) {
return create_(order, shape, DataTypeUtils::fromT<T>(), context); return create_(order, shape, DataTypeUtils::fromT<T>(), context);
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray* NDArrayFactory::create_, (const char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext * context), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray* NDArrayFactory::create_, (const char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext * context), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
@ -128,44 +128,44 @@ template ND4J_EXPORT void NDArrayFactory::memcpyFromVector(void *ptr, const std:
#ifndef __JAVACPP_HACK__ #ifndef __JAVACPP_HACK__
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const T value, const char order, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const T value, const char order, sd::LaunchContext * context) {
return valueOf(std::vector<Nd4jLong>(shape), value, order); return valueOf(std::vector<Nd4jLong>(shape), value, order);
} }
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const double value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const double value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const float value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const float value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const float16 value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const float16 value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const bfloat16 value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const bfloat16 value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const Nd4jLong value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const Nd4jLong value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const int value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const int value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const uint8_t value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const uint8_t value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const int8_t value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const int8_t value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const int16_t value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const int16_t value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const bool value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::initializer_list<Nd4jLong>& shape, const bool value, const char order, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<T>& data, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<T>& data, sd::LaunchContext * context) {
std::vector<T> vec(data); std::vector<T> vec(data);
return create<T>(order, shape, vec, context); return create<T>(order, shape, vec, context);
} }
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<double>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<double>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<float>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<float>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<float16>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<float16>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<bfloat16>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<bfloat16>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<Nd4jLong>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<Nd4jLong>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<uint64_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<uint64_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<int>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<int>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<unsigned int>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<unsigned int>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<int16_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<int16_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<int8_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<int8_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<uint8_t>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<uint8_t>& data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<bool>& data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, const std::initializer_list<bool>& data, sd::LaunchContext * context);
#endif #endif
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray* NDArrayFactory::create_(const T scalar, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::create_(const T scalar, sd::LaunchContext * context) {
std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(1 * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true); std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(1 * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true);
@ -178,22 +178,22 @@ template ND4J_EXPORT void NDArrayFactory::memcpyFromVector(void *ptr, const std:
return res; return res;
} }
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const double scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const double scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const float scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const float scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const float16 scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const float16 scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const bfloat16 scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const bfloat16 scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const Nd4jLong scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const Nd4jLong scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const int scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const int scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const bool scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const bool scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const int8_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const int8_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint8_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint8_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint16_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint16_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint32_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint32_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint64_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const uint64_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const int16_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const int16_t scalar, sd::LaunchContext * context);
template <typename T> template <typename T>
NDArray NDArrayFactory::create(nd4j::DataType type, const T scalar, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(sd::DataType type, const T scalar, sd::LaunchContext * context) {
if (type == DataTypeUtils::fromT<T>()) if (type == DataTypeUtils::fromT<T>())
return NDArrayFactory::create(scalar, context); return NDArrayFactory::create(scalar, context);
@ -204,23 +204,23 @@ template ND4J_EXPORT void NDArrayFactory::memcpyFromVector(void *ptr, const std:
return res; return res;
} }
// BUILD_DOUBLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::create, (DataType type, const T scalar, nd4j::LaunchContext * context), LIBND4J_TYPES); // BUILD_DOUBLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::create, (DataType type, const T scalar, sd::LaunchContext * context), LIBND4J_TYPES);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const double scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const double scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const float scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const float scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const float16 scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const float16 scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const bfloat16 scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const bfloat16 scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const Nd4jLong scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const Nd4jLong scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const int scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const int scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const int8_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const int8_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint8_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint8_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint16_t scalar, nd4j::LaunchContext* workspace); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint16_t scalar, sd::LaunchContext* workspace);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint32_t scalar, nd4j::LaunchContext* workspace); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint32_t scalar, sd::LaunchContext* workspace);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint64_t scalar, nd4j::LaunchContext* workspace); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const uint64_t scalar, sd::LaunchContext* workspace);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const int16_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const int16_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const bool scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(DataType type, const bool scalar, sd::LaunchContext * context);
template <typename T> template <typename T>
NDArray NDArrayFactory::create(const T scalar, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(const T scalar, sd::LaunchContext * context) {
std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(1 * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true); std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(1 * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true);
@ -233,73 +233,73 @@ template ND4J_EXPORT void NDArrayFactory::memcpyFromVector(void *ptr, const std:
return res; return res;
} }
template ND4J_EXPORT NDArray NDArrayFactory::create(const double scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const double scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const float scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const float scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const float16 scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const float16 scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const bfloat16 scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const bfloat16 scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const Nd4jLong scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const Nd4jLong scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const int scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const int scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const int8_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const int8_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const uint8_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const uint8_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const int16_t scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const int16_t scalar, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const uint16_t scalar, nd4j::LaunchContext* workspace); template ND4J_EXPORT NDArray NDArrayFactory::create(const uint16_t scalar, sd::LaunchContext* workspace);
template ND4J_EXPORT NDArray NDArrayFactory::create(const uint32_t scalar, nd4j::LaunchContext* workspace); template ND4J_EXPORT NDArray NDArrayFactory::create(const uint32_t scalar, sd::LaunchContext* workspace);
template ND4J_EXPORT NDArray NDArrayFactory::create(const uint64_t scalar, nd4j::LaunchContext* workspace); template ND4J_EXPORT NDArray NDArrayFactory::create(const uint64_t scalar, sd::LaunchContext* workspace);
template ND4J_EXPORT NDArray NDArrayFactory::create(const bool scalar, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const bool scalar, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template<typename T> template<typename T>
NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, sd::LaunchContext * context) {
return new NDArray(NDArrayFactory::create<T>(order, shape, data, context)); return new NDArray(NDArrayFactory::create<T>(order, shape, data, context));
} }
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<double> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<double> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float16> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<float16> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bfloat16> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bfloat16> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<unsigned int> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<unsigned int> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<unsigned long> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<unsigned long> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<Nd4jLong> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<Nd4jLong> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int8_t> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int8_t> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint8_t> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint8_t> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int16_t> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<int16_t> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint16_t> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<uint16_t> &data, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bool> &data, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const std::vector<Nd4jLong> &shape, const std::vector<bool> &data, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <> template <>
ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, NDArray* value, const char order, nd4j::LaunchContext * context) { ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, NDArray* value, const char order, sd::LaunchContext * context) {
auto result = create_(order, shape, value->dataType(), context); auto result = create_(order, shape, value->dataType(), context);
result->assign(*value); result->assign(*value);
return result; return result;
} }
template <> template <>
ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, NDArray& value, const char order, nd4j::LaunchContext * context) { ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, NDArray& value, const char order, sd::LaunchContext * context) {
auto result = create_(order, shape, value.dataType(), context); auto result = create_(order, shape, value.dataType(), context);
result->assign(value); result->assign(value);
return result; return result;
} }
template <typename T> template <typename T>
NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const T value, const char order, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const T value, const char order, sd::LaunchContext * context) {
auto result = create_(order, shape, DataTypeUtils::fromT<T>()); auto result = create_(order, shape, DataTypeUtils::fromT<T>());
result->assign(value); result->assign(value);
return result; return result;
} }
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const double value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const double value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const float value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const float value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const float16 value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const float16 value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const bfloat16 value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const bfloat16 value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const Nd4jLong value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const Nd4jLong value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const int value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const int value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const int16_t value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const int16_t value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const int8_t value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const int8_t value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const uint8_t value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const uint8_t value, const char order, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const bool value, const char order, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const bool value, const char order, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
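For orientation, a minimal caller-side sketch of the factory entry points instantiated above, under the renamed sd namespace (a sketch only: the include path and the explicit default-context call are assumptions, not taken from this diff):

    #include <array/NDArrayFactory.h>   // assumed header location

    using namespace sd;

    static void factoryExample() {
        auto ctx = LaunchContext::defaultContext();                       // default execution context
        auto a = NDArrayFactory::create<float>('c', {2, 3}, ctx);         // 2x3 float array in c-order
        auto b = NDArrayFactory::valueOf<float>({2, 3}, 1.5f, 'c', ctx);  // heap-allocated 2x3 array filled with 1.5
        delete b;                                                         // valueOf returns a raw pointer owned by the caller here
    }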
@@ -331,7 +331,7 @@ template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const st
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray* NDArrayFactory::vector(Nd4jLong length, const T value, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::vector(Nd4jLong length, const T value, sd::LaunchContext * context) {
std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(length * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true); std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(length * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true);
@@ -344,37 +344,37 @@ template ND4J_EXPORT NDArray* NDArrayFactory::create_(const char order, const st
return res; return res;
} }
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const double startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const double startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const float startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const float startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const float16 startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const float16 startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const bfloat16 startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const bfloat16 startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const Nd4jLong startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const Nd4jLong startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const int startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const int startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint8_t startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint8_t startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint16_t startingValue, nd4j::LaunchContext *workspace); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint16_t startingValue, sd::LaunchContext *workspace);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint32_t startingValue, nd4j::LaunchContext *workspace); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint32_t startingValue, sd::LaunchContext *workspace);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint64_t startingValue, nd4j::LaunchContext *workspace); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const uint64_t startingValue, sd::LaunchContext *workspace);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const int8_t startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const int8_t startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const int16_t startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const int16_t startingValue, sd::LaunchContext * context);
template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const bool startingValue, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray* NDArrayFactory::vector(Nd4jLong length, const bool startingValue, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::create(const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context) {
std::vector<Nd4jLong> vec(shape); std::vector<Nd4jLong> vec(shape);
return create<T>(order, vec, context); return create<T>(order, vec, context);
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::create, (const char, const std::initializer_list<Nd4jLong>&, nd4j::LaunchContext * context), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::create, (const char, const std::initializer_list<Nd4jLong>&, sd::LaunchContext * context), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext * context) {
return create(order, shape, DataTypeUtils::fromT<T>(), context); return create(order, shape, DataTypeUtils::fromT<T>(), context);
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::create, (const char order, const std::vector<Nd4jLong> &shape, nd4j::LaunchContext * context), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::create, (const char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext * context), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &shape, sd::DataType dtype, sd::LaunchContext* context) {
if ((int) shape.size() > MAX_RANK) if ((int) shape.size() > MAX_RANK)
throw std::invalid_argument("NDArrayFactory::create: rank of NDArray can't exceed 32"); throw std::invalid_argument("NDArrayFactory::create: rank of NDArray can't exceed 32");
@@ -392,7 +392,7 @@ NDArray NDArrayFactory::create(const char order, const std::vector<Nd4jLong> &sh
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::create(nd4j::DataType dtype, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(sd::DataType dtype, sd::LaunchContext * context) {
std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(DataTypeUtils::sizeOfElement(dtype), dtype, context->getWorkspace(), true); std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(DataTypeUtils::sizeOfElement(dtype), dtype, context->getWorkspace(), true);
@@ -403,7 +403,7 @@ NDArray NDArrayFactory::create(nd4j::DataType dtype, nd4j::LaunchContext * conte
return res; return res;
} }
NDArray* NDArrayFactory::create_(nd4j::DataType dtype, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::create_(sd::DataType dtype, sd::LaunchContext * context) {
auto result = new NDArray(); auto result = new NDArray();
*result = NDArrayFactory::create(dtype, context); *result = NDArrayFactory::create(dtype, context);
return result; return result;
@@ -411,7 +411,7 @@ NDArray* NDArrayFactory::create_(nd4j::DataType dtype, nd4j::LaunchContext * con
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::create(const std::vector<T> &values, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(const std::vector<T> &values, sd::LaunchContext * context) {
std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(values.size() * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true); std::shared_ptr<DataBuffer> buffer = std::make_shared<DataBuffer>(values.size() * sizeof(T), DataTypeUtils::fromT<T>(), context->getWorkspace(), true);
@@ -424,21 +424,21 @@ NDArray NDArrayFactory::create(const std::vector<T> &values, nd4j::LaunchContext
return res; return res;
} }
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<double> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<double> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<float> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<float> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<float16> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<float16> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bfloat16> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bfloat16> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<Nd4jLong> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<Nd4jLong> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<int> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<int> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<int16_t> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<int16_t> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<uint16_t> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<uint16_t> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<int8_t> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<int8_t> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<uint8_t> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<uint8_t> &values, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bool> &values, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bool> &values, sd::LaunchContext * context);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray* NDArrayFactory::empty_(nd4j::LaunchContext * context) { NDArray* NDArrayFactory::empty_(sd::LaunchContext * context) {
auto shapeInfo = ShapeBuilders::createScalarShapeInfo(DataTypeUtils::fromT<T>(), context->getWorkspace()); auto shapeInfo = ShapeBuilders::createScalarShapeInfo(DataTypeUtils::fromT<T>(), context->getWorkspace());
ArrayOptions::setPropertyBit(shapeInfo, ARRAY_EMPTY); ArrayOptions::setPropertyBit(shapeInfo, ARRAY_EMPTY);
auto result = new NDArray(nullptr, shapeInfo, context, false); auto result = new NDArray(nullptr, shapeInfo, context, false);
@@ -447,11 +447,11 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bool> &val
return result; return result;
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray* NDArrayFactory::empty_, (nd4j::LaunchContext * context), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray* NDArrayFactory::empty_, (sd::LaunchContext * context), LIBND4J_TYPES);
NDArray* NDArrayFactory::empty_(nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::empty_(sd::DataType dataType, sd::LaunchContext * context) {
if (context == nullptr) if (context == nullptr)
context = nd4j::LaunchContext ::defaultContext(); context = sd::LaunchContext ::defaultContext();
auto shapeInfo = ShapeBuilders::createScalarShapeInfo(dataType, context->getWorkspace()); auto shapeInfo = ShapeBuilders::createScalarShapeInfo(dataType, context->getWorkspace());
ArrayOptions::setPropertyBit(shapeInfo, ARRAY_EMPTY); ArrayOptions::setPropertyBit(shapeInfo, ARRAY_EMPTY);
@@ -464,13 +464,13 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bool> &val
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::empty(nd4j::LaunchContext * context) { NDArray NDArrayFactory::empty(sd::LaunchContext * context) {
return empty(DataTypeUtils::fromT<T>(), context); return empty(DataTypeUtils::fromT<T>(), context);
} }
BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::empty, (nd4j::LaunchContext * context), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template ND4J_EXPORT NDArray NDArrayFactory::empty, (sd::LaunchContext * context), LIBND4J_TYPES);
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::empty(nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray NDArrayFactory::empty(sd::DataType dataType, sd::LaunchContext * context) {
auto shapeInfo = ShapeBuilders::createScalarShapeInfo(dataType, context->getWorkspace()); auto shapeInfo = ShapeBuilders::createScalarShapeInfo(dataType, context->getWorkspace());
ArrayOptions::setPropertyBit(shapeInfo, ARRAY_EMPTY); ArrayOptions::setPropertyBit(shapeInfo, ARRAY_EMPTY);
NDArray result(nullptr, shapeInfo, context, false); NDArray result(nullptr, shapeInfo, context, false);
@@ -481,21 +481,21 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(const std::vector<bool> &val
} }
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const NDArray& value, const char order, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::valueOf(const std::vector<Nd4jLong>& shape, const NDArray& value, const char order, sd::LaunchContext * context) {
auto res = NDArrayFactory::create_(order, shape, value.dataType(), context); auto res = NDArrayFactory::create_(order, shape, value.dataType(), context);
res->assign(const_cast<NDArray&>(value)); res->assign(const_cast<NDArray&>(value));
return res; return res;
} }
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::create_( const char order, const std::vector<Nd4jLong> &shape, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::create_( const char order, const std::vector<Nd4jLong> &shape, sd::DataType dataType, sd::LaunchContext * context) {
return new NDArray(order, shape, dataType, context); return new NDArray(order, shape, dataType, context);
} }
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
template <typename T> template <typename T>
NDArray NDArrayFactory::create(T* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context) { NDArray NDArrayFactory::create(T* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context) {
if ((int) shape.size() > MAX_RANK) if ((int) shape.size() > MAX_RANK)
throw std::invalid_argument("NDArrayFactory::create: Rank of NDArray can't exceed 32"); throw std::invalid_argument("NDArrayFactory::create: Rank of NDArray can't exceed 32");
@@ -510,89 +510,89 @@ NDArray NDArrayFactory::create(T* buffer, const char order, const std::initializ
return result; return result;
} }
template ND4J_EXPORT NDArray NDArrayFactory::create(double* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(double* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(float* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(float* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(float16* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(float16* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(bfloat16* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(bfloat16* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(Nd4jLong * buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(Nd4jLong * buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(int* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(int* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(bool* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(bool* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(uint8_t * buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(uint8_t * buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(int8_t* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(int8_t* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
template ND4J_EXPORT NDArray NDArrayFactory::create(int16_t* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, nd4j::LaunchContext * context); template ND4J_EXPORT NDArray NDArrayFactory::create(int16_t* buffer, const char order, const std::initializer_list<Nd4jLong>& shape, sd::LaunchContext * context);
///////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const char16_t* u16string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const char16_t* u16string, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray(u16string, dtype, context); return NDArray(u16string, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const char16_t* u16string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_(const char16_t* u16string, sd::DataType dtype, sd::LaunchContext* context) {
return string_(std::u16string(u16string), dtype, context); return string_(std::u16string(u16string), dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const std::u16string& u16string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_(const std::u16string& u16string, sd::DataType dtype, sd::LaunchContext* context) {
auto res = new NDArray(); auto res = new NDArray();
*res = NDArray(u16string, dtype, context); *res = NDArray(u16string, dtype, context);
return res; return res;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const std::u16string& u16string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const std::u16string& u16string, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray(u16string, dtype, context); return NDArray(u16string, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const char32_t* u32string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const char32_t* u32string, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray(u32string, dtype, context); return NDArray(u32string, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const char32_t* u32string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_(const char32_t* u32string, sd::DataType dtype, sd::LaunchContext* context) {
return string_(std::u32string(u32string), dtype, context); return string_(std::u32string(u32string), dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const std::u32string& u32string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_(const std::u32string& u32string, sd::DataType dtype, sd::LaunchContext* context) {
auto res = new NDArray(); auto res = new NDArray();
*res = NDArray(u32string, dtype, context); *res = NDArray(u32string, dtype, context);
return res; return res;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const std::u32string& u32string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const std::u32string& u32string, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray(u32string, dtype, context); return NDArray(u32string, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const char* str, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const char* str, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray(str, dtype, context); return NDArray(str, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const char* str, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_(const char* str, sd::DataType dtype, sd::LaunchContext* context) {
return string_(std::string(str), dtype, context); return string_(std::string(str), dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const std::string& str, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_(const std::string& str, sd::DataType dtype, sd::LaunchContext* context) {
auto res = new NDArray(); auto res = new NDArray();
*res = NDArray(str, dtype, context); *res = NDArray(str, dtype, context);
return res; return res;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const std::string& str, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const std::string& str, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray(str, dtype, context); return NDArray(str, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray NDArrayFactory::string(const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, sd::DataType dataType, sd::LaunchContext * context) {
return NDArray(shape, std::vector<const char*>(strings), dataType, context); return NDArray(shape, std::vector<const char*>(strings), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, sd::DataType dataType, sd::LaunchContext * context) {
return NDArray( shape, strings, dataType, context); return NDArray( shape, strings, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, sd::DataType dataType, sd::LaunchContext * context) {
return NDArray( shape, std::vector<std::string>(string), dataType, context); return NDArray( shape, std::vector<std::string>(string), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<const char *> &strings, sd::DataType dataType, sd::LaunchContext * context) {
return NDArrayFactory::string_( shape, std::vector<const char*>(strings), dataType, context); return NDArrayFactory::string_( shape, std::vector<const char*>(strings), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong> &shape, const std::vector<const char *> &strings, sd::DataType dataType, sd::LaunchContext * context) {
std::vector<std::string> vec(strings.size()); std::vector<std::string> vec(strings.size());
int cnt = 0; int cnt = 0;
for (auto s:strings) for (auto s:strings)
@@ -601,37 +601,37 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(int16_t* buffer, const char
return NDArrayFactory::string_( shape, vec, dataType, context); return NDArrayFactory::string_( shape, vec, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong> &shape, const std::initializer_list<std::string> &string, sd::DataType dataType, sd::LaunchContext * context) {
return NDArrayFactory::string_( shape, std::vector<std::string>(string), dataType, context); return NDArrayFactory::string_( shape, std::vector<std::string>(string), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, sd::DataType dataType, sd::LaunchContext * context) {
return NDArray(shape, string, dataType, context); return NDArray(shape, string, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_(const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, nd4j::DataType dataType, nd4j::LaunchContext * context) { NDArray* NDArrayFactory::string_(const std::vector<Nd4jLong> &shape, const std::vector<std::string> &string, sd::DataType dataType, sd::LaunchContext * context) {
auto res = new NDArray(); auto res = new NDArray();
*res = NDArray( shape, string, dataType, context); *res = NDArray( shape, string, dataType, context);
return res; return res;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
return NDArray( shape, std::vector<const char16_t*>(strings), dataType, context); return NDArray( shape, std::vector<const char16_t*>(strings), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
return NDArray( shape, strings, dataType, context); return NDArray( shape, strings, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, sd::DataType dataType, sd::LaunchContext* context) {
return NDArray( shape, std::vector<std::u16string>(string), dataType, context); return NDArray( shape, std::vector<std::u16string>(string), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char16_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
return NDArrayFactory::string_( shape, std::vector<const char16_t*>(strings), dataType, context); return NDArrayFactory::string_( shape, std::vector<const char16_t*>(strings), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
std::vector<std::u16string> vec(strings.size()); std::vector<std::u16string> vec(strings.size());
int cnt = 0; int cnt = 0;
for (auto s : strings) for (auto s : strings)
@@ -640,37 +640,37 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(int16_t* buffer, const char
return NDArrayFactory::string_( shape, vec, dataType, context); return NDArrayFactory::string_( shape, vec, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u16string>& string, sd::DataType dataType, sd::LaunchContext* context) {
return NDArrayFactory::string_( shape, std::vector<std::u16string>(string), dataType, context); return NDArrayFactory::string_( shape, std::vector<std::u16string>(string), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, sd::DataType dataType, sd::LaunchContext* context) {
auto res = new NDArray(); auto res = new NDArray();
*res = NDArray( shape, string, dataType, context); *res = NDArray( shape, string, dataType, context);
return res; return res;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray( shape, string, dtype, context); return NDArray( shape, string, dtype, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
return NDArray( shape, std::vector<const char32_t*>(strings), dataType, context); return NDArray( shape, std::vector<const char32_t*>(strings), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
return NDArray( shape, strings, dataType, context); return NDArray( shape, strings, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, sd::DataType dataType, sd::LaunchContext* context) {
return NDArray(shape, std::vector<std::u32string>(string), dataType, context); return NDArray(shape, std::vector<std::u32string>(string), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<const char32_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
return NDArrayFactory::string_( shape, std::vector<const char32_t*>(strings), dataType, context); return NDArrayFactory::string_( shape, std::vector<const char32_t*>(strings), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, sd::DataType dataType, sd::LaunchContext* context) {
std::vector<std::u32string> vec(strings.size()); std::vector<std::u32string> vec(strings.size());
int cnt = 0; int cnt = 0;
for (auto s : strings) for (auto s : strings)
@@ -678,23 +678,23 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(int16_t* buffer, const char
return NDArrayFactory::string_( shape, vec, dataType, context); return NDArrayFactory::string_( shape, vec, dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::initializer_list<std::u32string>& string, sd::DataType dataType, sd::LaunchContext* context) {
return NDArrayFactory::string_( shape, std::vector<std::u32string>(string), dataType, context); return NDArrayFactory::string_( shape, std::vector<std::u32string>(string), dataType, context);
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, nd4j::DataType dataType, nd4j::LaunchContext* context) { NDArray* NDArrayFactory::string_( const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, sd::DataType dataType, sd::LaunchContext* context) {
auto res = new NDArray(); auto res = new NDArray();
*res = NDArray( shape, string, dataType, context); *res = NDArray( shape, string, dataType, context);
return res; return res;
} }
///////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////
NDArray NDArrayFactory::string(const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, nd4j::DataType dtype, nd4j::LaunchContext* context) { NDArray NDArrayFactory::string(const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, sd::DataType dtype, sd::LaunchContext* context) {
return NDArray( shape, string, dtype, context); return NDArray( shape, string, dtype, context);
} }
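A short usage sketch for the string factories above (hedged: sd::DataType::UTF8 and the default-context call are assumptions based on the rest of the codebase, not shown in this hunk):

    auto ctx = sd::LaunchContext::defaultContext();
    // scalar UTF-8 string array
    auto s = sd::NDArrayFactory::string("alpha", sd::DataType::UTF8, ctx);
    // rank-1 array holding two UTF-8 strings
    auto v = sd::NDArrayFactory::string({2}, std::vector<const char*>{"alpha", "beta"}, sd::DataType::UTF8, ctx);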
NDArray NDArrayFactory::fromNpyFile(const char *fileName) { NDArray NDArrayFactory::fromNpyFile(const char *fileName) {
auto size = nd4j::graph::getFileSize(fileName); auto size = sd::graph::getFileSize(fileName);
if (size < 0) if (size < 0)
throw std::runtime_error("File doesn't exit"); throw std::runtime_error("File doesn't exit");
@@ -705,7 +705,7 @@ template ND4J_EXPORT NDArray NDArrayFactory::create(int16_t* buffer, const char
auto length = shape::length(shape); auto length = shape::length(shape);
int8_t *buffer = nullptr; int8_t *buffer = nullptr;
nd4j::memory::Workspace *workspace = nullptr; sd::memory::Workspace *workspace = nullptr;
auto byteLen = length * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)); auto byteLen = length * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape));
ALLOCATE(buffer, workspace, byteLen, int8_t); ALLOCATE(buffer, workspace, byteLen, int8_t);
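fromNpyFile above maps a serialized NumPy array into an NDArray; a minimal sketch (the path is a placeholder, and dataType() is assumed from its use elsewhere in this diff):

    // throws std::runtime_error when the file cannot be read
    auto arr = sd::NDArrayFactory::fromNpyFile("/tmp/example.npy");
    auto dt  = arr.dataType();   // element type recovered from the npy header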


@@ -24,7 +24,7 @@
 #include <helpers/ShapeUtils.h>
 #include <ops/declarable/CustomOperations.h>

-namespace nd4j {
+namespace sd {
     NDArrayList::NDArrayList(int height, bool expandable) {
         _expandable = expandable;
         _elements.store(0);
@@ -47,7 +47,7 @@ namespace nd4j {
         return new NDArray(readRaw(idx)->dup());
     }

-    nd4j::DataType NDArrayList::dataType() {
+    sd::DataType NDArrayList::dataType() {
         return _dtype;
     }
@@ -144,7 +144,7 @@ namespace nd4j {
     NDArray* NDArrayList::stack() {
         // FIXME: this is bad for perf, but ok as poc
-        nd4j::ops::stack op;
+        sd::ops::stack op;
         std::vector<NDArray*> inputs;
         std::vector<double> targs;
         std::vector<Nd4jLong> iargs({0});
@@ -175,7 +175,7 @@ namespace nd4j {
         return _name;
     }

-    nd4j::LaunchContext * NDArrayList::context() {
+    sd::LaunchContext * NDArrayList::context() {
         return _context;
     }


@@ -21,39 +21,40 @@
 #include <array/ResultSet.h>
 #include <graph/FlatUtils.h>

-namespace nd4j {
-    ResultSet::ResultSet(const nd4j::graph::FlatResult* result) {
-        if (result != nullptr) {
-            for (int e = 0; e < result->variables()->size(); e++) {
-                auto var = result->variables()->Get(e);
-                NDArray* array;
-
-                if (var->ndarray() != nullptr) {
-                    array = nd4j::graph::FlatUtils::fromFlatArray(var->ndarray());
-                } else if (var->shape() != nullptr) {
-                    std::vector<Nd4jLong> shapeInfo;
-                    for (int i = 0; i < var->shape()->size(); i++) {
-                        shapeInfo.emplace_back(var->shape()->Get(i));
-                    }
-
-                    // we just create empty array here
-                    int s0 = shapeInfo.at(0);
-
-                    std::vector<Nd4jLong> shape;
-                    for (int i = 0; i < s0; i++) {
-                        shape.emplace_back(shapeInfo.at(i + 1));
-                    }
-
-                    array = new NDArray((char) shapeInfo.at(shapeInfo.size() - 1), shape, DataTypeUtils::fromFlatDataType(var->dtype()));
-                } else {
-                    nd4j_printf("Either shape or NDArray should be defined in FlatResult variable\n","");
-                    throw std::runtime_error("Empty variable");
-                }
-
-                _content.push_back(array);
-            }
-        }
-    }
+namespace sd {
+    ResultSet::ResultSet() {
+        //
+    }
+
+    ResultSet::ResultSet(const sd::graph::FlatResult* result) {
+        for (int e = 0; e < result->variables()->size(); e++) {
+            auto var = result->variables()->Get(e);
+
+            NDArray* array;
+
+            if (var->ndarray() != nullptr) {
+                array = sd::graph::FlatUtils::fromFlatArray(var->ndarray());
+            } else if (var->shape() != nullptr) {
+                std::vector<Nd4jLong> shapeInfo;
+                for (int i = 0; i < var->shape()->size(); i++) {
+                    shapeInfo.emplace_back(var->shape()->Get(i));
+                }
+
+                // we just create empty array here
+                int s0 = shapeInfo.at(0);
+
+                std::vector<Nd4jLong> shape;
+                for (int i = 0; i < s0; i++) {
+                    shape.emplace_back(shapeInfo.at(i + 1));
+                }
+
+                array = new NDArray((char) shapeInfo.at(shapeInfo.size() - 1), shape, DataTypeUtils::fromFlatDataType(var->dtype()));
+            } else {
+                nd4j_printf("Either shape or NDArray should be defined in FlatResult variable\n","");
+                throw std::runtime_error("Empty variable");
+            }
+
+            _content.push_back(array);
+        }
+    }
@@ -123,15 +124,15 @@ namespace nd4j {
         return (int) _content.size();
     }

-    nd4j::NDArray* ResultSet::at(const unsigned long idx) const {
+    sd::NDArray* ResultSet::at(const unsigned long idx) const {
         return _content.at(idx);
     }

-    nd4j::NDArray* ResultSet::operator[](const unsigned long idx) const {
+    sd::NDArray* ResultSet::operator[](const unsigned long idx) const {
         return _content[idx];
     }

-    void ResultSet::push_back(nd4j::NDArray *array) {
+    void ResultSet::push_back(sd::NDArray *array) {
         _content.emplace_back(array);
     }
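A brief sketch of how this container is typically consumed, using only the members visible in this diff (sd::DataType::FLOAT32 and the ownership of the stored arrays are assumptions here):

    auto ctx = sd::LaunchContext::defaultContext();

    sd::ResultSet results;                          // default constructor introduced above
    results.push_back(sd::NDArrayFactory::create_('c', {2, 2}, sd::DataType::FLOAT32, ctx));

    for (int e = 0; e < results.size(); e++) {
        auto array = results.at(e);                 // borrowed pointer to the e-th result
        // ... inspect or copy *array ...
    }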


@@ -18,11 +18,11 @@
 // @author raver119@gmail.com
 //

-#include "../ShapeDescriptor.h"
-#include <shape.h>
-#include <ShapeBuilders.h>
+#include <array/ShapeDescriptor.h>
+#include <helpers/shape.h>
+#include <helpers/ShapeBuilders.h>

-namespace nd4j {
+namespace sd {

 //////////////////////////////////////////////////////////////////////////
 // equal to operator
@@ -226,7 +226,7 @@ namespace nd4j {
         _strides.emplace_back(shapeInfo[e + 1 + _rank]);
     }

-    ShapeDescriptor::ShapeDescriptor(const Nd4jLong *shapeInfo, const nd4j::DataType dtypeOverride)
+    ShapeDescriptor::ShapeDescriptor(const Nd4jLong *shapeInfo, const sd::DataType dtypeOverride)
             : ShapeDescriptor::ShapeDescriptor(shapeInfo, false) {
         _dataType = dtypeOverride;
     }
@@ -356,14 +356,14 @@ namespace nd4j {
 }

 namespace std {
-    size_t hash<nd4j::ShapeDescriptor>::operator()(const nd4j::ShapeDescriptor &k) const {
+    size_t hash<sd::ShapeDescriptor>::operator()(const sd::ShapeDescriptor &k) const {
         auto res = std::hash<Nd4jLong>()(k.arrLength());
         res ^= std::hash<char>()(k.order()) + 0x9e3779b9 + (res << 6) + (res >> 2);
         res ^= k.dataType() + 0x9e3779b9 + (res << 6) + (res >> 2);
         res ^= std::hash<int>()(k.rank()) + 0x9e3779b9 + (res << 6) + (res >> 2);
         res ^= std::hash<Nd4jLong>()(k.ews()) + 0x9e3779b9 + (res << 6) + (res >> 2);

-        auto shapes = const_cast<nd4j::ShapeDescriptor&>(k).shape();
-        auto strides = const_cast<nd4j::ShapeDescriptor&>(k).strides();
+        auto shapes = const_cast<sd::ShapeDescriptor&>(k).shape();
+        auto strides = const_cast<sd::ShapeDescriptor&>(k).strides();

         for (auto s: shapes) {
             res ^= std::hash<Nd4jLong>()(s) + 0x9e3779b9 + (res << 6) + (res >> 2);
         }
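Both std::hash specializations in this change (ShapeDescriptor here, TadDescriptor below) mix fields into one seed with the boost-style hash_combine pattern; a generic restatement of that pattern for reference (the helper name is illustrative, not part of the library):

    #include <cstddef>
    #include <functional>

    // boost-style hash_combine: folds hash(v) into an accumulated seed
    template <typename T>
    static void hash_combine(std::size_t& seed, const T& v) {
        seed ^= std::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }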


@@ -18,10 +18,10 @@
 // @author raver119@gmail.com
 //

-#include <pointercast.h>
+#include <system/pointercast.h>
 #include <array/ShapeList.h>

-namespace nd4j {
+namespace sd {
 //    ShapeList::ShapeList(bool autoRemovable) {
 //        _autoremovable = autoRemovable;
 //    }


@@ -22,7 +22,7 @@
 #include <algorithm>
 #include "../TadDescriptor.h"

-namespace nd4j {
+namespace sd {
     TadDescriptor::TadDescriptor(const TadDescriptor &other) {
         _originalShape = other._originalShape;
         _axis = other._axis;
@@ -79,13 +79,13 @@ namespace nd4j {
 }

 namespace std {
-    size_t hash<nd4j::TadDescriptor>::operator()(const nd4j::TadDescriptor &k) const {
+    size_t hash<sd::TadDescriptor>::operator()(const sd::TadDescriptor &k) const {
         // Compute individual hash values for first,
         // second and third and combine them using XOR
         // and bit shifting:
         auto res = std::hash<int>()((int)k.areUnitiesinShape());
-        res ^= std::hash<nd4j::ShapeDescriptor>()(k.originalShapeConst()) + 0x9e3779b9 + (res << 6) + (res >> 2);
-        auto axes = const_cast<nd4j::TadDescriptor&>(k).axis();
+        res ^= std::hash<sd::ShapeDescriptor>()(k.originalShapeConst()) + 0x9e3779b9 + (res << 6) + (res >> 2);
+        auto axes = const_cast<sd::TadDescriptor&>(k).axis();
         for (auto a: axes) {
             res ^= std::hash<int>()(a) + 0x9e3779b9 + (res << 6) + (res >> 2);
         }


@@ -19,10 +19,10 @@
 //

 #include "../TadPack.h"
-#include <Environment.h>
+#include <system/Environment.h>
 #include <helpers/shape.h>

-namespace nd4j {
+namespace sd {
     TadPack::TadPack(ConstantDataBuffer &shapes, ConstantDataBuffer &offets, Nd4jLong numTads) {
         _tadShape = shapes;
         _tadOffsets = offets;
@@ -49,11 +49,11 @@ namespace nd4j {
     }

     Nd4jLong* TadPack::platformShapeInfo() const {
-        return nd4j::Environment::getInstance()->isCPU() ? primaryShapeInfo() : specialShapeInfo();
+        return sd::Environment::getInstance()->isCPU() ? primaryShapeInfo() : specialShapeInfo();
     }

     Nd4jLong* TadPack::platformOffsets() const {
-        return nd4j::Environment::getInstance()->isCPU() ? primaryOffsets() : specialOffsets();
+        return sd::Environment::getInstance()->isCPU() ? primaryOffsets() : specialOffsets();
     }

     int TadPack::shapeInfoLength() const {


@ -48,7 +48,7 @@
#endif #endif
#ifndef CBLAS_H #ifndef CBLAS_H
#include <dll.h> #include <system/dll.h>
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {


@ -30,7 +30,7 @@ enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};
enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132}; enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};
enum CBLAS_SIDE {CblasLeft=141, CblasRight=142}; enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
*/ */
#include <dll.h> #include <system/dll.h>
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {


@ -49,8 +49,8 @@
#include <string> #include <string>
#include <fstream> #include <fstream>
#include <streambuf> #include <streambuf>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <dll.h> #include <system/dll.h>
#include <array/DataType.h> #include <array/DataType.h>
@ -238,7 +238,7 @@ namespace cnpy {
ND4J_EXPORT npz_t npzLoad(std::string fname); ND4J_EXPORT npz_t npzLoad(std::string fname);
ND4J_EXPORT nd4j::DataType dataTypeFromHeader(char *data); ND4J_EXPORT sd::DataType dataTypeFromHeader(char *data);
/** /**
* Parse the numpy header from * Parse the numpy header from
* the given file * the given file


@ -23,8 +23,8 @@
#include <string> #include <string>
#include <stdexcept> #include <stdexcept>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -33,7 +33,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT allocation_exception : public std::runtime_error { class ND4J_EXPORT allocation_exception : public std::runtime_error {
public: public:
allocation_exception(std::string message); allocation_exception(std::string message);


@ -23,7 +23,7 @@
#include <string> #include <string>
#include <stdexcept> #include <stdexcept>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -32,7 +32,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT cuda_exception : public std::runtime_error { class ND4J_EXPORT cuda_exception : public std::runtime_error {
public: public:
cuda_exception(std::string message); cuda_exception(std::string message);


@ -24,7 +24,7 @@
#include <string> #include <string>
#include <stdexcept> #include <stdexcept>
#include <array/DataType.h> #include <array/DataType.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -33,16 +33,16 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT datatype_exception : public std::runtime_error { class ND4J_EXPORT datatype_exception : public std::runtime_error {
public: public:
datatype_exception(std::string message); datatype_exception(std::string message);
~datatype_exception() = default; ~datatype_exception() = default;
static datatype_exception build(std::string message, nd4j::DataType actual); static datatype_exception build(std::string message, sd::DataType actual);
static datatype_exception build(std::string message, nd4j::DataType expected, nd4j::DataType actual); static datatype_exception build(std::string message, sd::DataType expected, sd::DataType actual);
static datatype_exception build(std::string message, nd4j::DataType expected, nd4j::DataType actualX, nd4j::DataType actualY); static datatype_exception build(std::string message, sd::DataType expected, sd::DataType actualX, sd::DataType actualY);
}; };
} }


@ -23,8 +23,8 @@
#include <string> #include <string>
#include <stdexcept> #include <stdexcept>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -33,7 +33,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT graph_exception : public std::runtime_error { class ND4J_EXPORT graph_exception : public std::runtime_error {
protected: protected:
Nd4jLong _graphId; Nd4jLong _graphId;


@ -21,11 +21,11 @@
#ifndef DEV_TESTS_GRAPH_EXECUTION_EXCEPTION_H #ifndef DEV_TESTS_GRAPH_EXECUTION_EXCEPTION_H
#define DEV_TESTS_GRAPH_EXECUTION_EXCEPTION_H #define DEV_TESTS_GRAPH_EXECUTION_EXCEPTION_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdexcept> #include <stdexcept>
#include <exceptions/graph_exception.h> #include <exceptions/graph_exception.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -34,7 +34,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT graph_execution_exception: public graph_exception { class ND4J_EXPORT graph_execution_exception: public graph_exception {
public: public:
explicit graph_execution_exception(Nd4jLong graphId); explicit graph_execution_exception(Nd4jLong graphId);


@ -21,11 +21,11 @@
#ifndef DEV_TESTS_GRAPH_EXISTS_EXCEPTION_H #ifndef DEV_TESTS_GRAPH_EXISTS_EXCEPTION_H
#define DEV_TESTS_GRAPH_EXISTS_EXCEPTION_H #define DEV_TESTS_GRAPH_EXISTS_EXCEPTION_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdexcept> #include <stdexcept>
#include <exceptions/graph_exception.h> #include <exceptions/graph_exception.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -34,7 +34,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT graph_exists_exception: public graph_exception { class ND4J_EXPORT graph_exists_exception: public graph_exception {
public: public:
explicit graph_exists_exception(Nd4jLong graphId); explicit graph_exists_exception(Nd4jLong graphId);


@ -19,9 +19,9 @@
// //
#include <exceptions/allocation_exception.h> #include <exceptions/allocation_exception.h>
#include <StringUtils.h> #include <helpers/StringUtils.h>
namespace nd4j { namespace sd {
allocation_exception::allocation_exception(std::string message) : std::runtime_error(message){ allocation_exception::allocation_exception(std::string message) : std::runtime_error(message){
// //
} }


@ -19,9 +19,9 @@
// //
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <StringUtils.h> #include <helpers/StringUtils.h>
namespace nd4j { namespace sd {
cuda_exception::cuda_exception(std::string message) : std::runtime_error(message){ cuda_exception::cuda_exception(std::string message) : std::runtime_error(message){
// //
} }


@ -21,19 +21,19 @@
#include <array/DataTypeUtils.h> #include <array/DataTypeUtils.h>
#include <exceptions/datatype_exception.h> #include <exceptions/datatype_exception.h>
namespace nd4j { namespace sd {
datatype_exception::datatype_exception(std::string message) : std::runtime_error(message){ datatype_exception::datatype_exception(std::string message) : std::runtime_error(message){
// //
} }
datatype_exception datatype_exception::build(std::string message, nd4j::DataType expected, nd4j::DataType actual) { datatype_exception datatype_exception::build(std::string message, sd::DataType expected, sd::DataType actual) {
auto exp = DataTypeUtils::asString(expected); auto exp = DataTypeUtils::asString(expected);
auto act = DataTypeUtils::asString(actual); auto act = DataTypeUtils::asString(actual);
message += "; Expected: [" + exp + "]; Actual: [" + act + "]"; message += "; Expected: [" + exp + "]; Actual: [" + act + "]";
return datatype_exception(message); return datatype_exception(message);
} }
datatype_exception datatype_exception::build(std::string message, nd4j::DataType expected, nd4j::DataType actualX, nd4j::DataType actualY) { datatype_exception datatype_exception::build(std::string message, sd::DataType expected, sd::DataType actualX, sd::DataType actualY) {
auto exp = DataTypeUtils::asString(expected); auto exp = DataTypeUtils::asString(expected);
auto actX = DataTypeUtils::asString(actualX); auto actX = DataTypeUtils::asString(actualX);
auto actY = DataTypeUtils::asString(actualY); auto actY = DataTypeUtils::asString(actualY);
@ -41,7 +41,7 @@ namespace nd4j {
return datatype_exception(message); return datatype_exception(message);
} }
datatype_exception datatype_exception::build(std::string message, nd4j::DataType actual) { datatype_exception datatype_exception::build(std::string message, sd::DataType actual) {
auto act = DataTypeUtils::asString(actual); auto act = DataTypeUtils::asString(actual);
message += "; Actual: [" + act + "]"; message += "; Actual: [" + act + "]";
return datatype_exception(message); return datatype_exception(message);
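The builders above simply append the expected/actual type names (as produced by DataTypeUtils::asString) to the caller's message. A brief, hedged usage sketch; the check and the message text are illustrative:

#include <array/DataType.h>
#include <exceptions/datatype_exception.h>

void requireType(sd::DataType expected, sd::DataType actual) {
    if (expected != actual)
        // message becomes "op validation failed; Expected: [...]; Actual: [...]"
        throw sd::datatype_exception::build("op validation failed", expected, actual);
}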


@ -21,7 +21,7 @@
#include <exceptions/graph_exception.h> #include <exceptions/graph_exception.h>
#include <helpers/StringUtils.h> #include <helpers/StringUtils.h>
namespace nd4j { namespace sd {
graph_exception::graph_exception(std::string message, Nd4jLong graphId) : std::runtime_error(message) { graph_exception::graph_exception(std::string message, Nd4jLong graphId) : std::runtime_error(message) {
this->_message = message; this->_message = message;
this->_graphId = graphId; this->_graphId = graphId;


@ -21,7 +21,7 @@
#include <helpers/StringUtils.h> #include <helpers/StringUtils.h>
#include <exceptions/graph_execution_exception.h> #include <exceptions/graph_execution_exception.h>
namespace nd4j { namespace sd {
graph_execution_exception::graph_execution_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Caught exception during graph execution", graphId), graphId) { graph_execution_exception::graph_execution_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Caught exception during graph execution", graphId), graphId) {
_graphId = graphId; _graphId = graphId;
} }


@ -21,7 +21,7 @@
#include <helpers/StringUtils.h> #include <helpers/StringUtils.h>
#include <exceptions/graph_exists_exception.h> #include <exceptions/graph_exists_exception.h>
namespace nd4j { namespace sd {
graph_exists_exception::graph_exists_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Graph with given ID already exists", graphId), graphId) { graph_exists_exception::graph_exists_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Graph with given ID already exists", graphId), graphId) {
_graphId = graphId; _graphId = graphId;
} }


@ -21,7 +21,7 @@
#include <helpers/StringUtils.h> #include <helpers/StringUtils.h>
#include <exceptions/no_results_exception.h> #include <exceptions/no_results_exception.h>
namespace nd4j { namespace sd {
no_results_exception::no_results_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Got no results after graph execution", graphId), graphId) { no_results_exception::no_results_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Got no results after graph execution", graphId), graphId) {
_graphId = graphId; _graphId = graphId;
} }


@ -21,7 +21,7 @@
#include <helpers/StringUtils.h> #include <helpers/StringUtils.h>
#include <exceptions/unknown_graph_exception.h> #include <exceptions/unknown_graph_exception.h>
namespace nd4j { namespace sd {
unknown_graph_exception::unknown_graph_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Unknown graph", graphId), graphId) { unknown_graph_exception::unknown_graph_exception(Nd4jLong graphId) : graph_exception(StringUtils::buildGraphErrorMessage("Unknown graph", graphId), graphId) {
_graphId = graphId; _graphId = graphId;
} }


@ -21,11 +21,11 @@
#ifndef DEV_TESTS_NO_RESULTS_EXCEPTION_H #ifndef DEV_TESTS_NO_RESULTS_EXCEPTION_H
#define DEV_TESTS_NO_RESULTS_EXCEPTION_H #define DEV_TESTS_NO_RESULTS_EXCEPTION_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdexcept> #include <stdexcept>
#include <exceptions/graph_exception.h> #include <exceptions/graph_exception.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -34,7 +34,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT no_results_exception: public graph_exception { class ND4J_EXPORT no_results_exception: public graph_exception {
public: public:
explicit no_results_exception(Nd4jLong graphId); explicit no_results_exception(Nd4jLong graphId);


@ -21,11 +21,11 @@
#ifndef DEV_TESTS_UNKNOWN_GRAPH_EXCEPTION_H #ifndef DEV_TESTS_UNKNOWN_GRAPH_EXCEPTION_H
#define DEV_TESTS_UNKNOWN_GRAPH_EXCEPTION_H #define DEV_TESTS_UNKNOWN_GRAPH_EXCEPTION_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <stdexcept> #include <stdexcept>
#include <exceptions/graph_exception.h> #include <exceptions/graph_exception.h>
#include <dll.h> #include <system/dll.h>
#if defined(_MSC_VER) #if defined(_MSC_VER)
@ -34,7 +34,7 @@
#endif #endif
namespace nd4j { namespace sd {
class ND4J_EXPORT unknown_graph_exception: public graph_exception { class ND4J_EXPORT unknown_graph_exception: public graph_exception {
public: public:
explicit unknown_graph_exception(Nd4jLong graphId); explicit unknown_graph_exception(Nd4jLong graphId);


@ -21,12 +21,12 @@
#ifndef LIBND4J_AFFINITYMANAGER_H #ifndef LIBND4J_AFFINITYMANAGER_H
#define LIBND4J_AFFINITYMANAGER_H #define LIBND4J_AFFINITYMANAGER_H
#include <dll.h> #include <system/dll.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <atomic> #include <atomic>
#include <mutex> #include <mutex>
namespace nd4j { namespace sd {
class ND4J_EXPORT AffinityManager { class ND4J_EXPORT AffinityManager {
private: private:
static std::atomic<int> _lastDevice; static std::atomic<int> _lastDevice;


@ -21,7 +21,7 @@
#ifndef SAMEDIFF_CALLABLEINTERFACE_H #ifndef SAMEDIFF_CALLABLEINTERFACE_H
#define SAMEDIFF_CALLABLEINTERFACE_H #define SAMEDIFF_CALLABLEINTERFACE_H
#include <openmp_pragmas.h> #include <system/openmp_pragmas.h>
#include <cstdint> #include <cstdint>
#include <functional> #include <functional>
#include <atomic> #include <atomic>


@ -25,7 +25,7 @@
#include <vector> #include <vector>
#include <atomic> #include <atomic>
#include <condition_variable> #include <condition_variable>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
namespace samediff { namespace samediff {
class CallableWithArguments { class CallableWithArguments {


@ -21,11 +21,11 @@
#ifndef LIBND4J_CONTEXTBUFFERS_H #ifndef LIBND4J_CONTEXTBUFFERS_H
#define LIBND4J_CONTEXTBUFFERS_H #define LIBND4J_CONTEXTBUFFERS_H
#include <dll.h> #include <system/dll.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <execution/ErrorReference.h> #include <execution/ErrorReference.h>
namespace nd4j { namespace sd {
class ND4J_EXPORT ContextBuffers { class ND4J_EXPORT ContextBuffers {
private: private:
void* _reductionPointer = nullptr; void* _reductionPointer = nullptr;


@ -22,7 +22,7 @@
#define DEV_TESTS_ERRORREFERENCE_H #define DEV_TESTS_ERRORREFERENCE_H
#include <string> #include <string>
#include <dll.h> #include <system/dll.h>
namespace sd { namespace sd {
class ND4J_EXPORT ErrorReference { class ND4J_EXPORT ErrorReference {


@ -21,7 +21,7 @@
#ifndef SD_EXECUTOR_H #ifndef SD_EXECUTOR_H
#define SD_EXECUTOR_H #define SD_EXECUTOR_H
namespace nd4j { namespace sd {
class Executor { class Executor {
public: public:
static void execute() { static void execute() {


@ -35,9 +35,9 @@
#include "config.h" #include "config.h"
#endif #endif
#include <dll.h> #include <system/dll.h>
#include <memory> #include <memory>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <memory/Workspace.h> #include <memory/Workspace.h>
#include <vector> #include <vector>
#include <mutex> #include <mutex>
@ -46,7 +46,7 @@
namespace nd4j { namespace sd {
class ND4J_EXPORT LaunchContext { class ND4J_EXPORT LaunchContext {
@ -68,7 +68,7 @@ class ND4J_EXPORT LaunchContext {
bool _isAllocated = false; bool _isAllocated = false;
#endif // CUDA #endif // CUDA
nd4j::memory::Workspace* _workspace = nullptr; sd::memory::Workspace* _workspace = nullptr;
int _deviceID = 0; int _deviceID = 0;
public: public:
@ -100,8 +100,8 @@ class ND4J_EXPORT LaunchContext {
LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer = nullptr, Nd4jPointer scalarPointer = nullptr, Nd4jPointer allocationPointer = nullptr); LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer = nullptr, Nd4jPointer scalarPointer = nullptr, Nd4jPointer allocationPointer = nullptr);
LaunchContext(); LaunchContext();
~LaunchContext(); ~LaunchContext();
nd4j::memory::Workspace* getWorkspace() const { return _workspace; } sd::memory::Workspace* getWorkspace() const { return _workspace; }
void setWorkspace(nd4j::memory::Workspace* theWorkspace) { void setWorkspace(sd::memory::Workspace* theWorkspace) {
_workspace = theWorkspace; _workspace = theWorkspace;
} }


@ -21,10 +21,10 @@
#define SAMEDIFF_THREADS_H #define SAMEDIFF_THREADS_H
#include <functional> #include <functional>
#include <openmp_pragmas.h> #include <system/openmp_pragmas.h>
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <Environment.h> #include <system/Environment.h>
#include <op_enums.h> #include <system/op_enums.h>
namespace samediff { namespace samediff {
class ND4J_EXPORT ThreadsHelper { class ND4J_EXPORT ThreadsHelper {
@ -107,7 +107,7 @@ namespace samediff {
* @param increment * @param increment
* @return * @return
*/ */
static int parallel_for(FUNC_1D function, int64_t start, int64_t stop, int64_t increment = 1, uint32_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static int parallel_for(FUNC_1D function, int64_t start, int64_t stop, int64_t increment = 1, uint32_t numThreads = sd::Environment::getInstance()->maxMasterThreads());
/** /**
* This function executes 1 dimensional loop for a given number of threads * This function executes 1 dimensional loop for a given number of threads
@ -119,7 +119,7 @@ namespace samediff {
* @param numThreads * @param numThreads
* @return * @return
*/ */
static int parallel_tad(FUNC_1D function, int64_t start, int64_t stop, int64_t increment = 1, uint32_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static int parallel_tad(FUNC_1D function, int64_t start, int64_t stop, int64_t increment = 1, uint32_t numThreads = sd::Environment::getInstance()->maxMasterThreads());
/** /**
* This method will execute function splitting 2 nested loops space with multiple threads * This method will execute function splitting 2 nested loops space with multiple threads
@ -134,7 +134,7 @@ namespace samediff {
* @param inc_y * @param inc_y
* @return * @return
*/ */
static int parallel_for(FUNC_2D function, int64_t start_x, int64_t stop_x, int64_t inc_x, int64_t start_y, int64_t stop_y, int64_t inc_y, uint64_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads(), bool debug = false); static int parallel_for(FUNC_2D function, int64_t start_x, int64_t stop_x, int64_t inc_x, int64_t start_y, int64_t stop_y, int64_t inc_y, uint64_t numThreads = sd::Environment::getInstance()->maxMasterThreads(), bool debug = false);
/** /**
* This method will execute function splitting 3 nested loops space with multiple threads * This method will execute function splitting 3 nested loops space with multiple threads
@ -152,7 +152,7 @@ namespace samediff {
* @param inc_z * @param inc_z
* @return * @return
*/ */
static int parallel_for(FUNC_3D function, int64_t start_x, int64_t stop_x, int64_t inc_x, int64_t start_y, int64_t stop_y, int64_t inc_y, int64_t start_z, int64_t stop_z, int64_t inc_z, uint64_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static int parallel_for(FUNC_3D function, int64_t start_x, int64_t stop_x, int64_t inc_x, int64_t start_y, int64_t stop_y, int64_t inc_y, int64_t start_z, int64_t stop_z, int64_t inc_z, uint64_t numThreads = sd::Environment::getInstance()->maxMasterThreads());
/** /**
* *
@ -160,18 +160,18 @@ namespace samediff {
* @param numThreads * @param numThreads
* @return * @return
*/ */
static int parallel_do(FUNC_DO function, uint64_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static int parallel_do(FUNC_DO function, uint64_t numThreads = sd::Environment::getInstance()->maxMasterThreads());
static int64_t parallel_long(FUNC_RL function, FUNC_AL aggregator, int64_t start, int64_t stop, int64_t increment = 1, uint64_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static int64_t parallel_long(FUNC_RL function, FUNC_AL aggregator, int64_t start, int64_t stop, int64_t increment = 1, uint64_t numThreads = sd::Environment::getInstance()->maxMasterThreads());
static double parallel_double(FUNC_RD function, FUNC_AD aggregator, int64_t start, int64_t stop, int64_t increment = 1, uint64_t numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static double parallel_double(FUNC_RD function, FUNC_AD aggregator, int64_t start, int64_t stop, int64_t increment = 1, uint64_t numThreads = sd::Environment::getInstance()->maxMasterThreads());
/** /**
* This method will execute function in parallel preserving the parts to be aligned increment size * This method will execute function in parallel preserving the parts to be aligned increment size
* PLEASE NOTE: this function can use smaller number of threads than requested. * PLEASE NOTE: this function can use smaller number of threads than requested.
* *
*/ */
static int parallel_aligned_increment(FUNC_1D function, int64_t start, int64_t stop, int64_t increment, size_t type_size = sizeof(float), uint32_t req_numThreads = nd4j::Environment::getInstance()->maxMasterThreads()); static int parallel_aligned_increment(FUNC_1D function, int64_t start, int64_t stop, int64_t increment, size_t type_size = sizeof(float), uint32_t req_numThreads = sd::Environment::getInstance()->maxMasterThreads());
}; };
} }
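The declarations above make samediff's parallel helpers default to sd::Environment::getInstance()->maxMasterThreads() workers. A hedged usage sketch for the 1-D entry point; assumptions not visible in this hunk: the header lives under execution/, the enclosing class is samediff::Threads, and FUNC_1D is a callable taking (thread_id, start, stop, increment):

#include <cstdint>
#include <vector>
#include <execution/Threads.h>   // assumed location of the header shown above

void scaleInPlace(std::vector<float> &data, float factor) {
    // FUNC_1D-shaped lambda: each worker gets its own [start, stop) slice
    auto func = [&](uint64_t /*thread_id*/, int64_t start, int64_t stop, int64_t increment) {
        for (auto i = start; i < stop; i += increment)
            data[i] *= factor;
    };
    // splits [0, data.size()) across up to maxMasterThreads() workers
    samediff::Threads::parallel_for(func, 0, static_cast<int64_t>(data.size()));
}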


@ -20,7 +20,7 @@
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
namespace nd4j { namespace sd {
int AffinityManager::currentDeviceId() { int AffinityManager::currentDeviceId() {
return 0; return 0;
} }


@ -20,7 +20,7 @@
#include <execution/ContextBuffers.h> #include <execution/ContextBuffers.h>
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
namespace nd4j { namespace sd {
ContextBuffers::ContextBuffers() { ContextBuffers::ContextBuffers() {
_deviceId = AffinityManager::currentDeviceId(); _deviceId = AffinityManager::currentDeviceId();
} }


@ -19,21 +19,21 @@
// //
#include <execution/LaunchContext.h> #include <execution/LaunchContext.h>
#include <logger.h> #include <helpers/logger.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <thread> #include <thread>
#if defined(IOS_BUILD) || defined(APPLE_BUILD) || defined(ANDROID_BUILD) #if defined(SD_IOS_BUILD) || defined(SD_APPLE_BUILD) || defined(SD_ANDROID_BUILD)
nd4j::ContextBuffers contextBuffers = nd4j::ContextBuffers(); sd::ContextBuffers contextBuffers = sd::ContextBuffers();
#else #else
thread_local nd4j::ContextBuffers contextBuffers = nd4j::ContextBuffers(); thread_local sd::ContextBuffers contextBuffers = sd::ContextBuffers();
#endif #endif
#ifdef HAVE_MKLDNN #ifdef HAVE_MKLDNN
#include <dnnl.hpp> #include <dnnl.hpp>
#endif #endif
namespace nd4j { namespace sd {
LaunchContext::~LaunchContext() { LaunchContext::~LaunchContext() {
#ifdef HAVE_MKLDNN #ifdef HAVE_MKLDNN


@ -18,14 +18,14 @@
// @author raver119@gmail.com // @author raver119@gmail.com
// //
#include <logger.h> #include <helpers/logger.h>
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <LaunchContext.h> #include <execution/LaunchContext.h>
thread_local int globalThreadToDevice = -1; thread_local int globalThreadToDevice = -1;
namespace nd4j { namespace sd {
std::mutex AffinityManager::_currentMutex; std::mutex AffinityManager::_currentMutex;
std::mutex AffinityManager::_numberMutex; std::mutex AffinityManager::_numberMutex;
int AffinityManager::_numberOfDevices = -1; int AffinityManager::_numberOfDevices = -1;


@ -20,15 +20,15 @@
#include <execution/ContextBuffers.h> #include <execution/ContextBuffers.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <logger.h> #include <helpers/logger.h>
#include <AffinityManager.h> #include <execution/AffinityManager.h>
#include <cuda.h> #include <cuda.h>
#include <cuda_runtime_api.h> #include <cuda_runtime_api.h>
#include <cuda_runtime.h> #include <cuda_runtime.h>
#include <cuda_device_runtime_api.h> #include <cuda_device_runtime_api.h>
namespace nd4j { namespace sd {
ContextBuffers::ContextBuffers() { ContextBuffers::ContextBuffers() {
//nd4j_printf("Creating ContextBuffers for device [%i]\n", AffinityManager::currentDeviceId()); //nd4j_printf("Creating ContextBuffers for device [%i]\n", AffinityManager::currentDeviceId());
_deviceId = AffinityManager::currentDeviceId(); _deviceId = AffinityManager::currentDeviceId();


@ -19,15 +19,15 @@
// //
#include <execution/LaunchContext.h> #include <execution/LaunchContext.h>
#include <logger.h> #include <helpers/logger.h>
#include <exceptions/cuda_exception.h> #include <exceptions/cuda_exception.h>
#include <helpers/cublasHelper.h> #include <helpers/cublasHelper.h>
#include <thread> #include <thread>
#include <execution/AffinityManager.h> #include <execution/AffinityManager.h>
thread_local nd4j::ContextBuffers contextBuffers = nd4j::ContextBuffers(); thread_local sd::ContextBuffers contextBuffers = sd::ContextBuffers();
namespace nd4j { namespace sd {
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>(); std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex; std::mutex LaunchContext::_mutex;


@ -19,7 +19,7 @@
// //
#include <execution/BlockingQueue.h> #include <execution/BlockingQueue.h>
#include <CallableWithArguments.h> #include <execution/CallableWithArguments.h>
#include <thread> #include <thread>
namespace samediff { namespace samediff {


@ -78,7 +78,7 @@ namespace samediff {
ThreadPool::ThreadPool() { ThreadPool::ThreadPool() {
// TODO: number of threads must reflect number of cores for UMA system. In case of NUMA it should be per-device pool // TODO: number of threads must reflect number of cores for UMA system. In case of NUMA it should be per-device pool
// FIXME: on mobile phones this feature must NOT be used // FIXME: on mobile phones this feature must NOT be used
_available = nd4j::Environment::getInstance()->maxThreads(); _available = sd::Environment::getInstance()->maxThreads();
_queues.resize(_available.load()); _queues.resize(_available.load());
_threads.resize(_available.load()); _threads.resize(_available.load());


@ -22,18 +22,18 @@
#include <vector> #include <vector>
#include <thread> #include <thread>
#include <helpers/logger.h> #include <helpers/logger.h>
#include <templatemath.h> #include <math/templatemath.h>
#include <shape.h> #include <helpers/shape.h>
namespace samediff { namespace samediff {
int ThreadsHelper::numberOfThreads(int maxThreads, uint64_t numberOfElements) { int ThreadsHelper::numberOfThreads(int maxThreads, uint64_t numberOfElements) {
// let's see how many threads we actually need first // let's see how many threads we actually need first
auto optimalThreads = nd4j::math::nd4j_max<uint64_t>(1, numberOfElements / 1024); auto optimalThreads = sd::math::nd4j_max<uint64_t>(1, numberOfElements / 1024);
// now return the smallest value // now return the smallest value
return nd4j::math::nd4j_min<int>(optimalThreads, maxThreads); return sd::math::nd4j_min<int>(optimalThreads, maxThreads);
} }
Span3::Span3(int64_t startX, int64_t stopX, int64_t incX, int64_t startY, int64_t stopY, int64_t incY, int64_t startZ, int64_t stopZ, int64_t incZ) { Span3::Span3(int64_t startX, int64_t stopX, int64_t incX, int64_t startY, int64_t stopY, int64_t incY, int64_t startZ, int64_t stopZ, int64_t incZ) {
@ -264,7 +264,7 @@ namespace samediff {
int ThreadsHelper::numberOfThreads2d(int maxThreads, uint64_t iters_x, uint64_t iters_y) { int ThreadsHelper::numberOfThreads2d(int maxThreads, uint64_t iters_x, uint64_t iters_y) {
// in some cases there's nothing to think about, part 1 // in some cases there's nothing to think about, part 1
if (iters_x < maxThreads && iters_y < maxThreads) if (iters_x < maxThreads && iters_y < maxThreads)
return nd4j::math::nd4j_max<int>(iters_x, iters_y); return sd::math::nd4j_max<int>(iters_x, iters_y);
auto remX = iters_x % maxThreads; auto remX = iters_x % maxThreads;
auto remY = iters_y % maxThreads; auto remY = iters_y % maxThreads;
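The numberOfThreads heuristic at the top of this file block aims for roughly one thread per 1024 elements, clamped to [1, maxThreads]. A standalone restatement with two worked values; the names are illustrative and independent of libnd4j:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static int numberOfThreads(int maxThreads, uint64_t numberOfElements) {
    // roughly one thread per 1024 elements, but at least one...
    uint64_t optimalThreads = std::max<uint64_t>(1, numberOfElements / 1024);
    // ...and never more than the configured maximum
    return static_cast<int>(std::min<uint64_t>(optimalThreads, static_cast<uint64_t>(maxThreads)));
}

int main() {
    std::printf("%d\n", numberOfThreads(8, 512));    // prints 1: too little work to split
    std::printf("%d\n", numberOfThreads(8, 100000)); // prints 8: capped at maxThreads
}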


@ -31,7 +31,7 @@ namespace samediff {
Ticket::Ticket() { Ticket::Ticket() {
_acquired = true; _acquired = true;
_interfaces.resize(nd4j::Environment::getInstance()->maxThreads()); _interfaces.resize(sd::Environment::getInstance()->maxThreads());
} }
bool Ticket::acquired() { bool Ticket::acquired() {


@ -21,13 +21,13 @@
#ifndef LIBND4J_INPUTLIST_H #ifndef LIBND4J_INPUTLIST_H
#define LIBND4J_INPUTLIST_H #define LIBND4J_INPUTLIST_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
#include <vector> #include <vector>
#include <types/pair.h> #include <types/pair.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT ArgumentsList { class ND4J_EXPORT ArgumentsList {
protected: protected:


@ -23,7 +23,7 @@
#define LIBND4J_CONTEXT_H #define LIBND4J_CONTEXT_H
#include <vector> #include <vector>
#include <NDArray.h> #include <array/NDArray.h>
#include <graph/Variable.h> #include <graph/Variable.h>
#include <graph/VariableSpace.h> #include <graph/VariableSpace.h>
#include <graph/ContextPrototype.h> #include <graph/ContextPrototype.h>
@ -39,26 +39,26 @@
#include <cuda_device_runtime_api.h> #include <cuda_device_runtime_api.h>
#endif #endif
namespace nd4j { namespace sd {
namespace graph { namespace graph {
/** /**
* This class defines input desired for any given node/operation within graph * This class defines input desired for any given node/operation within graph
*/ */
class ND4J_EXPORT Context : public nd4j::graph::ContextPrototype { class ND4J_EXPORT Context : public sd::graph::ContextPrototype {
protected: protected:
nd4j::memory::Workspace* _workspace = nullptr; sd::memory::Workspace* _workspace = nullptr;
nd4j::graph::VariableSpace* _variableSpace = nullptr; sd::graph::VariableSpace* _variableSpace = nullptr;
std::pair<Nd4jLong, Nd4jLong> _executionTime; std::pair<Nd4jLong, Nd4jLong> _executionTime;
nd4j::random::RandomBuffer* _rng = nullptr; sd::random::RandomBuffer* _rng = nullptr;
nd4j::DataType _dataType = nd4j::DataType::FLOAT32; sd::DataType _dataType = sd::DataType::FLOAT32;
// branch for divergent_op // branch for divergent_op
int _branch = 0; int _branch = 0;
// temporary context for standalone ops execution // temporary context for standalone ops execution
LaunchContext* _context = nullptr; LaunchContext* _context = nullptr;
std::vector<nd4j::DataType> _dataTypes; std::vector<sd::DataType> _dataTypes;
// fields for fast execution (out-of-graph ops use) // fields for fast execution (out-of-graph ops use)
std::vector<NDArray*> _fastpath_in; std::vector<NDArray*> _fastpath_in;
@ -87,30 +87,30 @@ namespace nd4j {
Nd4jLong getOuterTime(); Nd4jLong getOuterTime();
Nd4jLong getInnerTime(); Nd4jLong getInnerTime();
nd4j::DataType dataType() override; sd::DataType dataType() override;
nd4j::DataType dataType(int index) override; sd::DataType dataType(int index) override;
void setDataType(int index, nd4j::DataType type) override; void setDataType(int index, sd::DataType type) override;
// these methods are related to Workspace abstraction // these methods are related to Workspace abstraction
bool hasWorkspaceProvided(); bool hasWorkspaceProvided();
void attachWorkspace(nd4j::memory::Workspace* workspace); void attachWorkspace(sd::memory::Workspace* workspace);
void forgetWorkspace(); void forgetWorkspace();
// these methods return full-time workspace // these methods return full-time workspace
nd4j::memory::Workspace* getWorkspace(); sd::memory::Workspace* getWorkspace();
nd4j::memory::Workspace* workspace(); sd::memory::Workspace* workspace();
nd4j::memory::Workspace* fWorkspace(); sd::memory::Workspace* fWorkspace();
// this method returns workspace for temporary allocations // this method returns workspace for temporary allocations
nd4j::memory::Workspace* tWorkspace(); sd::memory::Workspace* tWorkspace();
// this method returns workspace for object allocations // this method returns workspace for object allocations
nd4j::memory::Workspace* oWorkspace(); sd::memory::Workspace* oWorkspace();
void setVariableSpace(VariableSpace* variableSpace); void setVariableSpace(VariableSpace* variableSpace);
nd4j::random::RandomBuffer* getRNG(); sd::random::RandomBuffer* getRNG();
void setRNG(nd4j::random::RandomBuffer* rng); void setRNG(sd::random::RandomBuffer* rng);
void setTargetEngine(samediff::Engine engine); void setTargetEngine(samediff::Engine engine);
@ -206,12 +206,12 @@ namespace nd4j {
void setTArguments(double *arguments, int numberOfArguments); void setTArguments(double *arguments, int numberOfArguments);
void setIArguments(Nd4jLong *arguments, int numberOfArguments); void setIArguments(Nd4jLong *arguments, int numberOfArguments);
void setBArguments(bool *arguments, int numberOfArguments); void setBArguments(bool *arguments, int numberOfArguments);
void setDArguments(nd4j::DataType *arguments, int numberOfArguments); void setDArguments(sd::DataType *arguments, int numberOfArguments);
void setTArguments(const std::vector<double> &tArgs); void setTArguments(const std::vector<double> &tArgs);
void setIArguments(const std::vector<Nd4jLong> &tArgs); void setIArguments(const std::vector<Nd4jLong> &tArgs);
void setBArguments(const std::vector<bool> &tArgs); void setBArguments(const std::vector<bool> &tArgs);
void setDArguments(const std::vector<nd4j::DataType> &dArgs); void setDArguments(const std::vector<sd::DataType> &dArgs);
/** /**
* This method purges fastpath in/out contents and releases all the handles. * This method purges fastpath in/out contents and releases all the handles.


@ -23,10 +23,10 @@
#define ND4J_CONTEXT_PROTOTYPE_H #define ND4J_CONTEXT_PROTOTYPE_H
#include <vector> #include <vector>
#include <Environment.h> #include <system/Environment.h>
#include <array/DataType.h> #include <array/DataType.h>
#include <dll.h> #include <system/dll.h>
#include <RandomGenerator.h> #include <graph/RandomGenerator.h>
#include <ops/declarable/OpDescriptor.h> #include <ops/declarable/OpDescriptor.h>
#include <execution/Engine.h> #include <execution/Engine.h>
#include <execution/ExecutionMode.h> #include <execution/ExecutionMode.h>
@ -35,7 +35,7 @@
#include <config.h> #include <config.h>
#endif #endif
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT ContextPrototype { class ND4J_EXPORT ContextPrototype {
@ -47,10 +47,10 @@ namespace nd4j {
std::vector<int> _iArgs; std::vector<int> _iArgs;
std::vector<bool> _bArgs; std::vector<bool> _bArgs;
std::vector<int> _axis; std::vector<int> _axis;
std::vector<nd4j::DataType> _dArgs; std::vector<sd::DataType> _dArgs;
// TODO: remove this field // TODO: remove this field
nd4j::DataType _dataType = nd4j::DataType::FLOAT32; sd::DataType _dataType = sd::DataType::FLOAT32;
bool _isInplace; bool _isInplace;
// opNum for legacy XYZ ops // opNum for legacy XYZ ops
@ -58,17 +58,17 @@ namespace nd4j {
uint64_t _rootSeed; uint64_t _rootSeed;
RandomGenerator _randomGenerator; RandomGenerator _randomGenerator;
std::vector<nd4j::DataType> _dataTypes; std::vector<sd::DataType> _dataTypes;
nd4j::ops::OpDescriptor* _opDescriptor; sd::ops::OpDescriptor* _opDescriptor;
bool _useMKLDNN = nd4j::Environment::getInstance()->isUseMKLDNN(); bool _useMKLDNN = sd::Environment::getInstance()->isUseMKLDNN();
// target engine for execution // target engine for execution
samediff::Engine _engine = DEFAULT_ENGINE; samediff::Engine _engine = DEFAULT_ENGINE;
samediff::ExecutionMode _execMode = samediff::ExecutionMode::MODE_UNDEFINED; samediff::ExecutionMode _execMode = samediff::ExecutionMode::MODE_UNDEFINED;
public: public:
explicit ContextPrototype(nd4j::ops::OpDescriptor* opDescriptor = nullptr, int nodeId = 1, bool inPlace = false); explicit ContextPrototype(sd::ops::OpDescriptor* opDescriptor = nullptr, int nodeId = 1, bool inPlace = false);
~ContextPrototype() = default; ~ContextPrototype() = default;
int getNodeId(); int getNodeId();
@ -77,11 +77,11 @@ namespace nd4j {
// this method returns true, if inputs are defined // this method returns true, if inputs are defined
bool hasVariablesFilled(); bool hasVariablesFilled();
void setOpDescriptor(nd4j::ops::OpDescriptor* opDescriptor); void setOpDescriptor(sd::ops::OpDescriptor* opDescriptor);
virtual nd4j::DataType dataType(); virtual sd::DataType dataType();
virtual nd4j::DataType dataType(int index); virtual sd::DataType dataType(int index);
virtual void setDataType(int index, nd4j::DataType type); virtual void setDataType(int index, sd::DataType type);
bool isInplace(); bool isInplace();
void markInplace(bool reallyInplace); void markInplace(bool reallyInplace);
@ -96,7 +96,7 @@ namespace nd4j {
std::vector<double>* getTArguments(); std::vector<double>* getTArguments();
std::vector<int>* getIArguments(); std::vector<int>* getIArguments();
std::vector<bool>* getBArguments(); std::vector<bool>* getBArguments();
std::vector<nd4j::DataType>* getDArguments(); std::vector<sd::DataType>* getDArguments();
std::vector<int>* getAxis(); std::vector<int>* getAxis();
samediff::Engine engine(); samediff::Engine engine();


@ -29,7 +29,7 @@
#include <flatbuffers/flatbuffers.h> #include <flatbuffers/flatbuffers.h>
#include <graph/Variable.h> #include <graph/Variable.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ExecutionResult { class ExecutionResult {
private: private:


@ -22,22 +22,22 @@
#define LIBND4J_EXECUTORCONFIGURATION_H #define LIBND4J_EXECUTORCONFIGURATION_H
#include <graph/generated/config_generated.h> #include <graph/generated/config_generated.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT ExecutorConfiguration { class ND4J_EXPORT ExecutorConfiguration {
public: public:
nd4j::graph::ProfilingMode _profilingMode; sd::graph::ProfilingMode _profilingMode;
nd4j::graph::ExecutionMode _executionMode; sd::graph::ExecutionMode _executionMode;
nd4j::graph::OutputMode _outputMode; sd::graph::OutputMode _outputMode;
bool _timestats; bool _timestats;
Nd4jLong _footprintForward = 0L; Nd4jLong _footprintForward = 0L;
Nd4jLong _footprintBackward = 0L; Nd4jLong _footprintBackward = 0L;
Direction _direction = Direction_FORWARD_ONLY; Direction _direction = Direction_FORWARD_ONLY;
explicit ExecutorConfiguration(const nd4j::graph::FlatConfiguration *conf = nullptr); explicit ExecutorConfiguration(const sd::graph::FlatConfiguration *conf = nullptr);
~ExecutorConfiguration() = default; ~ExecutorConfiguration() = default;
ExecutorConfiguration* clone(); ExecutorConfiguration* clone();


@ -22,12 +22,12 @@
#define LIBND4J_FLATUTILS_H #define LIBND4J_FLATUTILS_H
#include <utility> #include <utility>
#include <pointercast.h> #include <system/pointercast.h>
#include <graph/generated/array_generated.h> #include <graph/generated/array_generated.h>
#include <graph/generated/node_generated.h> #include <graph/generated/node_generated.h>
#include <NDArray.h> #include <array/NDArray.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT FlatUtils { class ND4J_EXPORT FlatUtils {
public: public:
@ -35,7 +35,7 @@ namespace nd4j {
static std::pair<Nd4jLong, Nd4jLong> fromLongPair(LongPair* pair); static std::pair<Nd4jLong, Nd4jLong> fromLongPair(LongPair* pair);
static NDArray* fromFlatArray(const nd4j::graph::FlatArray* flatArray); static NDArray* fromFlatArray(const sd::graph::FlatArray* flatArray);
static flatbuffers::Offset<FlatArray> toFlatArray(flatbuffers::FlatBufferBuilder &builder, NDArray &array); static flatbuffers::Offset<FlatArray> toFlatArray(flatbuffers::FlatBufferBuilder &builder, NDArray &array);
}; };


@ -21,16 +21,16 @@
#ifndef LIBND4J_FLOWPATH_H #ifndef LIBND4J_FLOWPATH_H
#define LIBND4J_FLOWPATH_H #define LIBND4J_FLOWPATH_H
#include <op_boilerplate.h> #include <system/op_boilerplate.h>
#include <unordered_map> #include <unordered_map>
#include <map> #include <map>
#include <pointercast.h> #include <system/pointercast.h>
#include <graph/NodeState.h> #include <graph/NodeState.h>
#include <graph/FrameState.h> #include <graph/FrameState.h>
#include <graph/profiling/GraphProfile.h> #include <graph/profiling/GraphProfile.h>
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT FlowPath { class ND4J_EXPORT FlowPath {
private: private:


@ -22,10 +22,10 @@
#define LIBND4J_FRAMESTATE_H #define LIBND4J_FRAMESTATE_H
#include <string> #include <string>
#include <pointercast.h> #include <system/pointercast.h>
#include <dll.h> #include <system/dll.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT FrameState { class ND4J_EXPORT FrameState {
private: private:


@ -37,7 +37,7 @@
#include <graph/ExecutorConfiguration.h> #include <graph/ExecutorConfiguration.h>
#include <ops/declarable/OpDescriptor.h> #include <ops/declarable/OpDescriptor.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT Graph { class ND4J_EXPORT Graph {
@ -51,10 +51,10 @@ namespace nd4j {
// vector holds ID's of top nodes only // vector holds ID's of top nodes only
std::vector<int > *_nodes; std::vector<int > *_nodes;
MAP_IMPL<int, nd4j::graph::Node*> *_mapped; MAP_IMPL<int, sd::graph::Node*> *_mapped;
MAP_IMPL<int, std::vector<nd4j::graph::Node*> *> *_onion; MAP_IMPL<int, std::vector<sd::graph::Node*> *> *_onion;
MAP_IMPL<int, nd4j::graph::Node*> _unmapped; MAP_IMPL<int, sd::graph::Node*> _unmapped;
std::vector<int> _unmappedMap; // macOS? std::vector<int> _unmappedMap; // macOS?
std::mutex _mutexPreprocessing; std::mutex _mutexPreprocessing;
@ -68,11 +68,11 @@ namespace nd4j {
std::vector<Scope*> _scopes; std::vector<Scope*> _scopes;
//////////////////////////////////////// ////////////////////////////////////////
Nd4jStatus validateNode(nd4j::graph::Node *node); Nd4jStatus validateNode(sd::graph::Node *node);
void expandOnion(int newLayer); void expandOnion(int newLayer);
void injectNode(nd4j::graph::Node *node); void injectNode(sd::graph::Node *node);
void pushToOutputOnce(int id); void pushToOutputOnce(int id);
@ -105,39 +105,39 @@ namespace nd4j {
int numberOfPlaceholders(); int numberOfPlaceholders();
std::vector<nd4j::graph::Variable*>* getPlaceholders(); std::vector<sd::graph::Variable*>* getPlaceholders();
/** /**
* This method returns pointer to thread_local VariableSpace * This method returns pointer to thread_local VariableSpace
* @return * @return
*/ */
nd4j::graph::VariableSpace *getVariableSpace(); sd::graph::VariableSpace *getVariableSpace();
/** /**
* This method adds given node to the graph * This method adds given node to the graph
* *
* @param node * @param node
*/ */
void addNode(nd4j::graph::Node *node); void addNode(sd::graph::Node *node);
/** /**
* This method returns layered representation of the graph * This method returns layered representation of the graph
* *
* @return * @return
*/ */
MAP_IMPL<int, std::vector<nd4j::graph::Node*> *> *getOnion(); MAP_IMPL<int, std::vector<sd::graph::Node*> *> *getOnion();
/** /**
* This method returns map of all nodes of the graph * This method returns map of all nodes of the graph
* @return * @return
*/ */
MAP_IMPL<int, nd4j::graph::Node*>* getMapped(); MAP_IMPL<int, sd::graph::Node*>* getMapped();
/** /**
* This method returns outputs of this graph * This method returns outputs of this graph
* @return * @return
*/ */
std::vector<nd4j::graph::Variable*> *fetchOutputs(); std::vector<sd::graph::Variable*> *fetchOutputs();
/** /**
* This method returns pointer to ExecutorConfiguration * This method returns pointer to ExecutorConfiguration
@ -156,7 +156,7 @@ namespace nd4j {
* This method returns all nodes at once (order is NOT guaranteed) * This method returns all nodes at once (order is NOT guaranteed)
* @return * @return
*/ */
std::vector<nd4j::graph::Node*> *getAllNodes(); std::vector<sd::graph::Node*> *getAllNodes();
/** /**
* This method prints out Graph op-by-op, and respective inputs * This method prints out Graph op-by-op, and respective inputs
@ -166,7 +166,7 @@ namespace nd4j {
/** /**
* This method collect all ops from the graph into ops vector * This method collect all ops from the graph into ops vector
*/ */
std::vector<nd4j::ops::OpDescriptor> getOperations(); std::vector<sd::ops::OpDescriptor> getOperations();
/** /**
* This method returns Scope ptr specified with id * This method returns Scope ptr specified with id


@ -31,13 +31,13 @@
#include <graph/ResultWrapper.h> #include <graph/ResultWrapper.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <graph/ExecutionResult.h> #include <graph/ExecutionResult.h>
#include <dll.h> #include <system/dll.h>
#define TF_INPUT "Placeholder" #define TF_INPUT "Placeholder"
#define TF_CONST "Const" #define TF_CONST "Const"
#define TF_VAR "VariableV2" #define TF_VAR "VariableV2"
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT GraphExecutioner { class ND4J_EXPORT GraphExecutioner {
@ -45,7 +45,7 @@ namespace nd4j {
public: public:
//static Nd4jStatus executeFlatNode(nd4j::graph::Graph *graph, nd4j::graph::Node *node, nd4j::graph::VariableSpace<float> *variableSpace); //static Nd4jStatus executeFlatNode(sd::graph::Graph *graph, sd::graph::Node *node, sd::graph::VariableSpace<float> *variableSpace);
static Nd4jStatus executeFlatNode(Graph *graph, Node *node, VariableSpace *variableSpace); static Nd4jStatus executeFlatNode(Graph *graph, Node *node, VariableSpace *variableSpace);
@ -62,7 +62,7 @@ namespace nd4j {
* @param pointer Pointer to FlatBuffer * @param pointer Pointer to FlatBuffer
* @return pointer to FlatBuffer with result * @return pointer to FlatBuffer with result
*/ */
static nd4j::graph::ResultWrapper* executeFlatBuffer(Nd4jPointer pointer); static sd::graph::ResultWrapper* executeFlatBuffer(Nd4jPointer pointer);
static flatbuffers::Offset<FlatResult> execute(Graph *graph, flatbuffers::FlatBufferBuilder &builder, const FlatInferenceRequest* request); static flatbuffers::Offset<FlatResult> execute(Graph *graph, flatbuffers::FlatBufferBuilder &builder, const FlatInferenceRequest* request);


@ -19,14 +19,14 @@
// //
#include <helpers/logger.h> #include <helpers/logger.h>
#include <pointercast.h> #include <system/pointercast.h>
#include <unordered_map> #include <unordered_map>
#include <map> #include <map>
#include <graph/Graph.h> #include <graph/Graph.h>
#include <helpers/SimpleReadWriteLock.h> #include <helpers/SimpleReadWriteLock.h>
#include <exceptions/unknown_graph_exception.h> #include <exceptions/unknown_graph_exception.h>
namespace nd4j { namespace sd {
namespace graph { namespace graph {
class ND4J_EXPORT GraphHolder { class ND4J_EXPORT GraphHolder {
private: private:

Some files were not shown because too many files have changed in this diff.