/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

#ifndef NDARRAY_H
#define NDARRAY_H
#include <system/dll.h>
#include <initializer_list>
#include <functional>
#include <helpers/shape.h>
#include "legacy/NativeOpExecutioner.h"
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <graph/Intervals.h>
#include <array/DataType.h>
#include <array/DataTypeUtils.h>
#include <stdint.h>
#include <array/ArrayOptions.h>
#include <array/ArrayType.h>
#include <array/ResultSet.h>
#include <helpers/ShapeBuilders.h>
#include <system/op_enums.h>
#include <ops/BroadcastOpsTuple.h>
#include <ops/BroadcastBoolOpsTuple.h>
#include <ops/BroadcastIntOpsTuple.h>
#include <array/ExtraArguments.h>
#include <graph/Status.h>
#include <array/ShapeDescriptor.h>
#include <helpers/ConstantShapeHelper.h>
#include <array/DataBuffer.h>
#include <execution/AffinityManager.h>
#include <memory>
#include <array/InteropDataBuffer.h>
#include <memory/MemoryCounter.h>
#include <array/ConstantShapeBuffer.h>

namespace sd {

template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator+(const NDArray& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator+(NDArray&& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator+(const T& scalar, const NDArray& arr);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator+(const T& scalar, NDArray&& arr);

template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator-(const NDArray& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator-(NDArray&& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator-(const T& scalar, const NDArray& arr);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator-(const T& scalar, NDArray&& arr);

template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator*(const NDArray& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator*(NDArray&& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator*(const T& scalar, const NDArray& arr);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator*(const T& scalar, NDArray&& arr);

template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator/(const NDArray& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator/(NDArray&& arr, const T& scalar);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator/(const T& scalar, const NDArray& arr);
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
ND4J_EXPORT NDArray operator/(const T& scalar, NDArray&& arr);

template <typename T1, typename T2, typename = typename std::enable_if<std::is_same<NDArray, typename std::decay<T1>::type>::value && std::is_same<NDArray, typename std::decay<T2>::type>::value>::type>
ND4J_EXPORT NDArray operator+(T1&& arr1, T2&& arr2);
template <typename T1, typename T2, typename = typename std::enable_if<std::is_same<NDArray, typename std::decay<T1>::type>::value && std::is_same<NDArray, typename std::decay<T2>::type>::value>::type>
ND4J_EXPORT NDArray operator-(T1&& arr1, T2&& arr2);
template <typename T1, typename T2, typename = typename std::enable_if<std::is_same<NDArray, typename std::decay<T1>::type>::value && std::is_same<NDArray, typename std::decay<T2>::type>::value>::type>
ND4J_EXPORT NDArray operator*(T1&& arr1, T2&& arr2);
template <typename T1, typename T2, typename = typename std::enable_if<std::is_same<NDArray, typename std::decay<T1>::type>::value && std::is_same<NDArray, typename std::decay<T2>::type>::value>::type>
ND4J_EXPORT NDArray operator/(T1&& arr1, T2&& arr2);
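
// Usage sketch for the arithmetic overloads declared above (illustrative only;
// the shape/data constructor used here is an assumption, actual construction
// typically goes through NDArrayFactory or one of the constructors declared below):
//
//     NDArray a('c', {2, 2}, {1.f, 2.f, 3.f, 4.f});
//     NDArray b = a * 2.0f;    // NDArray op scalar
//     NDArray c = 2.0f + a;    // scalar op NDArray
//     NDArray d = a + b;       // NDArray op NDArray (lvalues/rvalues handled by the T1/T2 overloads)
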
ND4J_EXPORT NDArray mmul(const NDArray&, const NDArray&);
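
// Usage sketch (illustrative only): mmul performs a matrix-matrix product of two
// arrays with compatible inner dimensions, e.g. a: {m, k} and b: {k, n} give c: {m, n}:
//
//     NDArray c = mmul(a, b);
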
class ND4J_EXPORT NDArray {
private:

    /**
     * This method applies the given value to the buffer, wrt templates
     * @tparam T
     * @tparam Y
     * @param buffer
     * @param indices
     * @param value
     */
    template <typename T, typename Y>
    void templatedSet(void *buffer, const Nd4jLong *indices, const void *value);

    template <typename T, typename Y>
    void templatedSet(void *buffer, const Nd4jLong xOffset, const void *value);

    template <typename T>
    void templatedSet(void *buffer, const Nd4jLong xOffset, sd::DataType dtype, const void *value);

    template <typename T>
    void templatedAssign(void *xBuffer, const Nd4jLong xOffset, const void *yBuffer, const Nd4jLong yOffset) const;

    template <typename X, typename Y>
    void templatedDoubleAssign(void *xBuffer, const Nd4jLong xOffset, const void *yBuffer, const Nd4jLong yOffset) const;

    template <typename T, typename R>
    FORCEINLINE R templatedGet(void const* buffer, const Nd4jLong index) const;

    /*
    template <typename T, typename R>
    R templatedGetIndex(void *buffer, Nd4jLong *indices) const;
    */

    template <typename T>
    void* templatedPointerShift(const Nd4jLong offset) const;

    FORCEINLINE void copyBufferStatus(const NDArray& other) const;

protected:

    /**
     * if true, the array does not own its buffer and simply points to another array's buffer
     */
    bool _isView = false;

    /**
     * pointer to the DataBuffer holding the cpu/device memory
     */
    std::shared_ptr<DataBuffer> _buffer = std::make_shared<DataBuffer>();

    /**
     * buffer offset, the same for both cpu and device buffers
     */
    Nd4jLong _offset = 0L;

    /**
     * contains shape info: array rank, number of elements per dimension, dimension strides, element-wise stride, c-like or fortran-like order
     */
    const Nd4jLong *_shapeInfo  = nullptr;
    const Nd4jLong *_shapeInfoD = nullptr;

    /**
     * pointer to the device launch context (with all data needed there)
     */
    sd::LaunchContext* _context = sd::LaunchContext::defaultContext();

    // indicates if array's buffer is within workspace
    bool _isAttached = false;

    /**
     * field to store cached length
     */
    Nd4jLong _length = -1L;

    /**
     * type of array elements
     */
    sd::DataType _dataType = FLOAT32;

    /**
     * deviceId this NDArray belongs to
     */
    int _deviceId = AffinityManager::currentDeviceId();

    template<typename T>
    std::string toStringValue(T value);

public:

    NDArray() = default;

    /**
     * do not allocate memory, memory for array is passed from outside
     */
#ifndef __JAVACPP_HACK__
    NDArray(std::shared_ptr<DataBuffer> buffer, const ShapeDescriptor& descriptor, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const Nd4jLong offset = 0);

    NDArray(std::shared_ptr<DataBuffer> buffer, char order, const std::vector<Nd4jLong> &shape, sd::LaunchContext* context = sd::LaunchContext::defaultContext());

    /**
     * This constructor creates a scalar array containing a utf8 string
     */
    NDArray(const char* str, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext())
        : NDArray(std::string(str), dtype, context) {
    }

    NDArray(const std::string& string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
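
    // Usage sketch (illustrative only): both overloads above produce a scalar
    // (rank-0) array holding a single utf8 string:
    //
    //     NDArray s1("alpha");                                   // from const char*
    //     NDArray s2(std::string("beta"), sd::DataType::UTF8);   // from std::string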

    /**
     * This constructor creates a scalar array containing a utf16 string
     */
    NDArray(const char16_t* u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext())
        : NDArray(std::u16string(u16string), dtype, context) {
    }

    NDArray(const std::u16string& u16string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());

    /**
     * This constructor creates a scalar array containing a utf32 string
     */
    NDArray(const char32_t* u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext())
        : NDArray(std::u32string(u32string), dtype, context) {
    }

    NDArray(const std::u32string& u32string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());

    /**
     * These constructors create an array from a vector of utf8 strings
     */
    NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char*>& strings, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
    NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::string>& string, sd::DataType dtype = sd::DataType::UTF8, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
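
    // Usage sketch (illustrative only): the overloads above build an N-dimensional
    // array of utf8 strings, one string per element of the given shape:
    //
    //     NDArray s({2}, std::vector<std::string>{"alpha", "beta"});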

    /**
     * These constructors create an array from a vector of utf16 strings
     */
    NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char16_t*>& strings, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
    NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::u16string>& string, sd::DataType dtype = sd::DataType::UTF16, sd::LaunchContext* context = sd::LaunchContext::defaultContext());

    /**
     * These constructors create an array from a vector of utf32 strings
     */
    NDArray(const std::vector<Nd4jLong>& shape, const std::vector<const char32_t*>& strings, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
    NDArray(const std::vector<Nd4jLong>& shape, const std::vector<std::u32string>& string, sd::DataType dtype = sd::DataType::UTF32, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
|
#endif
/**
* does not allocate memory; the memory for the array is passed in from outside
*/
NDArray(void *buffer, Nd4jLong* shapeInfo, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), bool isBuffAlloc = false);

NDArray(void *buffer, const Nd4jLong* shapeInfo, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), bool isBuffAlloc = false);
/**
* does not allocate memory; the memory for the array is passed in from outside
* we assume the content of both (device and host) buffers is identical
*/
NDArray(void *buffer, void *bufferD, const Nd4jLong* shapeInfo, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), bool isBuffAlloc = false, bool isBuffDAlloc = false);
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* copy constructor
|
|
|
|
*/
|
|
|
|
NDArray(const NDArray& other);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* move constructor
|
|
|
|
*/
|
|
|
|
NDArray(NDArray&& other) noexcept;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* constructor, create array stored at given workspace
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray(sd::LaunchContext * context);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
NDArray(const Nd4jLong* shapeInfo, bool copyStrides = false, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), bool nullify = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to be zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently
|
|
|
|
* set dtype as array type
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
NDArray(const Nd4jLong* shapeInfo, sd::DataType dtype, bool copyStrides = false, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), bool nullify = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* this constructor creates new array using shape information contained in vector argument
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
NDArray(char order, const std::vector<Nd4jLong> &shape, sd::DataType dtype = DOUBLE, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
* This constructor creates a new array with elements copied from "data", using shape information stored in "shape"; elements from "data" are cast to dtype
*/
NDArray(char order, const std::vector<Nd4jLong> &shape, const std::vector<double>& data, sd::DataType dtype = DOUBLE, sd::LaunchContext* context = sd::LaunchContext::defaultContext());
|
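/*
 * Usage sketch for the constructor above (illustrative only; the values are arbitrary):
 *
 *   std::vector<Nd4jLong> shape = {2, 3};
 *   std::vector<double>   data  = {1, 2, 3, 4, 5, 6};
 *   sd::NDArray x('c', shape, data, sd::DataType::FLOAT32);   // data is cast to float32
 *   x.printIndexedBuffer("x");
 */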
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
2019-07-20 07:58:44 +02:00
|
|
|
* this constructor creates new array using given buffer (without memory allocation) and shape information stored in shape
|
2019-06-06 14:21:15 +02:00
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
NDArray(void *buffer, char order, const std::vector<Nd4jLong> &shape, sd::DataType dtype, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), const bool isBuffAlloc = false);
|
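/*
 * Usage sketch for the buffer-wrapping constructor above (illustrative only).
 * With isBuffAlloc left at false the NDArray does not take ownership, so the
 * caller must keep "raw" alive for the lifetime of the view.
 *
 *   float raw[6] = {1, 2, 3, 4, 5, 6};
 *   sd::NDArray view(raw, 'c', {2, 3}, sd::DataType::FLOAT32);
 *   view.printIndexedBuffer("view over external buffer");
 */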
2019-06-06 14:21:15 +02:00
|
|
|
|
2019-07-23 07:38:00 +02:00
|
|
|
/**
|
|
|
|
* This method returns new array with the same shape & data type
|
|
|
|
* @return
|
|
|
|
*/
|
|
|
|
NDArray like();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This method returns new uninitialized array with the same shape & data type
|
|
|
|
* @return
|
|
|
|
*/
|
2020-02-28 09:37:26 +01:00
|
|
|
NDArray ulike() const;
|
2019-07-23 07:38:00 +02:00
|
|
|
|
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* this constructor creates new NDArray with shape matching "other" array,
|
|
|
|
* doesn't copy "other" elements into new array !!!
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
explicit NDArray(const NDArray* other, bool copyStrides = false, sd::LaunchContext* context = sd::LaunchContext ::defaultContext());
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
* this constructor creates a scalar (with its value set to 0) or an empty array, depending on the bool argument isScalar
*/
NDArray(sd::DataType dtype, sd::LaunchContext* context = sd::LaunchContext::defaultContext(), bool isScalar = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* This method blocks until asynchronous operation finishes
|
|
|
|
*/
|
|
|
|
void synchronize(const char* msg) const;
|
|
|
|
|
|
|
|
/**
* This method allows one to set the _isAttached flag
* @param reallyAttached
*/
|
|
|
|
void setAttached(bool reallyAttached);
|
|
|
|
|
|
|
|
void tickWriteHost() const;
|
|
|
|
void tickWriteDevice() const;
|
|
|
|
void tickReadHost() const;
|
|
|
|
void tickReadDevice() const;
|
|
|
|
void tickBothActual() const;
|
|
|
|
bool isActualOnHostSide() const;
|
|
|
|
bool isActualOnDeviceSide() const;
|
|
|
|
void makeBothBuffersActual() const;
|
|
|
|
|
|
|
|
void syncToHost() const;
|
|
|
|
void syncToDevice() const;
|
|
|
|
void syncShape() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This method can be used on architectures that use special buffers
|
|
|
|
* @param writeList
|
|
|
|
* @param readList
|
|
|
|
*/
|
2020-06-02 09:43:12 +02:00
|
|
|
static void registerSpecialUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList = {});
|
|
|
|
static void prepareSpecialUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList = {}, bool synchronizeWritables = false);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-06-02 09:43:12 +02:00
|
|
|
static void registerPrimaryUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList = {});
|
|
|
|
static void preparePrimaryUse(const std::vector<const NDArray*>& writeList, const std::vector<const NDArray*>& readList = {}, bool synchronizeWritables = false);
|
2019-08-03 12:23:12 +02:00
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* This method returns buffer pointer offset by given number of elements, wrt own data type
|
|
|
|
* @param offset
|
|
|
|
* @return
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
void const* bufferWithOffset(Nd4jLong offset) const;
|
|
|
|
void* bufferWithOffset(Nd4jLong offset);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-05-09 07:06:14 +02:00
|
|
|
void const* specialBufferWithOffset(Nd4jLong offset) const;
|
|
|
|
void* specialBufferWithOffset(Nd4jLong offset);
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* copy assignment operator
|
|
|
|
* in particular, when _dataType != other._dataType and both shapes are the same, there will be allocation of new _buffer and _dataType acquires other._dataType
|
|
|
|
*/
|
|
|
|
NDArray& operator=(const NDArray& other);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* move assignment operator
|
|
|
|
*/
|
|
|
|
NDArray& operator=(NDArray&& other) noexcept;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* assignment operator, assigns the same scalar to all array elements
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
NDArray& operator=(const T scalar);
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* operators for memory allocation and deletion
|
|
|
|
*/
|
|
|
|
void* operator new(size_t i);
|
|
|
|
void operator delete(void* p);
|
|
|
|
|
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void setContext(sd::LaunchContext * context);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* create a new array by replicating current array by repeats times along given dimension
|
2019-08-21 20:10:29 +02:00
|
|
|
* axis - axis along which to repeat elements
|
2019-06-06 14:21:15 +02:00
|
|
|
* repeats - number of repetitions
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray repeat(const int axis, const std::vector<int>& repeats) const;
|
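/*
 * Usage sketch for repeat() above (illustrative only): repeating a 2x3 array
 * twice along axis 0 is expected to yield a 4x3 result.
 *
 *   sd::NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::FLOAT32);
 *   auto r = x.repeat(0, {2});   // every slice along axis 0 repeated twice
 */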
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* This method fills this array with zeros
|
|
|
|
*/
|
|
|
|
void nullify();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This method returns quantized copy of given array
|
|
|
|
*
|
|
|
|
* @param array
|
|
|
|
* @return
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
static NDArray quantize(const NDArray &array);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* fill target array by repeating current array
|
2019-08-21 20:10:29 +02:00
|
|
|
* axis - axis along which to repeat elements
|
|
|
|
* repeats - vector containing numbers of repetition for elements at given axis
|
2019-06-06 14:21:15 +02:00
|
|
|
*/
|
2019-08-21 20:10:29 +02:00
|
|
|
void repeat(const int axis, const std::vector<int>& repeats, NDArray& target) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
* creates an array which points to a certain sub-range of this array; the sub-range is defined by the given indices
*/
NDArray subarray(IndicesList& indices) const;

NDArray subarray(const std::initializer_list<NDIndex*>& idx) const;

NDArray subarray(const Intervals& idx) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* cast array elements to given dtype
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray cast(DataType dtype) const;
|
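/*
 * Usage sketch for cast() above (illustrative only):
 *
 *   sd::NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
 *   auto d = x.cast(sd::DataType::DOUBLE);   // new array, elements converted to double
 */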
2019-06-06 14:21:15 +02:00
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
void cast(NDArray& target, DataType dtype);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns _context
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
sd::LaunchContext* getContext() const {
    return _context;
}
|
|
|
|
|
|
|
|
#ifndef __JAVACPP_HACK__
|
|
|
|
FORCEINLINE std::shared_ptr<DataBuffer> getDataBuffer() const;
|
|
|
|
FORCEINLINE std::shared_ptr<DataBuffer> dataBuffer();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns host buffer
|
|
|
|
*/
|
|
|
|
FORCEINLINE void* buffer();
|
2020-05-09 07:06:14 +02:00
|
|
|
FORCEINLINE const void* buffer() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns buffer offset (offset is the same for host and device buffers)
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
FORCEINLINE Nd4jLong bufferOffset() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* if _bufferD==nullptr return _buffer, else return _bufferD
|
|
|
|
*/
|
void* specialBuffer();
|
2020-05-09 07:06:14 +02:00
|
|
|
const void* specialBuffer() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns device buffer if compilation is for cuda case, otherwise returns host buffer
|
|
|
|
*/
|
|
|
|
void* platformBuffer();
|
2020-05-09 07:06:14 +02:00
|
|
|
const void* platformBuffer() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
template <typename T>
|
2020-05-09 07:06:14 +02:00
|
|
|
T* bufferAsT();
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
const T* bufferAsT() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns _shapeInfo
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
FORCEINLINE const Nd4jLong* shapeInfo() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
/**
* returns true if this is a legitimately empty NDArray, false otherwise
* @return
*/
|
|
|
|
FORCEINLINE bool isEmpty() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD
|
|
|
|
*/
|
2020-05-09 07:06:14 +02:00
|
|
|
FORCEINLINE const Nd4jLong* specialShapeInfo() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-05-09 07:06:14 +02:00
|
|
|
const Nd4jLong* platformShapeInfo() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* permutes (in-place) the dimensions in array according to "dimensions" array
|
|
|
|
*/
|
|
|
|
bool permutei(const std::initializer_list<int>& dimensions);
|
|
|
|
bool permutei(const std::vector<int>& dimensions);
|
|
|
|
bool permutei(const int* dimensions, const int rank);
|
|
|
|
|
|
|
|
bool permutei(const std::initializer_list<Nd4jLong>& dimensions);
|
|
|
|
bool permutei(const std::vector<Nd4jLong>& dimensions);
|
|
|
|
bool permutei(const Nd4jLong* dimensions, const int rank);
|
|
|
|
|
|
|
|
bool isFinite();
|
|
|
|
bool hasNaNs();
|
|
|
|
bool hasInfs();
|
|
|
|
|
|
|
|
void copyBuffersContinuouslyFrom(const NDArray& other, size_t sizeToCopyInBytes = 0, Nd4jLong offsetThis = 0, Nd4jLong offsetOther = 0);
|
|
|
|
|
|
|
|
/**
* permutes the dimensions in array according to "dimensions" array; the new array points to the _buffer of this array
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray permute(const std::initializer_list<int>& dimensions) const &;
|
|
|
|
NDArray permute(const std::vector<int>& dimensions) const &;
|
|
|
|
NDArray permute(const int* dimensions, const int rank) const &;
|
|
|
|
NDArray permute(const std::initializer_list<int>& dimensions) &&;
|
|
|
|
NDArray permute(const std::vector<int>& dimensions) &&;
|
|
|
|
NDArray permute(const int* dimensions, const int rank) &&;
|
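/*
 * Usage sketch for the permute family above (illustrative only). permute()
 * returns an array that shares this array's buffer, while permutei() permutes
 * this array in place.
 *
 *   sd::NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::FLOAT32);
 *   auto xt = x.permute({1, 0});   // 3x2 view over the same buffer
 *   x.permutei({1, 0});            // permutes x itself
 */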
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
void permute(const int* dimensions, const int rank, NDArray& target) const;
|
|
|
|
void permute(const std::vector<int>& dimensions, NDArray& target) const;
|
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray permute(const std::initializer_list<Nd4jLong>& dimensions) const &;
|
|
|
|
NDArray permute(const std::vector<Nd4jLong>& dimensions) const &;
|
|
|
|
NDArray permute(const Nd4jLong* dimensions, const int rank) const &;
|
|
|
|
NDArray permute(const std::initializer_list<Nd4jLong>& dimensions) &&;
|
|
|
|
NDArray permute(const std::vector<Nd4jLong>& dimensions) &&;
|
|
|
|
NDArray permute(const Nd4jLong* dimensions, const int rank) &&;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
void permute(const Nd4jLong* dimensions, const int rank, NDArray& target) const;
|
|
|
|
void permute(const std::vector<Nd4jLong>& dimensions, NDArray& target) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This method streamlines given view or permuted array, and reallocates buffer
|
|
|
|
*/
|
|
|
|
void streamline(char order = 'a');
|
|
|
|
|
|
|
|
/**
|
|
|
|
* prints information about array shape
|
|
|
|
* msg - message to print out
|
|
|
|
*/
|
|
|
|
void printShapeInfo(const char * msg = nullptr) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* prints buffer elements
|
|
|
|
* msg - message to print out
|
|
|
|
* limit - number of array elements to print out
|
|
|
|
* sync - if true check whether host buffer is actual, if it is not then make it so
|
|
|
|
*/
|
|
|
|
void printBuffer(const char* msg = nullptr, Nd4jLong limit = -1, const bool sync = true) const;
|
|
|
|
|
2019-08-08 17:05:21 +02:00
|
|
|
/**
* prints elements one by one, consecutively, in the order they are stored in physical memory
*/
void printLinearBuffer() const;
|
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* prints _buffer (if host = true) or _bufferD (if host = false) as it is, that is in current state without checking buffer status
|
|
|
|
*/
|
|
|
|
template<typename T>
|
|
|
|
void printCurrentBuffer(const bool host = true, const char* msg = nullptr, const int precision = 1) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* prints buffer elements, takes into account offset between elements (element-wise-stride)
|
|
|
|
* msg - message to print out
|
|
|
|
* limit - number of array elements to print out
|
|
|
|
*/
|
|
|
|
void printIndexedBuffer(const char* msg = nullptr, Nd4jLong limit = -1) const;
|
|
|
|
|
|
|
|
std::string asIndexedString(Nd4jLong limit = -1);
|
|
|
|
std::string asString(Nd4jLong limit = -1);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* this method assigns values of given array to this one
|
|
|
|
*/
|
2019-08-02 19:01:03 +02:00
|
|
|
void assign(const NDArray* other, bool allowParallelism = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* this method assigns values of given array to this one
|
|
|
|
*/
|
2019-08-02 19:01:03 +02:00
|
|
|
void assign(const NDArray& other, bool allowParallelism = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* this method assigns given value to all elements in array
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
template <typename T, typename = typename std::enable_if<DataTypeUtils::scalarTypesForNDarray<T>::value>::type>
|
|
|
|
void assign(const T& value, bool allowParallelism = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns new copy of this array, optionally in different order
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray dup(const char newOrder = 'a') const;
|
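/*
 * Usage sketch for assign()/dup() above (illustrative only):
 *
 *   sd::NDArray x('c', {2, 2}, sd::DataType::FLOAT32);
 *   x.assign(1.5f);        // every element becomes 1.5
 *   auto y = x.dup('f');   // independent copy in column-major ('f') order
 *   y.assign(x);           // copy x's values back into y
 */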
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns sum of all elements of array
|
|
|
|
*/
|
|
|
|
NDArray sumNumber() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns mean number of array
|
|
|
|
*/
|
|
|
|
NDArray meanNumber() const;
|
|
|
|
|
|
|
|
#ifndef __JAVACPP_HACK__
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This method explicitly enforces new shape for this NDArray, old shape/stride information is lost
|
|
|
|
*/
|
|
|
|
void enforce(const std::initializer_list<Nd4jLong> &dimensions, char order = 'a');
|
|
|
|
void enforce(std::vector<Nd4jLong> &dimensions, char order = 'a');
|
|
|
|
|
|
|
|
|
|
|
|
/**
* method reduces the array by excluding its shapes along the dimensions present in the given dimensions vector; the result is stored in a new array that is returned
* dimensions - array of dimensions to reduce along
* keepDims - if true then put unities in place of reduced dimensions
*/
|
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray reduceAlongDimension(sd::reduce::FloatOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
|
|
|
NDArray reduceAlongDimension(sd::reduce::FloatOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
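/*
 * Usage sketch for reduceAlongDimension() above (illustrative only; the op enum
 * value sd::reduce::Mean is assumed to be one of the FloatOps):
 *
 *   sd::NDArray x('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::FLOAT32);
 *   auto colMeans = x.reduceAlongDimension(sd::reduce::Mean, {0});         // shape [3]
 *   auto rowMeans = x.reduceAlongDimension(sd::reduce::Mean, {1}, true);   // keepDims -> shape [2, 1]
 */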
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray reduceAlongDimension(sd::reduce::SameOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
|
|
|
NDArray reduceAlongDimension(sd::reduce::SameOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray reduceAlongDimension(sd::reduce::BoolOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
|
|
|
NDArray reduceAlongDimension(sd::reduce::BoolOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray reduceAlongDimension(sd::reduce::LongOps op, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
|
|
|
NDArray reduceAlongDimension(sd::reduce::LongOps op, const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* method reduces array by excluding its shapes along dimensions present in given dimensions vector
|
|
|
|
* target - where to save result of reducing
|
|
|
|
* dimensions - array of dimensions to reduce along
|
|
|
|
* keepDims - if true then put unities in place of reduced dimensions
|
|
|
|
* extras - extra parameters
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void reduceAlongDimension(sd::reduce::FloatOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
|
|
|
|
void reduceAlongDimension(sd::reduce::SameOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
|
|
|
|
void reduceAlongDimension(sd::reduce::BoolOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
|
|
|
|
void reduceAlongDimension(sd::reduce::LongOps op, NDArray& target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, const bool checkTargetShape = true) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* return variance of array elements set
|
|
|
|
* biasCorrected - if true bias correction will be applied
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray varianceNumber(sd::variance::Ops op, bool biasCorrected = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
* apply reduce operation to the whole array and return the result as a scalar array
* extraParams - extra parameters for operation
* returns scalar array
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray reduceNumber(sd::reduce::FloatOps ops, void *extraParams = nullptr) const;
|
|
|
|
NDArray reduceNumber(sd::reduce::SameOps ops, void *extraParams = nullptr) const;
|
|
|
|
NDArray reduceNumber(sd::reduce::BoolOps ops, void *extraParams = nullptr) const;
|
|
|
|
NDArray reduceNumber(sd::reduce::LongOps ops, void *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void reduceNumber(sd::reduce::FloatOps ops, NDArray& target, void *extraParams = nullptr) const;
|
|
|
|
void reduceNumber(sd::reduce::SameOps ops, NDArray& target, void *extraParams = nullptr) const;
|
|
|
|
void reduceNumber(sd::reduce::BoolOps ops, NDArray& target, void *extraParams = nullptr) const;
|
|
|
|
void reduceNumber(sd::reduce::LongOps ops, NDArray& target, void *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns element index which corresponds to some condition imposed by operation
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray indexReduceNumber(sd::indexreduce::Ops op, ExtraArguments *extraParams = nullptr);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns index of max element in a given array (optionally: along given dimension(s))
|
|
|
|
* dimensions - optional vector with dimensions
|
|
|
|
*/
|
|
|
|
Nd4jLong argMax(std::initializer_list<int> dimensions = {});
|
|
|
|
|
|
|
|
// FIXME: remove this method eventually
|
|
|
|
void makeBothActual() const { syncToDevice(); syncToHost(); }
|
|
|
|
|
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyTransform(sd::transform::FloatOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
|
|
|
|
void applyTransform(sd::transform::SameOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
|
|
|
|
void applyTransform(sd::transform::AnyOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
|
|
|
|
void applyTransform(sd::transform::BoolOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
|
|
|
|
void applyTransform(sd::transform::StrictOps op, NDArray& target, ExtraArguments *extraParams = nullptr);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* apply OpName transformation to this array and store result in new array to be returned
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray transform(sd::transform::FloatOps op, void *extraParams = nullptr) const &;
|
|
|
|
NDArray transform(sd::transform::SameOps op, void *extraParams = nullptr) const &;
|
|
|
|
NDArray transform(sd::transform::BoolOps op, void *extraParams = nullptr) const &;
|
|
|
|
NDArray transform(sd::transform::StrictOps op, void *extraParams = nullptr) const &;
|
|
|
|
NDArray transform(sd::transform::FloatOps op, void *extraParams = nullptr) &&;
|
|
|
|
NDArray transform(sd::transform::SameOps op, void *extraParams = nullptr) &&;
|
|
|
|
NDArray transform(sd::transform::BoolOps op, void *extraParams = nullptr) &&;
|
|
|
|
NDArray transform(sd::transform::StrictOps op, void *extraParams = nullptr) &&;
|
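/*
 * Usage sketch for transform() above (illustrative only; the op enum value
 * sd::transform::Abs is assumed to be one of the SameOps):
 *
 *   sd::NDArray x('c', {2, 2}, {-1, 2, -3, 4}, sd::DataType::FLOAT32);
 *   auto y = x.transform(sd::transform::Abs);   // new array holding |x|
 */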
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
* apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in this array
* other - second array necessary for pairwise operation
* extraParams - extra parameters for operation
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyPairwiseTransform(sd::pairwise::Ops op, const NDArray& other, ExtraArguments *extraParams = nullptr);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
* apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in target array
* other - second array necessary for pairwise operation
* target - where to store result
* extraParams - extra parameters for operation
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyPairwiseTransform(sd::pairwise::Ops op, const NDArray& other, NDArray& target, ExtraArguments *extraParams = nullptr) const;
|
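/*
 * Usage sketch for applyPairwiseTransform() above (illustrative only; the op enum
 * value sd::pairwise::Add is assumed):
 *
 *   sd::NDArray a('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
 *   sd::NDArray b('c', {2, 2}, {10, 20, 30, 40}, sd::DataType::FLOAT32);
 *   auto c = a.ulike();                                  // uninitialized result with a's shape/type
 *   a.applyPairwiseTransform(sd::pairwise::Add, b, c);   // c = a + b element-wise
 */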
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyPairwiseTransform(sd::pairwise::BoolOps op, const NDArray& other, NDArray& target, ExtraArguments *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyPairwiseTransform(sd::pairwise::IntOps op, const NDArray& other, NDArray&target, ExtraArguments *extraParams = nullptr) const;
|
2019-08-30 09:12:40 +02:00
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this)
|
|
|
|
* tad - array to broadcast
|
|
|
|
* dimensions - dimensions array to broadcast along
|
|
|
|
* target - where to store result
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyBroadcast(sd::broadcast::Ops op, const std::initializer_list<int> dimensions, const NDArray& tad, NDArray& target, ExtraArguments* extraArgs = nullptr);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyBroadcast(sd::broadcast::Ops op, const std::vector<int> &dimensions, const NDArray &tad, NDArray &target, ExtraArguments *extraArgs = nullptr);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyBroadcast(sd::broadcast::BoolOps op, const std::vector<int> &dimensions, const NDArray &tad, NDArray &target, ExtraArguments *extraArgs = nullptr);
|
2019-08-30 09:12:40 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyBroadcast(sd::broadcast::IntOps op, const std::vector<int> &dimensions, const NDArray& tad, NDArray &target, ExtraArguments *extraArgs = nullptr);
|
2019-08-30 09:12:40 +02:00
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting
|
|
|
|
* other - input array
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray& other, ExtraArguments *extraArgs = nullptr) const &;
|
|
|
|
NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, NDArray&& other, ExtraArguments *extraArgs = nullptr) const &;
|
|
|
|
NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, NDArray&& other, ExtraArguments *extraArgs = nullptr) &&;
|
|
|
|
NDArray applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray& other, ExtraArguments *extraArgs = nullptr) &&;
|
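/*
 * Usage sketch for applyTrueBroadcast() above (illustrative only; the helper
 * sd::BroadcastOpsTuple::Add() is assumed to be available):
 *
 *   sd::NDArray m('c', {2, 3}, {1, 2, 3, 4, 5, 6}, sd::DataType::FLOAT32);
 *   sd::NDArray row('c', {1, 3}, {10, 20, 30}, sd::DataType::FLOAT32);
 *   auto sum = m.applyTrueBroadcast(sd::BroadcastOpsTuple::Add(), row);   // row is broadcast over m
 */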
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting
|
|
|
|
* other - input array
|
|
|
|
* target - where to store result
|
|
|
|
* checkTargetShape - if true check whether target shape is suitable for broadcasting
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyTrueBroadcast(sd::BroadcastOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyTrueBroadcast(sd::BroadcastBoolOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyTrueBroadcast(sd::BroadcastIntOpsTuple op, const NDArray& other, NDArray& target, const bool checkTargetShape = true, ExtraArguments *extraArgs = nullptr) const;
|
2019-08-30 09:12:40 +02:00
|
|
|
|
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* apply a scalar operation to an array
|
|
|
|
* scalar - input scalar
|
|
|
|
* target - where to store result
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyScalar(sd::scalar::Ops op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr);
|
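/*
 * Usage sketch for applyScalar() above (illustrative only; the op enum value
 * sd::scalar::Multiply is assumed):
 *
 *   sd::NDArray x('c', {2, 2}, {1, 2, 3, 4}, sd::DataType::FLOAT32);
 *   auto y = x.ulike();
 *   x.applyScalar<float>(sd::scalar::Multiply, 2.0f, y);   // y = 2 * x
 */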
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename T>
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyScalar(sd::scalar::BoolOps op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2019-08-30 09:12:40 +02:00
|
|
|
template <typename T>
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyScalar(sd::scalar::IntOps op, const T scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
|
2019-08-30 09:12:40 +02:00
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* apply a scalar operation to an array
|
|
|
|
* scalar - input array which is simple scalar
|
|
|
|
* target - where to store result
|
|
|
|
* extraParams - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyScalarArr(sd::scalar::Ops op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyScalarArr(sd::scalar::BoolOps op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyScalarArr(sd::scalar::IntOps op, const NDArray& scalar, NDArray& target, ExtraArguments *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* #7939 Integrate NonInplaceValidationListener into tests
* #7844 DL4J SameDiff fixes for variable minibatch size
* DL4J SameDiff fixes - ensure gradient for input placeholder is available
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* Tweaks to ExternalErrorsFunction - use placeholders, make more robust
* Another fix
* More fixes
* More SameDiff/DL4J fixes
* Scope out scalar array creation in BaseScalarOp
* Remove debug code
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* [WIP] Final dev branch merge (#29)
* SameDiff: convertDataType and gradient check util improvements (#12)
* GradCheck util improvements
* StopGradient constructor + test
* SameDiff: Add datatype conversion
* Javadoc and add DataType.isNumerical()
* Small fix
* Fix SameDiff TF import test cases intermediate naming (workaround for bad default)
* TFGraphTestAllHelper: check intermediates in execution order
* Add missing debug listener
* [WIP] lstmBlock fix + other changes (#13)
- fixes lstmBlock issue
- changes NDArray method reshape(), permute(), transpose() by making them return instance instead of pointer
- CheckNumerics op
- fixes for ReduceBool IsInfOrNan & IsFinite
* Small test fix
* CheckNumerics op wrapper
* Compatibility of deserialization (#18)
Signed-off-by: Alexander Stoyakin <alexander.stoyakin@gmail.com>
* SameDiff: add activation gradient checking support for debugging (#19)
* SameDiff gradient checker: first pass on activation gradient checks
* Fixes + tests for activation gradient checking
* Javadoc
* [WIP] Some nd4j data type corrections (#20)
* Adjust data type
* Set correct Data type.
* Size of proper data type.
* fix averaged cpu load (#22)
* [WIP] Multiple dataset iterators (#27)
* Splitting dataset into arbitrary number
* Fixes
* Multiple split of iterator
* Test
* Test
* Some fixes
* signature change
* one more tweak
Signed-off-by: raver119 <raver119@gmail.com>
* one more test for sequential use of DataSetIteratorSplitter
Signed-off-by: raver119 <raver119@gmail.com>
* Fixes
* Fixes
* one more test for Alexander
Signed-off-by: raver119 <raver119@gmail.com>
* Some fixes
* Some fixes
* one more test for Alexander
Signed-off-by: raver119 <raver119@gmail.com>
* minor test fix
Signed-off-by: raver119 <raver119@gmail.com>
* Some fixes
* Some fixes
* couple of assertions tweaked
Signed-off-by: raver119 <raver119@gmail.com>
* MDS splitter test :/
Signed-off-by: raver119 <raver119@gmail.com>
* Minor refactoring
* Multi dataset
* Some fixes
* More tests
* Small number of test fixes/improvements (failures on CI) (#31)
Signed-off-by: AlexDBlack <blacka101@gmail.com>
* [WIP] More CUDA stuff (#26)
* initial commit
Signed-off-by: raver119 <raver119@gmail.com>
* LRN BP CUDA
Signed-off-by: raver119 <raver119@gmail.com>
* less memory
Signed-off-by: raver119 <raver119@gmail.com>
* Fixed bug with crop_and_resize op helper.
* get rid of unnecessary index-calculation dunction
Signed-off-by: Yurii <yurii@skymind.io>
* Fixed sort with nth_element cuda-based helper.
* Refactored nth_element.
* Refactored nth_element op and tests.
* Modified usage of dim array with sortTad routine.
* Refactored main routine of helper for non_max_image_suppression op.
* non_max_image_suppression op helper with cuda kernel implementation. Initial revision.
* fix vol2col cuda kernel
* meh
Signed-off-by: raver119 <raver119@gmail.com>
* topK concept
Signed-off-by: raver119 <raver119@gmail.com>
* unsorted topK with scanWitdh of 1
Signed-off-by: raver119 <raver119@gmail.com>
* correct vol2col tests
* sorted/unsorted topK
Signed-off-by: raver119 <raver119@gmail.com>
* implementation and fixing col2im/col2vol
* Corrected usage flags with input/output with reverse op.
* dup is const now
Signed-off-by: raver119 <raver119@gmail.com>
* percentile op
Signed-off-by: raver119 <raver119@gmail.com>
* group tests for mapool2d
Signed-off-by: Yurii <yurii@skymind.io>
* special test for george
Signed-off-by: raver119 <raver119@gmail.com>
* less threads for sortTad
Signed-off-by: raver119 <raver119@gmail.com>
* provide conv2d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* remove auther in sort tad kernel code
Signed-off-by: Yurii <yurii@skymind.io>
* provide depthwise_conv2d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* - max_pooling_with_argmax
- null check for special use
Signed-off-by: raver119 <raver119@gmail.com>
* dts cuda
Signed-off-by: raver119 <raver119@gmail.com>
* provide sconv2d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* std cuda
Signed-off-by: raver119 <raver119@gmail.com>
* Refactored non_max_suppression op to conform TF implementation.
* Improved suppression helper.
* provide pooling3d for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* minor lstm rearrangements
Signed-off-by: raver119 <raver119@gmail.com>
* more of minor lstm rearrangements
Signed-off-by: raver119 <raver119@gmail.com>
* (bi)dynamic_rnn
Signed-off-by: raver119 <raver119@gmail.com>
* templates init order
Signed-off-by: raver119 <raver119@gmail.com>
* Refactored non_max_suppression op.
* Added cuda kernel for non_max_suppression.
* CPU sort by key/value
Signed-off-by: raver119 <raver119@gmail.com>
* CPU sort TAD by key/value
Signed-off-by: raver119 <raver119@gmail.com>
* CPU sort TAD by key/value tests
Signed-off-by: raver119 <raver119@gmail.com>
* Eliminate compiler error with cuda implementation.
* - repaired gradCheck in cuda
- provide conv2d_bp for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* missed signature
Signed-off-by: raver119 <raver119@gmail.com>
* provide depthwise_conv2d_bp for cuda
Signed-off-by: Yurii <yurii@skymind.io>
* Implementation of lup helper with cuda kernel. Initial commit.
* further work on backprops for convolutions
Signed-off-by: Yurii <yurii@skymind.io>
* CUDA linear sort by key/val
Signed-off-by: raver119 <raver119@gmail.com>
* CUDA tad sort by key/val
Signed-off-by: raver119 <raver119@gmail.com>
* start providing of backprop for pooling2d/3d
Signed-off-by: Yurii <yurii@skymind.io>
* Added atomicAdd for bool datatype.
* dynamic partition concept
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic partition concept
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic partition scalar CUDA
Signed-off-by: raver119 <raver119@gmail.com>
* important comment
Signed-off-by: raver119 <raver119@gmail.com>
* fix pooling2d/3d backprop helpers
Signed-off-by: Yurii <yurii@skymind.io>
* Added non-linear test with dynamic_partition.
* Improved test for dynamic_partition.
* dynamic_partition TAD concept
Signed-off-by: raver119 <raver119@gmail.com>
* - dynamic_partition TAD CUDA impl
- dynamic_partition TAD CPU fix
Signed-off-by: raver119 <raver119@gmail.com>
* - rewrite cpu code for usampling2d/3d
- write cuda code for usampling2d/3d
Signed-off-by: Yurii <yurii@skymind.io>
* dynamic_stitch CUDA vector case
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic_stitch CUDA TAD case concept
Signed-off-by: raver119 <raver119@gmail.com>
* dynamic_stitch CUDA TAD case impl
Signed-off-by: raver119 <raver119@gmail.com>
* Added tests for dynamic_stitch 3D-4D cases.
* minor tests tweaks
Signed-off-by: raver119 <raver119@gmail.com>
* Fixed type check for dynamic stitch.
* min/max bp
Signed-off-by: raver119 <raver119@gmail.com>
* rewrite code for upsampling2d/3d cpu
Signed-off-by: Yurii <yurii@skymind.io>
* reduce min/max/norm_max bp
Signed-off-by: raver119 <raver119@gmail.com>
* lup implementation. Additional enhancements.
* provide code for upsamling2d/3d backprop
Signed-off-by: Yurii <yurii@skymind.io>
* weightedCrossEntropyWithLogits
Signed-off-by: raver119 <raver119@gmail.com>
* Fixed template math atomicMul for 64bit ints.
* Refactored dynamic_partition_bp op.
* inverseBroadcast fix
Signed-off-by: raver119 <raver119@gmail.com>
* DynamicPartitionBP test datatype fixed.
* - nd4j_atomicMul Windows fix
- cpu/NDArrayLambda.hpp excluded from CUDA
Signed-off-by: raver119 <raver119@gmail.com>
2019-06-27 17:37:04 +02:00
|
|
|
#if defined(__CUDABLAS__) //&& defined(BUILD_TESTS)
|
2019-06-06 14:21:15 +02:00
|
|
|
template <typename Lambda>
|
2019-12-20 20:35:39 +01:00
|
|
|
FORCEINLINE void applyLambda(Lambda func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename Lambda>
|
2019-12-20 20:35:39 +01:00
|
|
|
FORCEINLINE void applyPairwiseLambda(const NDArray& other, Lambda func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename Lambda>
|
2019-12-20 20:35:39 +01:00
|
|
|
FORCEINLINE void applyIndexedLambda(Lambda func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename Lambda>
|
2019-12-20 20:35:39 +01:00
|
|
|
FORCEINLINE void applyIndexedPairwiseLambda(NDArray& other, Lambda func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename Lambda>
|
2019-12-20 20:35:39 +01:00
|
|
|
FORCEINLINE void applyTriplewiseLambda(NDArray& second, NDArray& third, Lambda func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
#else
|
|
|
|
|
|
|
|
/**
|
|
|
|
* applies operation "func" to every element of this array
|
|
|
|
* func - what operation to apply
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2019-12-20 20:35:39 +01:00
|
|
|
void applyLambda(const std::function<T(T)>& func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* applies pairwise operation "func" to corresponding elements of this array and "other"
|
|
|
|
* other - input array
|
|
|
|
* func - what pairwise operation to apply
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2019-12-20 20:35:39 +01:00
|
|
|
void applyPairwiseLambda(const NDArray& other, const std::function<T(T, T)>& func, NDArray& target);
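/**
 * Usage sketch for the lambda helpers above (illustrative only; assumes code inside a function
 * body, with the arrays created via NDArrayFactory::create, which is declared outside this header):
 *
 *   NDArray x = NDArrayFactory::create<float>('c', {2, 2}, {1.f, 2.f, 3.f, 4.f});
 *   NDArray y = NDArrayFactory::create<float>('c', {2, 2}, {10.f, 20.f, 30.f, 40.f});
 *   NDArray z(x);                                                                  // target, same shape/type
 *
 *   x.applyLambda<float>([](float v) { return 2.f * v; }, z);                      // z = 2 * x
 *   x.applyPairwiseLambda<float>(y, [](float a, float b) { return a + b; }, z);    // z = x + y
 */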
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename T>
|
2019-12-20 20:35:39 +01:00
|
|
|
void applyIndexedLambda(const std::function<T(Nd4jLong, T)>& func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename T>
|
2019-12-20 20:35:39 +01:00
|
|
|
void applyIndexedPairwiseLambda(NDArray& other, const std::function<T(Nd4jLong, T, T)>& func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
template <typename T>
|
2019-12-20 20:35:39 +01:00
|
|
|
void applyTriplewiseLambda(NDArray& second, NDArray& third, const std::function<T(T, T, T)>& func, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
|
|
|
|
* reduces dimensions in this array relying on index operation OpName
|
|
|
|
* dimensions - vector of dimensions to reduce along
|
|
|
|
* extraArgs - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray applyIndexReduce(sd::indexreduce::Ops op, const std::vector<int>& dimensions, const ExtraArguments *extraParams = nullptr) const;
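/**
 * Usage sketch (illustrative; assumes the IndexMax entry of sd::indexreduce::Ops from op_enums.h
 * and NDArrayFactory::create, neither of which is declared in this header):
 *
 *   NDArray m   = NDArrayFactory::create<float>('c', {2, 3}, {1.f, 5.f, 3.f, 4.f, 2.f, 6.f});
 *   NDArray idx = m.applyIndexReduce(sd::indexreduce::IndexMax, {1});   // per-row argmax -> values {1, 2}
 */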
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* reduces dimensions in array relying on index operation OpName
|
|
|
|
* target - where to store result
|
|
|
|
* dimensions - vector of dimensions to reduce along
|
|
|
|
* extraArgs - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
void applyIndexReduce(sd::indexreduce::Ops op, NDArray& target, const std::vector<int>& dimensions, const ExtraArguments *extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* apply reduce3 operation OpName to this and other array, return result in new output array
|
|
|
|
* other - input array
|
|
|
|
* extraArgs - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray applyReduce3(sd::reduce3::Ops op, const NDArray& other, const ExtraArguments* extraParams = nullptr) const;
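/**
 * Usage sketch (illustrative; assumes the Dot entry of sd::reduce3::Ops from op_enums.h):
 *
 *   NDArray a   = NDArrayFactory::create<float>('c', {3}, {1.f, 2.f, 3.f});
 *   NDArray b   = NDArrayFactory::create<float>('c', {3}, {4.f, 5.f, 6.f});
 *   NDArray dot = a.applyReduce3(sd::reduce3::Dot, b);   // scalar array holding 32
 */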
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* apply reduce3 operation OpName to this and other array, return result in new output array
|
|
|
|
* other - input array
|
|
|
|
* dimensions - vector of dimensions to reduce along (tads not axis)
|
|
|
|
* extraArgs - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray applyAllReduce3(sd::reduce3::Ops op, const NDArray& other, const std::vector<int>& dimensions, const ExtraArguments* extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* apply reduce3 (exec) operation OpName to this and other array, return result in new output array
|
|
|
|
* other - input array
|
|
|
|
* dimensions - vector of dimensions to reduce along (same as reduceAlongDimension)
|
|
|
|
* extraArgs - extra parameters for operation
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray applyReduce3(sd::reduce3::Ops op, const NDArray& other, const std::vector<int>& dimensions, const ExtraArguments* extraParams = nullptr) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns variance along given dimensions
|
|
|
|
* biasCorrected - if true bias correction will be applied
|
|
|
|
* dimensions - vector of dimensions to calculate variance along
|
|
|
|
*/
|
2020-03-02 10:49:41 +01:00
|
|
|
NDArray varianceAlongDimension(sd::variance::Ops op, const bool biasCorrected, const std::vector<int>& dimensions) const;
|
|
|
|
NDArray varianceAlongDimension(sd::variance::Ops op, const bool biasCorrected, const std::initializer_list<int>& dimensions) const;
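/**
 * Usage sketch (illustrative; SummaryStatsVariance is assumed to be the variance entry of
 * sd::variance::Ops in op_enums.h):
 *
 *   NDArray v   = NDArrayFactory::create<float>('c', {2, 2}, {1.f, 2.f, 3.f, 4.f});
 *   NDArray var = v.varianceAlongDimension(sd::variance::SummaryStatsVariance, false, {1});   // per-row variance
 */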
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-03-02 10:49:41 +01:00
|
|
|
void varianceAlongDimension(sd::variance::Ops op, NDArray& target, const bool biasCorrected, const std::vector<int>& dimensions) const;
|
|
|
|
void varianceAlongDimension(sd::variance::Ops op, NDArray& target, const bool biasCorrected, const std::initializer_list<int>& dimensions) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/**
|
|
|
|
* applies the transpose operation to a copy of this array, i.e. this array remains unaffected
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray transpose() const &;
|
|
|
|
NDArray transpose() &&;
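/**
 * Usage sketch (illustrative; NDArrayFactory::create is assumed from elsewhere in the code base):
 *
 *   NDArray x  = NDArrayFactory::create<float>('c', {2, 3});
 *   NDArray xT = x.transpose();   // new {3, 2} array, x itself is unchanged
 *   x.transposei();               // in-place: x now has shape {3, 2}
 */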
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* perform transpose operation and store result in target, this array remains unaffected
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
|
|
|
void transpose(NDArray& target) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* apply in-place transpose operation to this array, so this array becomes transposed
|
|
|
|
*/
|
|
|
|
void transposei();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns the number of tensors (sub-arrays) along the specified dimension(s)
|
|
|
|
* dimensions - dimensions along which the sub-arrays are taken
|
|
|
|
*/
|
|
|
|
Nd4jLong tensorsAlongDimension(const std::initializer_list<int> dimensions) const;
|
|
|
|
Nd4jLong tensorsAlongDimension(const std::vector<int>& dimensions) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns true if elements of two arrays are equal to within given epsilon value
|
|
|
|
* other - input array to compare
|
|
|
|
* eps - epsilon, this value defines the precision of elements comparison
|
|
|
|
*/
|
|
|
|
bool equalsTo(const NDArray *other, double eps = 1e-5) const;
|
|
|
|
bool equalsTo(const NDArray &other, double eps = 1e-5) const;
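/**
 * Usage sketch (illustrative):
 *
 *   NDArray a = NDArrayFactory::create<float>('c', {2}, {1.f, 2.f});
 *   NDArray b = NDArrayFactory::create<float>('c', {2}, {1.f, 2.0001f});
 *   bool close = a.equalsTo(b, 1e-3);   // true: the element-wise difference stays within eps
 */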
|
|
|
|
|
|
|
|
/**
|
|
|
|
* add given row vector to all rows of this array
|
|
|
|
* row - row vector to add
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void addiRowVector(const NDArray& row);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* add given row vector to all rows of this array, store result in target
|
|
|
|
* row - row vector to add
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void addRowVector(const NDArray& row, NDArray& target) const;
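/**
 * Usage sketch (illustrative):
 *
 *   NDArray m   = NDArrayFactory::create<float>('c', {2, 3});
 *   NDArray row = NDArrayFactory::create<float>('c', {1, 3}, {1.f, 2.f, 3.f});
 *   NDArray out = NDArrayFactory::create<float>('c', {2, 3});
 *   m.addRowVector(row, out);   // every row of m plus row, written into out
 */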
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* subtract given row vector from all rows of this array, store result in target
|
|
|
|
* row - row vector to subtract
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void subRowVector(const NDArray& row, NDArray& target) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* multiply all rows of this array by the given row vector, store result in target
|
|
|
|
* row - row vector to multiply by
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void mulRowVector(const NDArray &row, NDArray& target) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* divide all rows of this array by the given row vector, store result in target
|
|
|
|
* row - row vector to divide by
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void divRowVector(const NDArray &row, NDArray& target) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* add given column vector to all columns of this array, store result in target
|
|
|
|
* column - column vector to add
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void addColumnVector(const NDArray &column, NDArray& target) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* add given column vector to all columns of this array, this array becomes affected (in-place operation)
|
|
|
|
* column - column vector to add
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void addiColumnVector(const NDArray &column);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* multiply all columns of this array by the given column vector, this array becomes affected (in-place operation)
|
|
|
|
* column - column vector to multiply by
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
void muliColumnVector(const NDArray &column);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns number of bytes used by _buffer & _shapeInfo
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong memoryFootprint();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* these methods are suited for FlatBuffers use
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2020-05-12 06:47:09 +02:00
|
|
|
std::vector<T> getBufferAsVector() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
std::vector<Nd4jLong> getShapeAsVector() const;
|
2020-01-20 19:32:46 +01:00
|
|
|
std::vector<int> getShapeAsVectorInt() const;
|
2020-05-12 06:47:09 +02:00
|
|
|
std::vector<Nd4jLong> getShapeInfoAsVector() const;
|
|
|
|
std::vector<int64_t> getShapeInfoAsFlatVector() const;
|
|
|
|
std::vector<int64_t> getShapeAsFlatVector() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* sets new order and shape, provided the new shape is compatible with the array length (in-place operation)
|
|
|
|
* order - order to set
|
|
|
|
* shape - shape to set
|
2020-02-13 18:33:54 +01:00
|
|
|
* copyToNewBuff - if true, the old buffer is copied into the new buffer whenever a new buffer has to be allocated during reshaping
|
2019-06-06 14:21:15 +02:00
|
|
|
* if a permute was applied beforehand, or the strides are non-standard, a new buffer is allocated for the array
|
|
|
|
*/
|
2020-02-13 18:33:54 +01:00
|
|
|
bool reshapei(const char order, const std::initializer_list<Nd4jLong>& shape, const bool copyToNewBuff = true);
|
|
|
|
bool reshapei(const char order, const std::vector<Nd4jLong>& shape, const bool copyToNewBuff = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-02-13 18:33:54 +01:00
|
|
|
bool reshapei(const std::initializer_list<Nd4jLong>& shape, const bool copyToNewBuff = true);
|
|
|
|
bool reshapei(const std::vector<Nd4jLong>& shape, const bool copyToNewBuff = true);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* creates a new array with the given order and shape; the new array points to the _buffer of this array
|
|
|
|
* order - order to set
|
|
|
|
* shape - shape to set
|
|
|
|
*
|
|
|
|
* if a permute was applied beforehand, or the strides are non-standard, a new buffer is allocated for the new array
|
|
|
|
*/
|
2020-02-13 18:33:54 +01:00
|
|
|
NDArray reshape(const char order, const std::vector<Nd4jLong>& shape, const bool copyToNewBuff = true) const &;
|
|
|
|
NDArray reshape(const char order, const std::vector<Nd4jLong>& shape, const bool copyToNewBuff = true) &&;
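/**
 * Usage sketch (illustrative):
 *
 *   NDArray x = NDArrayFactory::create<float>('c', {2, 3});
 *   NDArray y = x.reshape('c', {3, 2});   // new {3, 2} array over the same data, x unchanged
 *   x.reshapei({6});                      // in-place: x becomes a length-6 vector
 */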
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* calculate strides and set given order
|
|
|
|
* order - order to set
|
|
|
|
*/
|
|
|
|
void updateStrides(const char order);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* change an array by repeating it the number of times given by reps (in-place operation)
|
|
|
|
* repeats - contains numbers of repetitions
|
|
|
|
*/
|
|
|
|
void tilei(const std::vector<Nd4jLong>& repeats);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns a new array created by repeating this array the number of times given by repeats
|
|
|
|
* repeats - contains numbers of repetitions
|
|
|
|
*/
|
|
|
|
NDArray tile(const std::vector<Nd4jLong>& repeats) const;
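/**
 * Usage sketch (illustrative):
 *
 *   NDArray x = NDArrayFactory::create<float>('c', {1, 2}, {1.f, 2.f});
 *   NDArray t = x.tile({3, 1});   // shape {3, 2}: x repeated 3 times along axis 0
 */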
|
|
|
|
|
|
|
|
/**
|
|
|
|
* change an array by repeating it the number of times given by reps (in-place operation)
|
|
|
|
* repeats - contains numbers of repetitions
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
|
|
|
void tile(const std::vector<Nd4jLong>& repeats, NDArray& target) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* repeats this array as many times as needed to match the shape of target
|
|
|
|
* target - where to store result
|
|
|
|
*/
|
|
|
|
void tile(NDArray& target) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* check whether array is identity matrix
|
|
|
|
*/
|
|
|
|
bool isIdentityMatrix();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* check whether array is unitary matrix
|
|
|
|
*/
|
|
|
|
bool isUnitary();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* operator returns subarray with buffer pointing at this->_buffer with offset defined by given intervals
|
|
|
|
* idx - intervals of indexes which define the subarrays to point on, idx has form {dim0Start,dim0End, dim1Start,dim1End, ....} and length (2 * this->rankOf())
|
|
|
|
* when (dimStart == dimEnd) then whole range will be used for current dimension
|
|
|
|
* keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b}
|
|
|
|
* isStrided - if true then idx has length (3 * this->rankOf()) and contains additional stride numbers which correspond to stride between dimStart and dimEnd,
|
|
|
|
* so structure of idx is like {dim0Start,dim0End,dim0Stride, dim1Start,dim1End,dim1Stride, ....}
|
|
|
|
*/
|
|
|
|
NDArray operator()(const std::vector<Nd4jLong>& idx, const bool keepUnitiesInShape = false, const bool isStrided = false) const;
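/**
 * Usage sketch (illustrative):
 *
 *   NDArray x = NDArrayFactory::create<float>('c', {4, 5});
 *   NDArray sub = x({1, 3,  0, 0});   // rows selected by the interval {1,3}, all columns
 *                                     // ({0,0} means the whole range of that dimension)
 */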
|
|
|
|
|
|
|
|
/**
|
|
|
|
* evaluates subarray with buffer pointing at this->_buffer and offset defined by given sequential index subArrIdx and dimensions in dimsToExclude
|
|
|
|
* subArrIdx - index of current sub-array
|
|
|
|
* dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5], and subArrIdx must be in range [0,7]
|
|
|
|
* if dimsToExclude is empty then idxRanges containing all zeros (means whole array) will be returned.
|
|
|
|
* keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b}
|
|
|
|
*/
|
|
|
|
NDArray operator()(const Nd4jLong subArrIdx, const std::vector<int>& dimsToExclude, bool keepUnitiesInShape = false) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* processes whole set of sub-arrays
|
|
|
|
* evaluates shapeInfo of sub-arrays (all sub-arrays have the same shapeInfo) and their buffer offsets (each sub-array has its own unique offset from original this-buffer)
|
|
|
|
* dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5]
|
|
|
|
* if dimsToExclude.size() equals the array rank, the sub-array is the whole array; a copy of the original shapeInfo and a single zero offset are returned
|
|
|
|
* subArrShapeInfo - output argument, contains shapeInfo common for all sub-arrays
|
|
|
|
* subArrOffsets - output argument, contains successive sub-arrays offsets from original this-buffer
|
|
|
|
* keepUnitiesInShape - if false then eliminate unities from sub-array shapeInfo, for example {1,a,1,b} -> {a,b}
|
|
|
|
*/
|
|
|
|
void getSubArrShapeAndOffsets(const std::vector<int>& dimsToExclude, Nd4jLong* &subArrShapeInfo, Nd4jLong* &subArrOffsets, bool keepUnitiesInShape = false) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* addition assignment operator array += other
|
|
|
|
* other - input array to add
|
|
|
|
*/
|
|
|
|
void operator+=(const NDArray& other);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* subtraction assignment operator array -= other
|
|
|
|
* other - input array to subtract
|
|
|
|
*/
|
|
|
|
void operator-=(const NDArray& other);
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
void operator+=(const T other);
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
void operator-=(const T other);
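/**
 * Usage sketch for the compound-assignment operators above (illustrative):
 *
 *   NDArray a = NDArrayFactory::create<float>('c', {2}, {1.f, 2.f});
 *   NDArray b = NDArrayFactory::create<float>('c', {2}, {3.f, 4.f});
 *   a += b;      // a becomes {4, 6}
 *   a -= 1.f;    // a becomes {3, 5}
 */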
|
|
|
|
|
|
|
|
/**
|
|
|
|
* negation operator, changes the sign of every array element
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray operator-() const &;
|
|
|
|
NDArray operator-() &&;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* pairwise multiplication assignment operator array *= other
|
|
|
|
* other - input array to multiply by
|
|
|
|
*/
|
|
|
|
void operator*=(const NDArray& other);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* multiplication assignment operator array *= scalar
|
|
|
|
* scalar - input scalar to multiply by
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
void operator*=(const T scalar);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* pairwise division assignment operator: array /= other
|
|
|
|
* other - input array to divide by
|
|
|
|
*/
|
|
|
|
void operator/=(const NDArray& other);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* division assignment operator: array /= scalar
|
|
|
|
* scalar - input scalar to divide by
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
void operator/=(const T scalar);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* friend function which implements mathematical multiplication of two arrays
|
|
|
|
* left - input array
|
|
|
|
* right - input array
|
|
|
|
*/
|
|
|
|
friend NDArray mmul(const NDArray& left, const NDArray& right);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* return vector containing _buffer as flat binary array
|
|
|
|
*/
|
|
|
|
std::vector<int8_t> asByteVector();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* makes this array an identity matrix (not necessarily square): sets all diagonal elements to 1 and the rest to 0
|
|
|
|
*/
|
|
|
|
void setIdentity();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* swaps the contents of two arrays,
|
|
|
|
* PLEASE NOTE: the method does not take the shapes of the arrays into account; shapes may differ, with the single condition that both arrays have the same length
|
|
|
|
*/
|
|
|
|
void swapUnsafe(NDArray& other);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns a vector whose buffer points to the corresponding diagonal elements of the array
|
|
|
|
* type - kind of vector to return: column ('c') or row ('r')
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray diagonal(const char type) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* fill target matrix with given value in one or two directions from main diagonal:
|
2020-05-14 17:06:13 +02:00
|
|
|
* - down from main diagonal starting at subdiagonal number "lower" if direction = 'l' (down) or 'b' (both)
|
2019-06-06 14:21:15 +02:00
|
|
|
* - up from main diagonal starting at superdiagonal number "upper" if direction = 'u' (up) or 'b' (both)
|
|
|
|
* direction - in what direction to fill matrix. There are 3 possible directions:
|
2019-07-12 10:51:51 +02:00
|
|
|
* 'u' - fill up, mathematically this corresponds to lower triangular matrix, subdiagonal "lower" unaffected
|
|
|
|
* 'l' - fill down, mathematically this corresponds to upper triangular matrix, superdiagonal "upper" remains unaffected
|
|
|
|
* 'b' - fill in both directions, both "lower" and "upper" are taken into account
|
2019-06-06 14:21:15 +02:00
|
|
|
* the rest of the target elements are set equal to the corresponding elements of this array
|
|
|
|
* target and this array should have the same shapes, except when this_rank = 1 (in that case target_rank should be 2)
|
|
|
|
*/
|
|
|
|
template <typename T>
|
2019-12-20 20:35:39 +01:00
|
|
|
void fillAsTriangular(const float value, int lower, int upper, NDArray& target, const char direction = 'b');
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* change an array by repeating it the number of times in order to acquire new shape equal to the input shape
|
|
|
|
*
|
|
|
|
* shape - contains new shape to broadcast array to
|
|
|
|
* target - optional argument; if target != nullptr the resulting array is placed in target, otherwise the tile operation is done in place
|
|
|
|
*/
|
|
|
|
NDArray tileToShape(const Nd4jLong* shapeInfo);
|
2019-12-20 20:35:39 +01:00
|
|
|
void tileToShape(const std::vector<Nd4jLong>& shape, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
#ifndef __JAVACPP_HACK__
|
2019-12-20 20:35:39 +01:00
|
|
|
void tileToShape(const std::initializer_list<Nd4jLong>& shape, NDArray& target);
|
2019-06-06 14:21:15 +02:00
|
|
|
#endif
|
|
|
|
|
|
|
|
template <typename N>
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray asT() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2020-01-31 14:30:49 +01:00
|
|
|
template <typename S>
|
|
|
|
NDArray asS() const;
|
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
NDArray asT(DataType dtype) const;
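/**
 * Usage sketch for the type-conversion helpers above (illustrative):
 *
 *   NDArray f = NDArrayFactory::create<float>('c', {2, 2});
 *   NDArray d = f.asT<double>();              // element-wise cast via the template overload
 *   NDArray i = f.asT(sd::DataType::INT32);   // cast using a runtime DataType value
 */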
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
void linspace(const double start);
|
|
|
|
|
|
|
|
void linspace(const double start, const double step);
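/**
 * Usage sketch (illustrative):
 *
 *   NDArray x = NDArrayFactory::create<float>('c', {5});
 *   x.linspace(1.0, 0.5);   // fills x with the progression {1.0, 1.5, 2.0, 2.5, 3.0}
 */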
|
|
|
|
|
|
|
|
/**
|
|
|
|
* calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...]
|
|
|
|
*/
|
|
|
|
double getTrace() const;
|
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
ResultSet multipleTensorsAlongDimension(const std::vector<int>& indices, const std::vector<int>& dimensions) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
ResultSet allTensorsAlongDimension(const std::initializer_list<int>& dimensions) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
ResultSet allTensorsAlongDimension(const std::vector<int>& dimensions) const;
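/**
 * Usage sketch (illustrative):
 *
 *   NDArray x = NDArrayFactory::create<float>('c', {2, 3, 4});
 *   ResultSet tads = x.allTensorsAlongDimension({1, 2});   // 2 sub-arrays, each of shape {3, 4}
 *   Nd4jLong n    = x.tensorsAlongDimension({1, 2});       // n == 2
 */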
|
2019-06-06 14:21:15 +02:00
|
|
|
|
2019-12-20 20:35:39 +01:00
|
|
|
ResultSet allExamples() const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* set _shapeInfo
|
|
|
|
*/
|
|
|
|
void setShapeInfo(const Nd4jLong *shapeInfo);
|
2020-03-02 10:49:41 +01:00
|
|
|
void setShapeInfo(const Nd4jLong *shapeInfo, const sd::DataType dtype);
|
2019-06-06 14:21:15 +02:00
|
|
|
void setShapeInfo(const ShapeDescriptor& descriptor);
|
2020-06-06 14:26:55 +02:00
|
|
|
void setShapeInfo(const ConstantShapeBuffer& shapeBuffer);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns absolute offset which corresponds to given sequential index
|
|
|
|
*/
|
|
|
|
Nd4jLong getOffset(const Nd4jLong i) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns reference on array element with given index
|
|
|
|
*/
|
|
|
|
template<typename T>
|
2020-05-14 17:06:13 +02:00
|
|
|
FORCEINLINE T& r(const Nd4jLong index);
|
2019-06-06 14:21:15 +02:00
|
|
|
template<typename T>
|
2020-05-14 17:06:13 +02:00
|
|
|
FORCEINLINE T& r(const Nd4jLong i, const Nd4jLong j);
|
2019-08-07 14:29:17 +02:00
|
|
|
template<typename T>
|
2020-05-14 17:06:13 +02:00
|
|
|
FORCEINLINE T& r(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k);
|
2019-12-06 09:10:44 +01:00
|
|
|
template<typename T>
|
2020-05-14 17:06:13 +02:00
|
|
|
FORCEINLINE T& r(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong w);
|
2019-12-06 09:10:44 +01:00
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns array element with given index
|
|
|
|
* i - element index in array
|
|
|
|
*/
|
|
|
|
template<typename T>
|
|
|
|
FORCEINLINE T t(const Nd4jLong i) const;
|
|
|
|
template<typename T>
|
|
|
|
FORCEINLINE T t(const Nd4jLong i, const Nd4jLong j) const;
|
2019-08-07 14:29:17 +02:00
|
|
|
template<typename T>
|
|
|
|
FORCEINLINE T t(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const;
|
2019-12-06 09:10:44 +01:00
|
|
|
template<typename T>
|
|
|
|
FORCEINLINE T t(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong w) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* default destructor
|
|
|
|
*/
|
|
|
|
~NDArray() noexcept = default;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* set _shapeInfo
|
|
|
|
*/
|
|
|
|
FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo);
|
2020-03-02 10:49:41 +01:00
|
|
|
FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo, const sd::DataType dtype);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns the value of "dim" dimension
|
|
|
|
*/
|
|
|
|
Nd4jLong sizeAt(const int dim) const;
|
|
|
|
|
2019-11-19 14:39:36 +01:00
|
|
|
/**
|
|
|
|
* returns stride of "dim" dimension
|
|
|
|
*/
|
|
|
|
Nd4jLong strideAt(const int dim) const;
|
|
|
|
|
2019-06-06 14:21:15 +02:00
|
|
|
/**
|
|
|
|
* returns order of array
|
|
|
|
*/
|
|
|
|
FORCEINLINE char ordering() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* return _isView
|
|
|
|
*/
|
|
|
|
FORCEINLINE bool isView() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns shape portion of shapeInfo
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong* shapeOf() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns strides portion of shapeInfo
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong* stridesOf() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns rank of array
|
|
|
|
*/
|
|
|
|
FORCEINLINE int rankOf() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns length of array
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong lengthOf() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns number of rows in array
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong rows() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns number of columns in array
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong columns() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns size of array elements type
|
|
|
|
*/
|
|
|
|
FORCEINLINE size_t sizeOfT() const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns element-wise-stride
|
|
|
|
*/
|
|
|
|
FORCEINLINE Nd4jLong ews() const;
|
|
|
|
|
|
|
|
// returns true if arrays have same shape
|
|
|
|
FORCEINLINE bool isSameShape(const NDArray *other) const;
|
|
|
|
FORCEINLINE bool isSameShape(const NDArray &other) const;
|
|
|
|
FORCEINLINE bool isSameShape(const std::initializer_list<Nd4jLong>& shape) const;
|
|
|
|
FORCEINLINE bool isSameShape(const std::vector<Nd4jLong>& shape) const;
|
|
|
|
FORCEINLINE bool areSameShapeAndType(const NDArray& other) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns true if these two NDArrays have same rank, dimensions, strides, ews and order
|
|
|
|
*/
|
2019-12-20 20:35:39 +01:00
|
|
|
FORCEINLINE bool isSameShapeStrict(const NDArray& other) const;
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* returns true if buffer && shapeInfo were defined (non nullptr)
|
|
|
|
*/
|
|
|
|
FORCEINLINE bool nonNull() const;
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
T r(const Nd4jLong i) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns array element with given index from linear buffer
|
|
|
|
* i - element index in array
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T e(const Nd4jLong i) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns element with given indexes from 2D array
|
|
|
|
* i - number of row
|
|
|
|
* j - number of column
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T e(const Nd4jLong i, const Nd4jLong j) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns element with given indexes from 3D array
|
|
|
|
* i - height
|
|
|
|
* j - width
|
|
|
|
* k - depth
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T e(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns element with given indexes from 4D array
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
T e(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong l) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* returns array-scalar containing element of this array with given index
|
|
|
|
* i - element index in array
|
|
|
|
*/
|
|
|
|
NDArray e(const Nd4jLong i) const;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* assigns given scalar to array element by given index, regards array buffer as linear
|
|
|
|
* i - element index in array
|
|
|
|
* value - scalar value to assign
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
void p(const Nd4jLong i, const T value);
|
|
|
|
|
|
|
|
void p(const Nd4jLong i, const NDArray& value);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* assigns given scalar to 2D array element by given indexes
|
|
|
|
* i - number of row
|
|
|
|
* j - number of column
|
|
|
|
* value - scalar value to assign
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
void p(const Nd4jLong i, const Nd4jLong j, const T value);
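/**
 * Usage sketch for the element accessors e() and p() (illustrative):
 *
 *   NDArray m = NDArrayFactory::create<float>('c', {2, 2});
 *   m.p(0, 1, 7.f);               // write: element (0,1) becomes 7
 *   float v = m.e<float>(0, 1);   // read:  v == 7
 */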
|
|
|
|
|
|
|
|
/**
|
|
|
|
* assigns given scalar to 3D array element by given indexes
|
|
|
|
* i - height
|
|
|
|
* j - width
|
|
|
|
* k - depth
|
|
|
|
* value - scalar value to assign
|
|
|
|
*/
|
|
|
|
template <typename T>
|
|
|
|
void p(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value);
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
void p(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong l, const T value);
|
2019-10-07 14:04:44 +02:00
|
|
|
void p(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong l, NDArray const& value);
|
2019-06-06 14:21:15 +02:00
|
|
|
|
|
|
|
|
|
|
|
template <typename T>
|
|
|
|
void pIdx(const Nd4jLong* indices, const T value);
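
        // Usage sketch for element writes (hypothetical, assuming a 2x3 FLOAT32 array 'arr'):
        //
        //     arr.p(5, 1.5f);                    // write via linear index
        //     arr.p(0, 2, -2.f);                 // write to row 0, column 2
        //     Nd4jLong idx[] = {1, 1};
        //     arr.pIdx(idx, 7.f);                // write via full coordinate array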

        /**
        *  returns true if array is 2D
        */
        FORCEINLINE bool isMatrix() const;

        /**
        *  returns true if array is vector
        */
        FORCEINLINE bool isVector() const;

        /**
        *  returns true if array is column vector
        */
        FORCEINLINE bool isColumnVector() const;

        /**
        *  returns true if array is row vector
        */
        FORCEINLINE bool isRowVector() const;

        /**
        *  returns true if all dimensions of array except one are unities, for example: [1,1,n,1], [n,1,1], [n], ...
        *  posOfNonUnityDim - one dimension with value > 1
        */
        FORCEINLINE bool isCommonVector(int& posOfNonUnityDim) const;

        /**
        *  returns true if array is scalar
        */
        FORCEINLINE bool isScalar() const;

        /**
        *  Returns data type of this array
        *  @return
        */
        FORCEINLINE DataType dataType() const;

        /**
        *  This method returns true if array is from Integer space
        *  @return
        */
        bool isZ() const;

        /**
        *  This method returns true if array is from Real space
        *  @return
        */
        bool isR() const;

        /**
        *  This method returns true if array is from Boolean space
        *  @return
        */
        bool isB() const;

        /**
        *  This method returns true if array contains Complex numbers
        *  @return
        */
        bool isC() const;

        /**
        *  This method returns true if array contains Strings
        *  @return
        */
        bool isS() const;
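
        // Type-space checks, sketched usage (hypothetical 'arr' of INT32):
        //
        //     arr.isZ();   // true  - integer space
        //     arr.isR();   // false - not a real (floating point) type
        //     arr.isB();   // false - not boolean
        //     arr.isC();   // false - not complex
        //     arr.isS();   // false - not a string array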

        template <typename T>
        std::vector<T> asVectorT();

        FORCEINLINE bool isAttached();

        NDArray* detach();

        FORCEINLINE bool operator==(const NDArray &other) const;

        FORCEINLINE bool operator!=(const NDArray &other) const;
    };


//////////////////////////////////////////////////////////////////////////
///// IMPLEMENTATION OF INLINE METHODS /////
//////////////////////////////////////////////////////////////////////////

bool NDArray::isAttached() {
    return this->_context->getWorkspace() != nullptr;
}

template <typename T, typename R>
FORCEINLINE R NDArray::templatedGet(void const* buffer, Nd4jLong index) const {
    auto b = reinterpret_cast<T const*>(buffer);
    auto v = static_cast<R>(b[index]);
    return v;
}

//////////////////////////////////////////////////////////////////////////
void NDArray::setShapeInfo(Nd4jLong *shapeInfo) {

    auto buffer = ConstantShapeHelper::getInstance().bufferForShapeInfo(shapeInfo);
    _shapeInfo  = buffer.primary();
    _shapeInfoD = buffer.special();

    if (shapeInfo != nullptr) {
        _dataType = ArrayOptions::dataType(_shapeInfo);
        if (ArrayOptions::arrayType(_shapeInfo) == ArrayType::EMPTY)
            _length = 0;
        else
            _length = shape::length(_shapeInfo);
    }
    else {
        _dataType = sd::DataType::INHERIT;
        _length = 0;
    }
}

//////////////////////////////////////////////////////////////////////////
void NDArray::setShapeInfo(Nd4jLong *shapeInfo, const sd::DataType dtype) {

    auto buffer = ConstantShapeHelper::getInstance().bufferForShapeInfo(shapeInfo);
    _shapeInfo  = buffer.primary();
    _shapeInfoD = buffer.special();

    if (shapeInfo != nullptr) {
        _dataType = dtype;
        if (ArrayOptions::arrayType(_shapeInfo) == ArrayType::EMPTY)
            _length = 0;
        else
            _length = shape::length(_shapeInfo);
    }
    else {
        _dataType = sd::DataType::INHERIT;
        _length = 0;
    }
}

//////////////////////////////////////////////////////////////////////////
char NDArray::ordering() const {
    return shape::order(_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isView() const {
    return _isView;
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong* NDArray::shapeOf() const {
    return shape::shapeOf(_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong* NDArray::stridesOf() const {
    return shape::stride(_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
int NDArray::rankOf() const {
    return shape::rank(_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong NDArray::lengthOf() const {
    return _length;
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong NDArray::rows() const {
    if (this->rankOf() == 1)
        return 1;

    if (this->rankOf() > 2)
        throw std::runtime_error("Array with rank > 2 can't have rows");

    return shapeOf()[0];
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong NDArray::columns() const {
    if (this->rankOf() == 1)
        return this->lengthOf();

    if (this->rankOf() > 2)
        throw std::runtime_error("Array with rank > 2 can't have columns");

    return shapeOf()[1];
}
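
//////////////////////////////////////////////////////////////////////////
// Sketch of rows()/columns() behaviour (hypothetical arrays, assuming the
// shape/dtype constructor shown here):
//
//     NDArray m('c', {4, 5}, sd::DataType::FLOAT32);
//     m.rows();      // 4
//     m.columns();   // 5
//
//     NDArray v('c', {7}, sd::DataType::FLOAT32);
//     v.rows();      // 1 - a rank-1 array is treated as a single row
//     v.columns();   // 7
//
// Calling either method on an array with rank > 2 throws std::runtime_error.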

//////////////////////////////////////////////////////////////////////////
size_t NDArray::sizeOfT() const {
    return DataTypeUtils::sizeOfElement(_dataType);
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong NDArray::ews() const {
    if (this->isEmpty() || this->rankOf() == 0)
        return 1;

    return shape::elementWiseStride(_shapeInfo);
}
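
//////////////////////////////////////////////////////////////////////////
// Note on ews(): an element-wise stride of 1 means the elements are laid out
// contiguously, so the primary buffer can be walked linearly. A sketch,
// assuming a freshly allocated (hence contiguous) array:
//
//     NDArray a('c', {2, 3}, sd::DataType::FLOAT32);
//     if (a.ews() == 1) {
//         auto buf = reinterpret_cast<float*>(a.buffer());
//         for (Nd4jLong i = 0; i < a.lengthOf(); ++i)
//             buf[i] = 0.f;                      // plain linear walk is valid
//     }
//
// Views (e.g. strided sub-arrays) may report ews() != 1, in which case
// getOffset(i) must be used instead of direct linear indexing.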

//////////////////////////////////////////////////////////////////////////
bool NDArray::nonNull() const {
    if (isEmpty())
        return true;

    if (!Environment::getInstance().isCPU())
        return getDataBuffer()->special() != nullptr && specialShapeInfo() != nullptr;

    return getDataBuffer()->primary() != nullptr && shapeInfo() != nullptr;
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isMatrix() const {
    if (isEmpty())
        return false;

    return 0 != shape::isMatrix(this->_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isVector() const {
    if (isEmpty())
        return false;

    if (rankOf() == 1)
        return true;

    return !isScalar() && shape::isVector(this->_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isColumnVector() const {
    if (isEmpty())
        return false;

    return !isScalar() && shape::isColumnVector(this->_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isRowVector() const {
    if (isEmpty())
        return false;

    // 1D edge case
    if (shape::rank(this->_shapeInfo) == 1)
        return true;

    return !isScalar() && shape::isRowVector(this->_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isCommonVector(int& posOfNonUnityDim) const {
    return shape::isCommonVector(_shapeInfo, posOfNonUnityDim);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isScalar() const {
    return 0 != shape::isScalar(this->_shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
Nd4jLong FORCEINLINE NDArray::memoryFootprint() {
    Nd4jLong size = this->lengthOf() * this->sizeOfT();
    size += shape::shapeInfoByteLength(this->rankOf());
    return size;
}

//////////////////////////////////////////////////////////////////////////
// definitions of inline functions must stay in the header file
bool NDArray::isSameShape(const std::vector<Nd4jLong>& shape) const {
    if (this->isScalar() && shape.size() == 1 && shape[0] == 0)
        return true;

    if (this->rankOf() != (int) shape.size())
        return false;

    for (int e = 0; e < this->rankOf(); e++) {
        if (this->shapeOf()[e] != shape[e] && shape[e] != -1)
            return false;
    }

    return true;
}
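
//////////////////////////////////////////////////////////////////////////
// Sketch: -1 in the requested shape acts as a wildcard for that dimension
// (hypothetical array 'x' of shape {3, 4}):
//
//     x.isSameShape({3, 4});     // true
//     x.isSameShape({3, -1});    // true  - second dimension is not checked
//     x.isSameShape({4, 3});     // false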

//////////////////////////////////////////////////////////////////////////
bool NDArray::isSameShape(const NDArray *other) const {
    if (this->isEmpty() != other->isEmpty())
        return false;

    return isSameShape(std::vector<Nd4jLong>(other->_shapeInfo + 1, other->_shapeInfo + 1 + other->_shapeInfo[0]));
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isSameShape(const NDArray &other) const {
    return isSameShape(&other);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isSameShape(const std::initializer_list<Nd4jLong>& other) const {
    return isSameShape(std::vector<Nd4jLong>(other));
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::areSameShapeAndType(const NDArray& other) const {
    if (rankOf() != other.rankOf() || _dataType != other._dataType)
        return false;

    for (int i = 0; i < rankOf(); ++i)
        if (sizeAt(i) != other.sizeAt(i))
            return false;

    return true;
}

//////////////////////////////////////////////////////////////////////////
// returns true if these two NDArrays have the same _shapeInfo
// (definitions of inline functions must stay in the header file)
bool NDArray::isSameShapeStrict(const NDArray& other) const {
    return shape::equalsStrict(_shapeInfo, other._shapeInfo);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::isEmpty() const {
    if (this->_shapeInfo == nullptr)
        return false;

    return ArrayOptions::arrayType(this->shapeInfo()) == ArrayType::EMPTY;
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::operator==(const NDArray &other) const {
    // if (this->dataType() != other.dataType())    // this comparison is already present in equalsTo
    //     return false;

    if (!this->isSameShape(&other))
        return false;

    return this->equalsTo(&other);
}

//////////////////////////////////////////////////////////////////////////
bool NDArray::operator!=(const NDArray &other) const {
    if (this->dataType() != other.dataType())
        return true;

    if (!this->isSameShape(&other))
        return true;

    return !this->equalsTo(&other);
}

//////////////////////////////////////////////////////////////////////////
DataType NDArray::dataType() const {
    return _dataType;
    // return ArrayOptions::dataType(_shapeInfo);
}
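
//////////////////////////////////////////////////////////////////////////
// Sketch of equality vs. data type (hypothetical arrays; the scalar assign()
// overload used below is an assumption):
//
//     NDArray a('c', {2, 2}, sd::DataType::FLOAT32);
//     NDArray b('c', {2, 2}, sd::DataType::FLOAT32);
//     a.assign(1.f);
//     b.assign(1.f);
//     bool eq = (a == b);                          // true: same shape and equal values
//     size_t bytes = a.lengthOf() * a.sizeOfT();   // 4 elements * 4 bytes for FLOAT32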

////////////////////////////////////////////////////////////////////////
template <typename T>
T& NDArray::r(const Nd4jLong i) {

    // if (i >= _length)
    //     throw std::invalid_argument("NDArray::r(i): input index is out of array length !");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::r(i): type of array is not equal to template type T!");

    syncToHost();
    tickWriteHost();

    return *(reinterpret_cast<T*>(bufferWithOffset(getOffset(i))));
}

////////////////////////////////////////////////////////////////////////
template <typename T>
T& NDArray::r(const Nd4jLong i, const Nd4jLong j) {

    if (rankOf() != 2 || i >= sizeAt(0) || j >= sizeAt(1))
        throw std::invalid_argument("NDArray::r(i,j): one of the input indexes is out of array length or rank != 2!");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::r(i,j): type of array is not equal to template type T!");

    syncToHost();
    tickWriteHost();

    return *(reinterpret_cast<T*>(bufferWithOffset(i * strideAt(0) + j * strideAt(1))));
}

////////////////////////////////////////////////////////////////////////
template <typename T>
T& NDArray::r(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) {

    if (rankOf() != 3 || i >= sizeAt(0) || j >= sizeAt(1) || k >= sizeAt(2))
        throw std::invalid_argument("NDArray::r(i,j,k): one of the input indexes is out of array length or rank != 3!");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::r(i,j,k): type of array is not equal to template type T!");

    syncToHost();
    tickWriteHost();

    return *(reinterpret_cast<T*>(bufferWithOffset(i * strideAt(0) + j * strideAt(1) + k * strideAt(2))));
}

////////////////////////////////////////////////////////////////////////
template <typename T>
T& NDArray::r(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong w) {

    if (rankOf() != 4 || i >= sizeAt(0) || j >= sizeAt(1) || k >= sizeAt(2) || w >= sizeAt(3))
        throw std::invalid_argument("NDArray::r(i,j,k,w): one of the input indexes is out of array length or rank != 4!");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::r(i,j,k,w): type of array is not equal to template type T!");

    syncToHost();
    tickWriteHost();

    return *(reinterpret_cast<T*>(bufferWithOffset(i * strideAt(0) + j * strideAt(1) + k * strideAt(2) + w * strideAt(3))));
}
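
////////////////////////////////////////////////////////////////////////
// Sketch of r<T>(): unlike t<T>(), r<T>() returns a writable reference, so it
// can appear on the left-hand side of an assignment (hypothetical FLOAT32
// array 'arr' of rank 2):
//
//     arr.r<float>(0)    += 1.f;    // in-place update via linear index
//     arr.r<float>(1, 2)  = 5.f;    // write to row 1, column 2
//
// The template type must match the array's data type exactly, otherwise
// std::invalid_argument is thrown.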

////////////////////////////////////////////////////////////////////////
template <typename T>
T NDArray::t(const Nd4jLong i) const {

    // if (i >= _length)
    //     throw std::invalid_argument("NDArray::t(i): input index is out of array length !");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::t(i): type of array is not equal to template type T!");

    syncToHost();

    return *(reinterpret_cast<const T*>(bufferWithOffset(getOffset(i))));
}

////////////////////////////////////////////////////////////////////////
template <typename T>
T NDArray::t(const Nd4jLong i, const Nd4jLong j) const {

    if (rankOf() != 2 || i >= sizeAt(0) || j >= sizeAt(1))
        throw std::invalid_argument("NDArray::t(i,j): one of input indexes is out of array length or rank!=2 !");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::t(i,j): type of array is not equal to template type T!");

    syncToHost();

    return *(reinterpret_cast<const T*>(bufferWithOffset(i * strideAt(0) + j * strideAt(1))));
}

////////////////////////////////////////////////////////////////////////
template <typename T>
T NDArray::t(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const {

    if (rankOf() != 3 || i >= sizeAt(0) || j >= sizeAt(1) || k >= sizeAt(2))
        throw std::invalid_argument("NDArray::t(i,j,k): one of input indexes is out of array length or rank!=3!");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::t(i,j,k): type of array is not equal to template type T!");

    syncToHost();

    return *(reinterpret_cast<const T*>(bufferWithOffset(i * strideAt(0) + j * strideAt(1) + k * strideAt(2))));
}

////////////////////////////////////////////////////////////////////////
template <typename T>
T NDArray::t(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const Nd4jLong w) const {

    if (rankOf() != 4 || i >= sizeAt(0) || j >= sizeAt(1) || k >= sizeAt(2) || w >= sizeAt(3))
        throw std::invalid_argument("NDArray::t(i,j,k,w): one of input indexes is out of array length or rank!=4!");
    if (DataTypeUtils::fromT<T>() != _dataType)
        throw std::invalid_argument("NDArray::t(i,j,k,w): type of array is not equal to template type T!");

    syncToHost();

    return *(reinterpret_cast<const T*>(bufferWithOffset(i * strideAt(0) + j * strideAt(1) + k * strideAt(2) + w * strideAt(3))));
}
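
////////////////////////////////////////////////////////////////////////
// Sketch of t<T>(): read-only access with the same type-strictness as r<T>()
// (hypothetical FLOAT32 array 'arr' of rank 2):
//
//     float v = arr.t<float>(1, 0);      // element at row 1, column 0
//     // arr.t<double>(1, 0);            // would throw: template type != FLOAT32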

#ifndef __JAVACPP_HACK__
////////////////////////////////////////////////////////////////////////
std::shared_ptr<DataBuffer> NDArray::getDataBuffer() const {
    return _buffer;
}

////////////////////////////////////////////////////////////////////////
std::shared_ptr<DataBuffer> NDArray::dataBuffer() {
    return _buffer;
}
#endif

////////////////////////////////////////////////////////////////////////
const void* NDArray::buffer() const {
    return _buffer->primary() != nullptr ? static_cast<int8_t*>(_buffer->primary()) + (_offset * sizeOfT()) : nullptr;
}

//////////////////////////////////////////////////////////////////////////
void* NDArray::buffer() {
    return _buffer->primary() != nullptr ? static_cast<int8_t*>(_buffer->primary()) + (_offset * sizeOfT()) : nullptr;
}

//////////////////////////////////////////////////////////////////////////
const Nd4jLong* NDArray::shapeInfo() const {
    return _shapeInfo;
}

////////////////////////////////////////////////////////////////////////
const Nd4jLong* NDArray::specialShapeInfo() const {
    if (_shapeInfoD == nullptr)
        return _shapeInfo;
    // FIXME: this should be fixed once CUDA backend added
    return _shapeInfoD;
}

////////////////////////////////////////////////////////////////////////
Nd4jLong NDArray::bufferOffset() const {
    return _offset;
}
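
////////////////////////////////////////////////////////////////////////
// Sketch of the raw accessors (hypothetical FLOAT32 array 'arr'):
//
//     const Nd4jLong* shape = arr.shapeInfo();                          // host shapeInfo
//     const float* data = reinterpret_cast<const float*>(arr.buffer());
//
// buffer() already accounts for bufferOffset(), i.e. it points at the first
// element of this array (or view), not at the start of the underlying DataBuffer.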

#if defined(__CUDACC__) //&& defined(BUILD_TESTS)
// for CUDA we still need this stuff inline
#include <array/NDArrayLambda.hXX>
#endif

}

#endif