* Added declarations for decode/encode_bitmap ops. Signed-off-by: shugeo <sgazeos@gmail.com>
* Added implementation for bitmap encoding/decoding ops. Signed-off-by: shugeo <sgazeos@gmail.com>
* Added helpers for encode/decode bitmap ops. Signed-off-by: shugeo <sgazeos@gmail.com>
* Refactored encodingBitmap helper. Signed-off-by: shugeo <sgazeos@gmail.com>
* threshold encode/decode skeleton
* helper skeleton
* minor import fix
* encoder shape fn & op impl
* thresholdEncode cpu impl Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* thresholdDecode cpu impl Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Only cosmetic changes. Signed-off-by: shugeo <sgazeos@gmail.com>
* placeholder Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Added CUDA implementation for bitmap decode helper. Signed-off-by: shugeo <sgazeos@gmail.com>
* cuda thresholdEstimate Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* cuda thresholdDecode Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* next step Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* nano cmakelist update (get rid of CLion section); fixed forgotten throw in AtomicTests. Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* thresholdEncode cuda impl Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* Added tests for bitmap encoding/decoding ops. Signed-off-by: shugeo <sgazeos@gmail.com>
* Fixed tests for encode/decode bitmaps. Signed-off-by: shugeo <sgazeos@gmail.com>
* Refactored decode/encode helpers. Signed-off-by: shugeo <sgazeos@gmail.com>
* Fixed crashes with bitmap decode/encode helpers. Signed-off-by: shugeo <sgazeos@gmail.com>
* bitmap encode/decode CPU Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* bitmap encode/decode CUDA Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* C API removed for threshold/bitmap encode Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* EncodeBitmap/DecodeBitmap Java side Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* EncodeThreshold/DecodeThreshold Java side Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* EncodeThreshold/DecodeThreshold Java side Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* few more tests for threshold encoding Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* minor test tweak Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* two special tests Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* encodeBitmap CPU fix Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* parallel_long/parallel_double proper spans fix Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* encodeThreshold CUDA fix Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* nano fix Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* grid tweaks Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* RTX adaptation for thresholdEncode Signed-off-by: raver119 <raver119@gmail.com>
* don't allow threshold encoding for length < 2 Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* get rid of NDArrayCompressor in EncodingHandler Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more minor update of EncodingHandler Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* one more minor tweak of EncodingHandler Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* matmul now allows integer data types; EncodingHandler boundary default value; a few tests for integer matmul. Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* minor fix of CUDA bitmap encode Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* boundary changed to integer everywhere Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* boundary changed to integer everywhere Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* re-enable CUDA deallocator Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* threshold encoder fix for systems without omp Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* encode_threshold now requires non-negative boundary; minor tweak in EncodingHandler. Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* restore parallelism in decode_bitmap Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* fall back to omp for encode_bitmap cpu Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* single time casts Signed-off-by: raver119@gmail.com <raver119@gmail.com>
* additional test for encode_threshold; sync buffers to device before calling for shape function. Signed-off-by: raver119@gmail.com <raver119@gmail.com>

Co-authored-by: shugeo <sgazeos@gmail.com>
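The threshold encode/decode ops listed above are used for sparse compression (e.g., of gradient updates): elements whose magnitude reaches a non-negative boundary are stored as signed indices, and the encoded amount is removed from the local residual so it is not sent twice. The snippet below is a minimal CPU-side sketch of that idea only; thresholdEncodeSketch and thresholdDecodeSketch are hypothetical names for illustration, not the library's actual ops, helpers, or kernels.

#include <cmath>
#include <cstdint>
#include <vector>

// Hypothetical sketch: gather signed 1-based indices of elements whose magnitude
// reaches `boundary`, and pull the encoded amount out of the residual buffer.
static std::vector<int32_t> thresholdEncodeSketch(std::vector<float> &residual, float boundary) {
    std::vector<int32_t> encoded;
    for (int32_t i = 0; i < static_cast<int32_t>(residual.size()); i++) {
        if (std::abs(residual[i]) >= boundary) {
            encoded.push_back(residual[i] > 0 ? i + 1 : -(i + 1)); // sign of the index carries the direction
            residual[i] += residual[i] > 0 ? -boundary : boundary; // keep the remainder for later rounds
        }
    }
    return encoded;
}

// Hypothetical sketch: decoding adds +/- boundary back into the target at each encoded index.
static void thresholdDecodeSketch(const std::vector<int32_t> &encoded, float boundary, std::vector<float> &target) {
    for (auto idx : encoded) {
        auto i = std::abs(idx) - 1;
        target[i] += idx > 0 ? boundary : -boundary;
    }
}

Decoding into a zero-initialized target of the same length recovers the thresholded portion of the original signal, which is the round-trip property such a scheme relies on.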
/*******************************************************************************
 * Copyright (c) 2019 Konduit K.K.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
//

#include "testlayers.h"
#include <ops/declarable/CustomOperations.h>
#include <array/NDArray.h>
#include <ops/ops.h>
#include <helpers/GradCheck.h>
#include <helpers/RandomLauncher.h>
#include <exceptions/cuda_exception.h>

using namespace sd;

class AtomicTests : public testing::Test {
public:
    AtomicTests() {
        //
    }
};

// Each kernel below reduces its input 4:1: for element index e, four consecutive values are
// combined into result[(e - e % 4) / 4] via the corresponding nd4j_atomic* helper, so every
// output slot receives concurrent atomic updates from multiple threads.
template <typename T>
static _CUDA_G void multiplyKernel(void *vbuffer, uint64_t length, void *vresult) {
    auto buffer = reinterpret_cast<T*>(vbuffer);
    auto result = reinterpret_cast<T*>(vresult);

    auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    // grid-stride loop over the flat input buffer
    for (auto e = tid; e < length; e += gridDim.x * blockDim.x) {
        auto rem = e % 4;
        auto i = (e - rem) / 4;

        sd::math::atomics::nd4j_atomicMul<T>(&result[i], buffer[e]);
    }
}

template <typename T>
static void multiplyLauncher(void *vbuffer, uint64_t length, void *vresult) {
    multiplyKernel<T><<<256, 256, 1024, *sd::LaunchContext::defaultContext()->getCudaStream()>>>(vbuffer, length, vresult);
    auto err = cudaStreamSynchronize(*sd::LaunchContext::defaultContext()->getCudaStream());
    if (err != 0)
        throw sd::cuda_exception::build("multiply failed", err);
}

template <typename T>
static _CUDA_G void sumKernel(void *vbuffer, uint64_t length, void *vresult) {
    auto buffer = reinterpret_cast<T*>(vbuffer);
    auto result = reinterpret_cast<T*>(vresult);

    auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (auto e = tid; e < length; e += gridDim.x * blockDim.x) {
        auto rem = e % 4;
        auto i = (e - rem) / 4;

        sd::math::atomics::nd4j_atomicAdd<T>(&result[i], buffer[e]);
    }
}

template <typename T>
static void sumLauncher(void *vbuffer, uint64_t length, void *vresult) {
    sumKernel<T><<<256, 256, 1024, *sd::LaunchContext::defaultContext()->getCudaStream()>>>(vbuffer, length, vresult);
    auto err = cudaStreamSynchronize(*sd::LaunchContext::defaultContext()->getCudaStream());
    if (err != 0)
        throw sd::cuda_exception::build("sum failed", err);
}

template <typename T>
static _CUDA_G void subKernel(void *vbuffer, uint64_t length, void *vresult) {
    auto buffer = reinterpret_cast<T*>(vbuffer);
    auto result = reinterpret_cast<T*>(vresult);

    auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (auto e = tid; e < length; e += gridDim.x * blockDim.x) {
        auto rem = e % 4;
        auto i = (e - rem) / 4;

        sd::math::atomics::nd4j_atomicSub<T>(&result[i], buffer[e]);
    }
}

template <typename T>
static void subLauncher(void *vbuffer, uint64_t length, void *vresult) {
    subKernel<T><<<256, 256, 1024, *sd::LaunchContext::defaultContext()->getCudaStream()>>>(vbuffer, length, vresult);
    auto err = cudaStreamSynchronize(*sd::LaunchContext::defaultContext()->getCudaStream());
    if (err != 0)
        throw sd::cuda_exception::build("sub failed", err);
}

template <typename T>
static _CUDA_G void divKernel(void *vbuffer, uint64_t length, void *vresult) {
    auto buffer = reinterpret_cast<T*>(vbuffer);
    auto result = reinterpret_cast<T*>(vresult);

    auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (auto e = tid; e < length; e += gridDim.x * blockDim.x) {
        auto rem = e % 4;
        auto i = (e - rem) / 4;

        sd::math::atomics::nd4j_atomicDiv<T>(&result[i], buffer[e]);
    }
}

template <typename T>
static void divLauncher(void *vbuffer, uint64_t length, void *vresult) {
    divKernel<T><<<256, 256, 1024, *sd::LaunchContext::defaultContext()->getCudaStream()>>>(vbuffer, length, vresult);
    auto err = cudaStreamSynchronize(*sd::LaunchContext::defaultContext()->getCudaStream());
    if (err != 0)
        throw sd::cuda_exception::build("div failed", err);
}

// BUILD_SINGLE_SELECTOR dispatches to the launcher instantiation matching the array's runtime
// data type: multiply/sum are built for NUMERIC_TYPES, while sub/div are limited to FLOAT_TYPES
// (the sub/div tests below therefore iterate over floating-point types only).
static void multiplyHost(NDArray &input, NDArray &output) {
    BUILD_SINGLE_SELECTOR(input.dataType(), multiplyLauncher, (input.specialBuffer(), input.lengthOf(), output.specialBuffer()), NUMERIC_TYPES);
}

static void sumHost(NDArray &input, NDArray &output) {
    BUILD_SINGLE_SELECTOR(input.dataType(), sumLauncher, (input.specialBuffer(), input.lengthOf(), output.specialBuffer()), NUMERIC_TYPES);
}

static void subHost(NDArray &input, NDArray &output) {
    BUILD_SINGLE_SELECTOR(input.dataType(), subLauncher, (input.specialBuffer(), input.lengthOf(), output.specialBuffer()), FLOAT_TYPES);
}

static void divHost(NDArray &input, NDArray &output) {
    BUILD_SINGLE_SELECTOR(input.dataType(), divLauncher, (input.specialBuffer(), input.lengthOf(), output.specialBuffer()), FLOAT_TYPES);
}

TEST_F(AtomicTests, test_multiply) {
    std::vector<sd::DataType> dtypes = {sd::DataType::FLOAT32, sd::DataType::DOUBLE, sd::DataType::INT16, sd::DataType::HALF};

    for (auto t:dtypes) {
        nd4j_printf("Trying data type [%s]\n", DataTypeUtils::asString(t).c_str());
        NDArray input('c', {4, 25}, t);
        NDArray output('c', {input.lengthOf() / 4}, t);
        NDArray exp = output.ulike();

        input.assign(2);
        output.assign(2);
        exp.assign(32);

        multiplyHost(input, output);
        ASSERT_EQ(exp, output);
    }
}

TEST_F(AtomicTests, test_multiply_2) {
    std::vector<sd::DataType> dtypes = {sd::DataType::FLOAT32, sd::DataType::DOUBLE, sd::DataType::HALF, sd::DataType::BFLOAT16};

    for (auto t:dtypes) {
        nd4j_printf("Trying data type [%s]\n", DataTypeUtils::asString(t).c_str());
        NDArray input('c', {4, 25}, t);
        NDArray output('c', {input.lengthOf() / 4}, t);
        NDArray exp = output.ulike();

        input.assign(1.5);
        output.assign(2);
        exp.assign(10.125);

        multiplyHost(input, output);
//        output.printBuffer("multiply 2");
        ASSERT_EQ(exp, output);
    }
}

TEST_F(AtomicTests, test_sum) {
    std::vector<sd::DataType> dtypes = {sd::DataType::FLOAT32, sd::DataType::DOUBLE, sd::DataType::BFLOAT16, sd::DataType::HALF, sd::DataType::INT16};

    for (auto t:dtypes) {
        nd4j_printf("Trying data type [%s]\n", DataTypeUtils::asString(t).c_str());
        NDArray input('c', {4, 25}, t);
        NDArray output('c', {input.lengthOf() / 4}, t);
        NDArray exp = output.ulike();

        input.assign(1);
        output.assign(1);
        exp.assign(5);

        sumHost(input, output);
//        output.printIndexedBuffer("Sum");
        ASSERT_EQ(exp, output);
    }
}

TEST_F(AtomicTests, test_sub) {
    std::vector<sd::DataType> dtypes = {sd::DataType::FLOAT32, sd::DataType::DOUBLE, sd::DataType::HALF};

    for (auto t:dtypes) {
        nd4j_printf("Trying data type [%s]\n", DataTypeUtils::asString(t).c_str());
        NDArray input('c', {4, 25}, t);
        NDArray output('c', {input.lengthOf() / 4}, t);
        NDArray exp = output.ulike();

        input.assign(1);
        output.assign(5);
        exp.assign(1);

        subHost(input, output);
//        output.printBuffer("Sub");

        ASSERT_EQ(exp, output);
    }
}

TEST_F(AtomicTests, test_div) {
    std::vector<sd::DataType> dtypes = {sd::DataType::FLOAT32, sd::DataType::DOUBLE, sd::DataType::BFLOAT16, sd::DataType::HALF};

    for (auto t:dtypes) {
        nd4j_printf("Trying data type [%s]\n", DataTypeUtils::asString(t).c_str());
        NDArray input('c', {4, 25}, t);
        NDArray output('c', {input.lengthOf() / 4}, t);
        NDArray exp = output.ulike();

        input.assign(2);
        output.assign(32);
        exp.assign(2);

        divHost(input, output);
//        output.printBuffer("Div");
        ASSERT_EQ(exp, output);
    }
}