Eliminated error with resize implementation. (#418)

* Eliminated error with resize implementation.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored resize caller implementation.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored image.resize op helper.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added stub implementations for the missing resize methods.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added resize_images op. Refactored image_resize op.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored matrix_band_part op and test.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored resize_images op to handle the preserve_aspect_ratio flag properly.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored resize_images and tests for resizeArea method.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored resize methods and test.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added new methods for TF2 resize op.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added a portion of the resize algorithms from TF2.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added routine to process resize with given algorithm.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added new image resize via scale and translate process helper.

Signed-off-by: shugeo <sgazeos@gmail.com>

* CPU implementation of the V2 image resize operation helpers.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added implementation and test for the lanczos5 resize algorithm.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added prints for span computing.

Signed-off-by: shugeo <sgazeos@gmail.com>

* The first working implementation and tests for lanczos5 resize.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Removed leftover debug prints.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored image_resize op and tests.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Lanczos3 resize implementation and tests.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Implemented bicubic resize algorithm and tests.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added a couple of tests and cosmetic changes with image resize helper.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added bilinear implementation for image resize.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored bicubic algorithm and implemented area and nearest neighbor algorithms for image resize on the CPU.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added a couple of tests for nearest neighbor and area resize.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Cosmetic changes for cpu implementation and added cuda implementation for resize methods.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Separated cuda implementation of v2 image resize.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added kernels for span calculation and span gathering with new image resize cuda implementation.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored cuda implementation of image resize kernels.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Finished the first working implementation of image resize op and tests.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Fixed resize_images and image_resize ops.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored shape construction and output validation.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Fixed test to be properly initialized with float values.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added 3D input support for resize ops.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Fixed test for resize_images op.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Fixed test and call for resize_images op.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored image_resize op output data type handling for nearest neighbors method and tests.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Fixed issue with wrong resize method.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added validation of wrong resize methods for resize ops.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Refactored resize methods and test.

Signed-off-by: shugeo <sgazeos@gmail.com>

* Added output data type validation for given resize method.

Signed-off-by: shugeo <sgazeos@gmail.com>

* - ResizeMethod rearranged in order to match C++ side
- minor test fix

Signed-off-by: raver119@gmail.com <raver119@gmail.com>

* Refactored resize_images op.

Signed-off-by: shugeo <sgazeos@gmail.com>

Co-authored-by: raver119@gmail.com <raver119@gmail.com>
shugeo committed 2020-05-27 21:15:03 +03:00 via GitHub (branch master, parent 5568b9d72f, commit 2aed216c2a)
16 changed files with 1753 additions and 431 deletions


@@ -1,5 +1,5 @@
 /*******************************************************************************
- * Copyright (c) 2019 Konduit K.K.
+ * Copyright (c) 2020 Konduit K.K.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Apache License, Version 2.0 which is available at
@@ -32,57 +32,65 @@ namespace sd {
            auto size = INPUT_VARIABLE(1);
            auto output = OUTPUT_VARIABLE(0);
            int width;
            int height;
-           bool preserveAspectRatio = false; // - default value
            bool antialias = false;
-           REQUIRE_TRUE(size->lengthOf() == 2, 0, "resize_bilinear: Resize params is a pair of values, not %lld.", size->lengthOf());
-           width = size->e<int>(0);
-           height = size->e<int>(1);
-           if (block.getBArguments()->size()) {
-               preserveAspectRatio = B_ARG(0);
-               if (block.getBArguments()->size() > 1)
-                   antialias = B_ARG(1);
+           REQUIRE_TRUE(size->lengthOf() == 2, 0, "image_resize: Resize params is a pair of values, not %lld.", size->lengthOf());
+           width = size->e<int>(1);
+           height = size->e<int>(0);
+           if (block.numB() == 2) {
+               antialias = B_ARG(1);
            }
            auto method = helpers::ImageResizeMethods::kResizeBilinear;
            if (block.numI() == 1) {
                method = (helpers::ImageResizeMethods)INT_ARG(0);
            }
+           REQUIRE_TRUE(method == helpers::ImageResizeMethods::kResizeNearest || output->dataType() == DataType::FLOAT32, 0, "image_resize: Output data type should be FLOAT32 for this method %i", (int)method );
+           REQUIRE_TRUE(method >= helpers::ImageResizeMethods::kResizeFirst && method <= helpers::ImageResizeMethods::kResizeLast, 0, "image_resize: Resize method should be between %i and %i, but %i was given.", (int)helpers::ImageResizeMethods::kResizeFirst, (int)helpers::ImageResizeMethods::kResizeLast, (int)method);
+           auto inRank = image->rankOf();
+           REQUIRE_TRUE(inRank >=3 && inRank <=4, 0, "image_resize: Input rank should be 4 or 3, but %i given.", image->rankOf());
+           auto source = inRank == 4?image->reshape(image->ordering(), {image->sizeAt(0), image->sizeAt(1), image->sizeAt(2), image->sizeAt(3)}):image->reshape(image->ordering(), {1, image->sizeAt(0), image->sizeAt(1), image->sizeAt(2)});
+           auto target = inRank == 4?output->reshape(output->ordering(), {output->sizeAt(0), output->sizeAt(1), output->sizeAt(2), output->sizeAt(3)}, false) : output->reshape(output->ordering(), {1, output->sizeAt(0), output->sizeAt(1), output->sizeAt(2)}, false);
-           return helpers::resizeFunctor(block.launchContext(), image, width, height, method, preserveAspectRatio, antialias, output);
+           return helpers::resizeFunctor(block.launchContext(), image, width, height, method, antialias, output);
        }
        DECLARE_SHAPE_FN(image_resize) {
-           auto shapeList = SHAPELIST();
            auto in = inputShape->at(0);
            Nd4jLong* outputShape;
+           auto method = helpers::ImageResizeMethods::kResizeBilinear;
+           if (block.numI() == 1) {
+               method = (helpers::ImageResizeMethods)INT_ARG(0);
+           }
            int width;
            int height;
+           double ratio = shape::sizeAt(in, 1) / (0.0 + shape::sizeAt(in, 2));
            auto newImageSize = INPUT_VARIABLE(1);
            REQUIRE_TRUE(newImageSize->lengthOf() == 2, 0, "resize_bilinear: Resize params is a pair of values, not %i.", newImageSize->lengthOf());
            REQUIRE_TRUE(block.numI() <= 1, 0, "resize_bilinear: Resize params already given by the second param. Int params are expensive.");
-           width = newImageSize->e<int>(0);
-           height = newImageSize->e<int>(1);
+           width = newImageSize->e<int>(1);
+           height = newImageSize->e<int>(0);
+           if (block.numB() > 0) {
+               if (B_ARG(0)) {
+                   width = math::nd4j_ceil<double, int>(height / ratio);
+               }
+           }
+           auto dtype = DataType::FLOAT32;
+           if (method == helpers::ImageResizeMethods::kResizeNearest)
+               dtype = ArrayOptions::dataType(in);
+           auto shape = ConstantShapeHelper::getInstance()->createShapeInfo(dtype, 'c', shape::rank(in) == 4?std::vector<Nd4jLong>{in[1], height, width, in[4]}:std::vector<Nd4jLong>{ height, width, in[4]});
-           ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(4), Nd4jLong);
-           outputShape[0] = 4;
-           outputShape[1] = in[1];
-           outputShape[2] = width;
-           outputShape[3] = height;
-           outputShape[4] = in[4];
-           ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
-           shapeList->push_back(CONSTANT(outputShape));
-           return shapeList;
+           return SHAPELIST(shape);
        }
        DECLARE_TYPES(image_resize) {
            getOpDescriptor()
-                   ->setAllowedInputTypes(0, {ALL_FLOATS})
+                   ->setAllowedInputTypes(0, {ALL_INTS, ALL_FLOATS})
                    ->setAllowedInputTypes(1, {ALL_INTS})
-                   ->setAllowedOutputTypes({ALL_FLOATS});
+                   ->setAllowedOutputTypes({ALL_FLOATS, ALL_INTS});
        }
    }


@@ -0,0 +1,135 @@
/*******************************************************************************
* Copyright (c) 2020 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author sgazeos@gmail.com
//
#include <system/op_boilerplate.h>
#if NOT_EXCLUDED(OP_resize_images)
#include <ops/declarable/CustomOperations.h>
#include <ops/declarable/helpers/image_resize.h>
namespace sd {
namespace ops {
CUSTOM_OP_IMPL(resize_images, 1, 1, false, 0, 0) {
auto image = INPUT_VARIABLE(0);
auto output = OUTPUT_VARIABLE(0);
int width = output->sizeAt(2);
int height = output->sizeAt(1);
int method = helpers::ImageResizeMethods::kResizeBilinear;
if (block.width() > 1) {
auto size = INPUT_VARIABLE(1);
REQUIRE_TRUE(size->lengthOf() == 2, 0, "resize_images: Resize params is a pair of values, not %lld.", size->lengthOf());
// width = size->e<int>(1);
// height = size->e<int>(0);
if (block.width() > 2) {
auto methodT = INPUT_VARIABLE(2);
REQUIRE_TRUE(methodT->isZ() && methodT->isScalar(), 0, "resize_images: Method tensor should be integer scalar, but rank of %i tensor given.", methodT->rankOf());
method = methodT->e<int>(0);
}
else if (block.numI() == 1) {
method = I_ARG(0);
}
}
else {
REQUIRE_TRUE(block.numI() > 1 && block.numI() < 4, 0, "resize_images: Method and size should be given properly.");
if(block.numI() == 3) { // full stack of args
// height = I_ARG(0);
// width = I_ARG(1);
method = I_ARG(2);
}
else if (block.numI() == 2) {
// height = I_ARG(0);
// width = I_ARG(1);
}
}
bool preserveAspectRatio = false; // - default value
bool alignCorners = false;
if (block.numB()) {
alignCorners = B_ARG(0);
if (block.numB() > 1)
preserveAspectRatio = B_ARG(1);
}
REQUIRE_TRUE(method >= helpers::ImageResizeMethods::kResizeFirst && method <= helpers::ImageResizeMethods::kResizeOldLast, 0, "resize_images: Resize method should be between %i and %i, but %i was given.", (int)helpers::ImageResizeMethods::kResizeFirst, (int)helpers::ImageResizeMethods::kResizeOldLast, (int)method);
REQUIRE_TRUE(method == helpers::ImageResizeMethods::kResizeNearest || output->dataType() == DataType::FLOAT32, 0, "image_resize: Output data type should be FLOAT32 for this method %i", (int)method );
auto inRank = image->rankOf();
REQUIRE_TRUE(inRank >=3 && inRank <=4, 0, "image_resize: Input rank should be 4 or 3, but %i given.", inRank);
auto source = inRank == 4?image->reshape(image->ordering(), {image->sizeAt(0), image->sizeAt(1), image->sizeAt(2), image->sizeAt(3)}):image->reshape(image->ordering(), {1, image->sizeAt(0), image->sizeAt(1), image->sizeAt(2)});
auto target = inRank == 4?output->reshape(output->ordering(), {output->sizeAt(0), output->sizeAt(1), output->sizeAt(2), output->sizeAt(3)}, false) : output->reshape(output->ordering(), {1, output->sizeAt(0), output->sizeAt(1), output->sizeAt(2)}, false);
return helpers::resizeImagesFunctor(block.launchContext(), &source, width, height, (helpers::ImageResizeMethods)method, alignCorners, &target);
}
DECLARE_SHAPE_FN(resize_images) {
auto shapeList = SHAPELIST();
auto in = inputShape->at(0);
Nd4jLong* outputShape;
int width;
int height;
if (block.width() > 1) {
auto size = INPUT_VARIABLE(1);
REQUIRE_TRUE(size->lengthOf() == 2, 0, "resize_images: Resize params is a pair of values, not %lld.", size->lengthOf());
width = size->e<int>(1);
height = size->e<int>(0);
}
else {
REQUIRE_TRUE(block.numI() > 1 && block.numI() < 4, 0, "resize_images: Method and size should be given properly.");
if(block.numI() == 3) { // full stack of args
height = I_ARG(0);
width = I_ARG(1);
}
else if (block.numI() == 2) {
height = I_ARG(0);
width = I_ARG(1);
}
}
double ratio = shape::sizeAt(in, 1) / (0.0 + shape::sizeAt(in, 2));
if (block.numB() > 1) {
if (B_ARG(1)) {
width = math::nd4j_ceil<double, int>(height / ratio);
}
}
std::vector<Nd4jLong> shape;
if (shape::rank(in) == 4)
shape = {in[1], height, width, in[4]};
else if (shape::rank(in) == 3)
shape = {height, width, in[3]};
auto outShape = ConstantShapeHelper::getInstance()->createShapeInfo(DataType::FLOAT32, shape::order(in), shape);
return SHAPELIST(outShape);
}
DECLARE_TYPES(resize_images) {
getOpDescriptor()
->setAllowedInputTypes(0, {ALL_FLOATS, ALL_INTS})
->setAllowedInputTypes(1, {ALL_INTS})
->setAllowedOutputTypes({DataType::FLOAT32});
}
}
}
#endif


@@ -25,14 +25,27 @@
 namespace sd {
 namespace ops {
-   CONFIGURABLE_OP_IMPL(matrix_band_part, 1, 1, true, 0, 2) {
+   CONFIGURABLE_OP_IMPL(matrix_band_part, 1, 1, true, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto output = OUTPUT_VARIABLE(0);
-       Nd4jLong minLower = INT_ARG(0);
-       Nd4jLong maxUpper = INT_ARG(1);
+       Nd4jLong minLower(0LL);
+       Nd4jLong maxUpper(0LL);
+       if (block.width() == 1) {
+           REQUIRE_TRUE(block.numI() == 2, 0, "matrix_band_part: min and max band numbers should be given before.");
+           minLower = INT_ARG(0);
+           maxUpper = INT_ARG(1);
+       }
+       else {
+           REQUIRE_TRUE(block.width() == 3, 0, "matrix_band_part: min and max band numbers should be given as scalars before.");
+           auto minLowerT = INPUT_VARIABLE(1);
+           auto maxUpperT = INPUT_VARIABLE(2);
+           REQUIRE_TRUE(minLowerT->isScalar() && maxUpperT->isScalar(), 0, "matrix_band_part: min and max should be scalars, but %i and %i ranks given", minLowerT->rankOf(), maxUpperT->rankOf());
+           minLower = minLowerT->e<Nd4jLong>(0);
+           maxUpper = maxUpperT->e<Nd4jLong>(0);
+       }
        REQUIRE_TRUE(input->rankOf() >= 2, 0, "matrix_band_part: Input rank should be 2 or greater.");
        Nd4jLong N = input->sizeAt(-2);
        Nd4jLong M = input->sizeAt(-1);
@@ -49,9 +62,10 @@ namespace sd {
    DECLARE_TYPES(matrix_band_part) {
        getOpDescriptor()
-               ->setAllowedInputTypes({ALL_INTS, ALL_FLOATS})
-               ->setAllowedInputTypes({ALL_INTS, ALL_FLOATS})
-               ->setSameMode(true);
+               ->setAllowedInputTypes(0, {ALL_INTS, ALL_FLOATS})
+               ->setAllowedInputTypes(1, {ALL_INTS})
+               ->setAllowedInputTypes(2, {ALL_INTS})
+               ->setAllowedInputTypes({ALL_INTS, ALL_FLOATS});
    }
 }


@@ -85,6 +85,7 @@ namespace ops {
     */
    #if NOT_EXCLUDED(OP_rgb_to_yuv)
    DECLARE_CONFIGURABLE_OP(yuv_to_rgb, 1, 1, true, 0, 0);
+   #endif
    /**
     * Rgb To Yiq
@@ -108,8 +109,156 @@
    DECLARE_CONFIGURABLE_OP(yiq_to_rgb, 1, 1, true, 0, 0);
    #endif
-}
-}
+   /**
+    * resize_images - resize image with given size and method
* there are 4 methods allowed: RESIZE_BILINEAR(0), RESIZE_NEIGHBOR(1), RESIZE_AREA(2) and RESIZE_BICUBIC(3)
* inputs:
* 0 - 4D tensor with shape {batch, height, width, channels}
* 1 - 1D integer tensor with {new_height, new_width} (optional)
* 2 - 0D integer tensor with method (0 to 3) (optional)
*
* int args:
* 0 - new_height
* 1 - new_width
* 2 - method
*
* bool args:
* 0 - align corners (default false) - optional
* 1 - preserve_aspect_ratio (default false) - optional
*
* CAUTION: size and method can be given either as input tensors or as int args, but only one of these forms at a time
*
* output:
* 0 - 4D float32 tensor with shape {batch, new_height, new_width, channels}
*
*/
#if NOT_EXCLUDED(OP_resize_images)
DECLARE_CUSTOM_OP(resize_images, 1,1,false, 0, 0);
#endif
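    // A minimal usage sketch (not part of this header; assumed, based on the documentation
    // above and the DeclarableOp::evaluate() helper used by this repository's tests):
    //
    //   #include <ops/declarable/CustomOperations.h>
    //   sd::ops::resize_images op;
    //   auto image = NDArrayFactory::create<float>('c', {1, 5, 5, 3});      // NHWC batch
    //   auto size  = NDArrayFactory::create<int>('c', {2}, {9, 9});         // {new_height, new_width}
    //   auto result = op.evaluate({&image, &size}, {}, {0}, {false});       // method 0 = bilinear, align_corners = false
    //   auto resized = result.at(0);                                        // float32 tensor, shape {1, 9, 9, 3}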
-#endif
+   /**
* This op make bilinear or nearest neighbor interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels) numeric type
* 1 - 2D-Tensor with shape (num_boxes, 4) float type
* 2 - 1D-Tensor with shape (num_boxes) int type
* 3 - 1D-Tensor with 2 values (newWidth, newHeight) (optional) int type
*
* float arguments (optional)
* 0 - extrapolation_value (optional) default 0.f
*
* int arguments: (optional)
* 0 - mode (default 0 - bilinear interpolation)
*
* output array:
* the 4D-Tensor with resized to crop_size images given - float type
*/
#if NOT_EXCLUDED(OP_crop_and_resize)
DECLARE_CUSTOM_OP(crop_and_resize, 4, 1, false, -1, -1);
#endif
/**
* This op make bilinear interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight) (optional)
*
* int arguments: (optional)
* 0 - new width
* 1 - new height
*
* output array:
* the 4D-Tensor with calculated backproped dots
*
* CAUTION: either size tensor or a pair of int params should be provided.
*/
#if NOT_EXCLUDED(OP_resize_bilinear)
DECLARE_CUSTOM_OP(resize_bilinear, 1, 1, false, 0, -2);
#endif
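    // A minimal sketch of the two calling conventions noted in the CAUTION above (assumed,
    // based on the evaluate() test helper used elsewhere in this repository):
    //
    //   sd::ops::resize_bilinear op;
    //   auto image = NDArrayFactory::create<float>('c', {1, 4, 4, 3});
    //   auto size  = NDArrayFactory::create<int>('c', {2}, {8, 8});
    //   auto viaSizeTensor = op.evaluate({&image, &size});                  // size given as the second input
    //   auto viaIntArgs    = op.evaluate({&image}, {}, {8, 8});             // or as a pair of int args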
/**
* This op make nearest neighbor interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight) (optional)
*
* int arguments: (optional)
* 0 - new width
* 1 - new height
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
* CAUTION: either size tensor or a pair of int params should be provided.
*/
#if NOT_EXCLUDED(OP_resize_nearest_neighbor)
DECLARE_CUSTOM_OP(resize_nearest_neighbor, 1, 1, false, 0, -2);
#endif
/**
* This op make bicubic interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
#if NOT_EXCLUDED(OP_resize_bicubic)
DECLARE_CUSTOM_OP(resize_bicubic, 1, 1, false, 0, -2);
#endif
/**
* This op make area interpolated resize (as OpenCV INTER_AREA algorithm) for given tensor
*
* input array:
* 0 - images - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - size - 1D-Tensor with 2 values (newWidth, newHeight) (if missing a pair of integer args should be provided).
*
* int args: - provided only when size tensor is missing
* 0 - new height
* 1 - new width
* boolean args:
* 0 - align_corners - optional (default is false)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
#if NOT_EXCLUDED(OP_resize_area)
DECLARE_CUSTOM_OP(resize_area, 1, 1, false, 0, -2);
#endif
/**
* This op make interpolated resize for given tensor with given algorithm.
* Supported algorithms are bilinear, bicubic, nearest_neighbor, lanczos5, gaussian, area and mitchellcubic.
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight)
*
* optional int args:
* 0 - algorithm - bilinear by default
* optional bool args:
* 0 - preserve_aspect_ratio - default False
* 1 - antialias - default False
*
* output array:
* the 4D-Tensor with resized by given algorithm image (shape is {batch, newWidth, newHeight, channels})
*
*/
#if NOT_EXCLUDED(OP_image_resize)
DECLARE_CUSTOM_OP(image_resize, 2, 1, false, 0, 0);
#endif
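    // A minimal usage sketch (assumed, based on the documentation above): the single int arg
    // selects the algorithm from helpers::ImageResizeMethods and the bool args are
    // {preserve_aspect_ratio, antialias}.
    //
    //   sd::ops::image_resize op;
    //   auto image = NDArrayFactory::create<float>('c', {1, 8, 8, 3});
    //   auto size  = NDArrayFactory::create<int>('c', {2}, {13, 13});       // {new_height, new_width}
    //   auto result = op.evaluate({&image, &size}, {},
    //                             {(Nd4jLong) helpers::ImageResizeMethods::kResizeLanczos5},
    //                             {false, true});                           // keep aspect ratio off, antialias on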
}
}
#endif


@@ -1771,130 +1771,6 @@ namespace sd {
    DECLARE_CUSTOM_OP(reduce_logsumexp, 1, 1, false, 0, 0);
    #endif
/**
* This op make bilinear or nearest neighbor interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels) numeric type
* 1 - 2D-Tensor with shape (num_boxes, 4) float type
* 2 - 1D-Tensor with shape (num_boxes) int type
* 3 - 1D-Tensor with 2 values (newWidth, newHeight) (optional) int type
*
* float arguments (optional)
* 0 - exprapolation_value (optional) default 0.f
*
* int arguments: (optional)
* 0 - mode (default 0 - bilinear interpolation)
*
* output array:
* the 4D-Tensor with resized to crop_size images given - float type
*/
#if NOT_EXCLUDED(OP_crop_and_resize)
DECLARE_CUSTOM_OP(crop_and_resize, 4, 1, false, -1, -1);
#endif
/**
* This op make bilinear interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight) (optional)
*
* int arguments: (optional)
* 0 - new width
* 1 - new height
*
* output array:
* the 4D-Tensor with calculated backproped dots
*
* CAUTION: either size tensor or a pair of int params should be provided.
*/
#if NOT_EXCLUDED(OP_resize_bilinear)
DECLARE_CUSTOM_OP(resize_bilinear, 1, 1, false, 0, -2);
#endif
/**
* This op make nearest neighbor interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight) (optional)
*
* int arguments: (optional)
* 0 - new width
* 1 - new height
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
* CAUTION: either size tensor or a pair of int params should be provided.
*/
#if NOT_EXCLUDED(OP_resize_nearest_neighbor)
DECLARE_CUSTOM_OP(resize_nearest_neighbor, 1, 1, false, 0, -2);
#endif
/**
* This op make bicubic interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
#if NOT_EXCLUDED(OP_resize_bicubic)
DECLARE_CUSTOM_OP(resize_bicubic, 1, 1, false, 0, -2);
#endif
/**
* This op make area interpolated resize (as OpenCV INTER_AREA algorithm) for given tensor
*
* input array:
* 0 - images - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - size - 1D-Tensor with 2 values (newWidth, newHeight) (if missing a pair of integer args should be provided).
*
* int args: - proveded only when size tensor is missing
* 0 - new height
* 1 - new width
* boolean args:
* 0 - align_corners - optional (default is false)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
#if NOT_EXCLUDED(OP_resize_area)
DECLARE_CUSTOM_OP(resize_area, 1, 1, false, 0, -2);
#endif
/**
* This op make interpolated resize for given tensor with given algorithm.
* Supported algorithms are bilinear, bicubic, nearest_neighbor.
* Need to implement to full compatibility with TF: lanczos5, gaussian, area and mitchellcubic
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight)
*
* optional int args:
* 0 - algorithm - bilinear by default
* optional bool args:
* 0 - preserve_aspect_ratio - default False
* 1 - antialias - default False
*
* output array:
* the 4D-Tensor with resized by given algorithm image (shape is {batch, newWidth, newHeight, channels})
*
*/
#if NOT_EXCLUDED(OP_image_resize)
DECLARE_CUSTOM_OP(image_resize, 2, 1, false, 0, 0);
#endif
    /**
     * Copy a tensor setting everything outside a central band in each innermost matrix
     *


@@ -418,17 +418,17 @@ namespace helpers {
    // Allocate and initialize coefficients table using Bicubic
    // convolution algorithm.
    // https://en.wikipedia.org/wiki/Bicubic_interpolation
-   float* coeffs_table = new float[(kTableSize + 1) * 2];
+   float* coeffsTable = new float[(kTableSize + 1) * 2];
    auto func = PRAGMA_THREADS_FOR {
        for (auto i = start; i <= stop; ++i) {
            float x = i * 1.0 / kTableSize;
-           coeffs_table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
+           coeffsTable[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
            x += 1.0;
-           coeffs_table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
+           coeffsTable[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
        }
    };
    samediff::Threads::parallel_for(func, 0, kTableSize);
-   return coeffs_table;
+   return coeffsTable;
 }
 const float* getCoeffsTable(const bool use_keys_cubic) {
@@ -988,25 +988,392 @@ namespace helpers {
        return res;
    }
-   int resizeAreaFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
-                         bool const alignCorners, NDArray* output) {
+   int resizeAreaFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const alignCorners, NDArray* output) {
        BUILD_SINGLE_SELECTOR(image->dataType(), return resizeAreaFunctor_, (context, image, width, height, alignCorners, output), NUMERIC_TYPES);
    }
/**
* resize as implemented in TF v2.x (with preserve_aspect_ratio and antialias flag routines)
* */
// An interface for integrated scale functors.
struct IKernelFunc {
virtual float operator()(float x) const = 0;
virtual float radius() const = 0;
};
struct LanczosKernelFunc : public IKernelFunc {
// Pass 1 for Lanczos1 kernel, 3 for Lanczos3 etc.
explicit LanczosKernelFunc(float const radius) : _radius(radius) {}
float operator()(float x) const {
float const kPI = 3.141592653589793f;
x = math::nd4j_abs(x);
if (x > _radius) return 0.f;
// Need to special case the limit case of sin(x) / x when x is zero.
if (x <= 1.e-3f) {
return 1.f;
}
return _radius * std::sin(kPI * x) * std::sin(kPI * x / _radius) / (kPI * kPI * x * x);
}
float radius() const { return _radius; }
const float _radius;
};
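            // Note (restating the expression above, editorial addition): this is the standard
            // Lanczos window  L_a(x) = sinc(x) * sinc(x / a)  for |x| <= a  and 0 otherwise,
            // with a = _radius and sinc(x) = sin(pi*x) / (pi*x), which expands to the
            // radius * sin(pi*x) * sin(pi*x/radius) / (pi^2 * x^2) form computed here.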
struct GaussianKernelFunc : public IKernelFunc {
static constexpr float kRadiusMultiplier = 3.0f;
// https://en.wikipedia.org/wiki/Gaussian_function
// We use sigma = 0.5, as suggested on p. 4 of Ken Turkowski's "Filters
// for Common Resampling Tasks" for kernels with a support of 3 pixels:
// www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
// This implies a radius of 1.5,
explicit GaussianKernelFunc(float radius = 1.5f)
: _radius(radius), _sigma(radius / kRadiusMultiplier) {}
float operator()(float x) const {
x = math::nd4j_abs(x);
if (x >= _radius) return 0.0f;
return std::exp(-x * x / (2.0 * _sigma * _sigma));
}
float radius() const { return _radius; }
const float _radius;
const float _sigma; // Gaussian standard deviation
};
struct BoxKernelFunc : public IKernelFunc {
float operator()(float x) const {
x = math::nd4j_abs(x);
return x < 0.5f ? 1.f : x == 0.5f ? 0.5f : 0.f;
}
float radius() const { return 1.f; }
};
struct TriangleKernelFunc : public IKernelFunc {
// https://en.wikipedia.org/wiki/Triangle_function
float operator()(float x) const {
x = math::nd4j_abs(x);
return x < 1.f ? 1.f - x : 0.f;
}
float radius() const { return 1.f; }
};
struct KeysCubicKernelFunc : public IKernelFunc {
// http://ieeexplore.ieee.org/document/1163711/
// R. G. Keys. Cubic convolution interpolation for digital image
// processing. IEEE Transactions on Acoustics, Speech, and Signal
// Processing, 29(6):1153-1160, 1981.
float operator()(float x) const {
x = math::nd4j_abs(x);
if (x >= 2.0f) {
return 0.0f;
} else if (x >= 1.0f) {
return ((-0.5f * x + 2.5f) * x - 4.0f) * x + 2.0f;
} else {
return ((1.5f * x - 2.5f) * x) * x + 1.0f;
}
}
float radius() const { return 2.f; }
};
struct MitchellCubicKernelFunc : public IKernelFunc {
// https://doi.org/10.1145/378456.378514
// D. P. Mitchell and A. N. Netravali. Reconstruction filters in computer
// graphics. Computer Graphics (Proceedings of ACM SIGGRAPH 1988),
// 22(4):221-228, 1988.
float operator()(float x) const {
x = math::nd4j_abs(x);
if (x >= 2.f) {
return 0.f;
} else if (x >= 1.f) {
return (((-7.f / 18.f) * x + 2.f) * x - 10.f / 3.f) * x + 16.f / 9.f;
} else {
return (((7.f / 6.f) * x - 2.f) * x) * x + 8.f / 9.f;
}
}
float radius() const { return 2.f; }
};
// A pre-computed span of pixels along a single dimension.
// The output pixel will be the weighted sum of pixels starting from start.
struct Spans {
// The maximum span size of any output pixel.
int _spanSize;
// int32 tensor with shape {outputSize}.
NDArray _starts;
// float32 tensor of size {outputSize, spanSize}.
// The output pixel at x is computed as:
// dot_product(input[starts[x]:starts[x]+span_size], weights[x]).
NDArray _weights;
};
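        // Restating the comment above as a formula (editorial addition): for every output
        // coordinate x,
        //   output[x] = sum over k in [0, _spanSize) of _weights[x][k] * input[_starts[x] + k],
        // i.e. _starts picks the first contributing input pixel of the span and _weights
        // holds the normalized kernel taps applied to it.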
static int
computeSpans(IKernelFunc* kernel, Nd4jLong const outSize, Nd4jLong const inSize, float const scale, float const translate, bool const antialias, Spans& spans) {
// When sampling, we need the inverse scale and translation, to map from an
// output to an input pixel.
float const invScale = 1.f / scale;
float const invTranslate = -invScale * translate;
// When downsampling the kernel should be scaled since we want to low pass
// filter and interpolate, but when upsampling it should not be since we only
// want to interpolate.
float const kernelScale = antialias ? math::nd4j_max(invScale, 1.f) : 1.f;
spans._spanSize = math::nd4j_min(2 * static_cast<int>(std::ceil(kernel->radius() * kernelScale)) + 1, static_cast<int>(inSize));
spans._starts = NDArrayFactory::create<int>('c', {outSize});
spans._weights = NDArrayFactory::create<float>('c', {outSize, spans._spanSize});
auto startsVec = spans._starts.bufferAsT<int>();
auto weightsVector = spans._weights.bufferAsT<float>();
spans._weights.nullify();
const float invKernelScale = 1.f / kernelScale;
int maxSpanSize = 0;
std::vector<float> tempWeights;
// return value if within bounds or bounds otherwise
auto boundsAmp = [](Nd4jLong const low, Nd4jLong const high, Nd4jLong const value) {
if (high < value) return high;
if (value < low) return low;
return value;
};
for (auto x = 0LL; x < outSize; ++x) {
const float columnFloat = x + 0.5f;
const float sampleFloat = columnFloat * invScale + invTranslate;
// Don't sample when the sampling location is outside the source image.
if (sampleFloat < 0 || sampleFloat > inSize) {
// Add an empty span.
startsVec[x] = 0;
continue;
}
Nd4jLong spanStart = math::nd4j_ceil<float,float>(sampleFloat - kernel->radius() * kernelScale - 0.5f);
Nd4jLong spanEnd = math::nd4j_floor<float, float>(sampleFloat + kernel->radius() * kernelScale - 0.5f);
spanStart = boundsAmp(0LL, inSize - 1, spanStart);
spanEnd = boundsAmp(0LL, inSize - 1, spanEnd) + 1;
int const spanSize = spanEnd - spanStart;
if (spanSize > spans._spanSize) {
return Status::CODE(ND4J_STATUS_BAD_INPUT, "Span is too large: "); // + spanSize + " vs " + spans._spanSize);//, spanSize, spans._spanSize));
}
float totalWeightSum = 0.f;
tempWeights.clear();
for (int source = spanStart; source < spanEnd; ++source) {
float kernelPos = static_cast<float>(source) + 0.5f - sampleFloat;
float weight = (*kernel)(kernelPos * invKernelScale);
totalWeightSum += weight;
tempWeights.push_back(weight);
}
maxSpanSize = std::max(maxSpanSize, spanSize);
if (math::nd4j_abs(totalWeightSum) >= 1000.f * DataTypeUtils::min<float>()) { //
auto totalWeightSumInverted = 1.0f / totalWeightSum;
auto outIndex = spans._spanSize * x;
for (auto weight : tempWeights) {
weightsVector[outIndex] = weight * totalWeightSumInverted;
++outIndex;
}
}
startsVec[x] = spanStart;
}
return Status::OK();
}
template <typename X, typename Z>
static void gatherRows(int const spanSize, int const* starts, Z const* weights, X const* imagePtr, Nd4jLong const inputHeight, Nd4jLong const inputWidth, Nd4jLong const outputHeight,
Nd4jLong const outputWidth, Nd4jLong const channels, Z* outputPtr) {
auto inRowSize = inputWidth * channels;
auto outRowSize = outputWidth * channels;
auto addScaledVector = [](const X* inVector, int vectorLen, Z weight, Z* outVector) {
Z* outVecEnd = outVector + vectorLen;
for (; outVector != outVecEnd; ++outVector, ++inVector) {
*outVector += weight * static_cast<Z>(*inVector);
}
};
for (int y = 0; y < outputHeight; ++y) {
Z* outRowData = outputPtr + outRowSize * y;
memset(outRowData, '\0', outRowSize * sizeof(Z));// std::fill(outRowData, outRowData + outRowSize, 0.f);
int inRow = starts[y];
auto inRowData = imagePtr + inRowSize * inRow;
auto weightsStart = weights + y * spanSize;
auto realSpanSize = math::nd4j_min(starts[y] + spanSize, static_cast<int>(inputHeight)) - starts[y];
auto weightsEnd = weightsStart + realSpanSize;
for (auto weightPtr = weightsStart; weightPtr != weightsEnd; ++weightPtr) {
addScaledVector(inRowData, inRowSize, *weightPtr, outRowData);
inRowData += inRowSize;
}
}
}
template <typename Z>
static void gatherColumns(int const spanSize, int const* starts, Z const* weights, Z const* imagesPtr, Nd4jLong const inputHeight, Nd4jLong const inputWidth, Nd4jLong const outputHeight, Nd4jLong const outputWidth, Nd4jLong channels, Z* outputPtr) {
auto inRowSize = inputWidth * channels;
auto outRowSize = outputWidth * channels;
for (auto y = 0LL; y < outputHeight; ++y) {
auto inputRowStart = imagesPtr + inRowSize * y;
auto outPixels = outputPtr + outRowSize * y;
for (auto x = 0LL; x < outputWidth; ++x, outPixels += channels) {
auto inPixels = inputRowStart + starts[x] * channels;
auto weightsStart = weights + x * spanSize;
auto realSpanSize = math::nd4j_min(starts[x] + spanSize, static_cast<int>(inputWidth)) - starts[x];
auto weightsEnd = weightsStart + realSpanSize;
for (int c = 0; c < channels; ++c) {
outPixels[c] = 0.0f;
}
for (auto weightPtr = weightsStart; weightPtr != weightsEnd; ++weightPtr) {
Z w = *weightPtr;
for (int c = 0; c < channels; ++c) {
outPixels[c] += w * static_cast<Z>(inPixels[c]);
}
inPixels += channels;
}
}
}
}
template <typename X, typename Z>
static void gatherSpans(int const rowSpanSize, NDArray const& rowStarts, NDArray const& rowWeights, int const colSpanSize, NDArray const& columnStarts, NDArray const& columnWeights, NDArray const* images, NDArray& intermediate, NDArray* output) {
auto batchSize = images->sizeAt(0);
auto inputHeight = images->sizeAt(1);
auto inputWidth = images->sizeAt(2);
auto channels = images->sizeAt(3);
auto outputHeight = output->sizeAt(1);
auto outputWidth = output->sizeAt(2);
auto inputPixPerBatch = inputWidth * inputHeight * channels;
auto intermediatePixPerBatch = inputWidth * outputHeight * channels;
auto outputPixPerBatch = outputWidth * outputHeight * channels;
Z* intermediatePtr = intermediate.bufferAsT<Z>();
const X* imagePtr = images->bufferAsT<X>();
Z* outPtr = output->bufferAsT<Z>();
for (int b = 0; b < batchSize; ++b, imagePtr += inputPixPerBatch,
intermediatePtr += intermediatePixPerBatch,
outPtr += outputPixPerBatch) {
gatherRows<X,Z>(rowSpanSize, rowStarts.bufferAsT<int>(), rowWeights.bufferAsT<Z>(),
imagePtr, inputHeight, inputWidth, outputHeight,
inputWidth, channels, intermediatePtr);
gatherColumns<Z>(colSpanSize, columnStarts.bufferAsT<int>(), columnWeights.bufferAsT<Z>(),
intermediatePtr, outputHeight, inputWidth, outputHeight, outputWidth, channels, outPtr);
}
}
template <typename X, typename Z>
static int resizeKernel(IKernelFunc* transformationKernel, NDArray const* input, Nd4jLong outWidth, Nd4jLong outHeight, bool antialias, NDArray* output) {
Nd4jLong const batchSize = input->sizeAt(0);
Nd4jLong const inputHeight = input->sizeAt(1);
Nd4jLong const inputWidth = input->sizeAt(2);
Nd4jLong const channels = input->sizeAt(3);
Z rowScale = Z(outHeight) / Z(inputHeight);
Z columnScale = Z(outWidth) / Z(inputWidth);
// Return if the output is empty.
if (output->lengthOf() == 0) return Status::OK();
Spans colSpans;
auto res = computeSpans(transformationKernel, outWidth, inputWidth, columnScale, 0.f, antialias, colSpans);
if (res != Status::OK()) return res;
Spans rowSpans;
res = computeSpans(transformationKernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
NDArray intermediate = NDArrayFactory::create<Z>('c', {batchSize, outHeight, inputWidth, channels});
//const functor::Spans& const_row_spans = row_spans;
//typename TTypes<int32, 1>::ConstTensor row_starts(
//const_row_spans.starts.tensor<int32, 1>());
auto& rowStarts = rowSpans._starts; // shape {outWidth}
auto& rowWeights = rowSpans._weights; // shape {outWidth, numSpans}
auto& columnStarts = colSpans._starts; // shape {outHeights}
auto& columnWeights = colSpans._weights; // shape {outHeights, numSpans}
gatherSpans<X, Z>(rowSpans._spanSize, rowStarts, rowWeights, colSpans._spanSize, columnStarts, columnWeights, input, intermediate, output);
return res;
}
static int resizeBilinear(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
auto kernel = std::unique_ptr<IKernelFunc>(new TriangleKernelFunc());
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(kernel.get(), image, (Nd4jLong) width, (Nd4jLong) height, antialias, output),
NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeBilinear: Unknown error occured.");
}
static int resizeBicubic(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
if (antialias) {
auto kernel = std::unique_ptr<IKernelFunc>(new KeysCubicKernelFunc());
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,
(kernel.get(), image, (Nd4jLong) width, (Nd4jLong) height, antialias, output),
NUMERIC_TYPES, FLOAT_TYPES_1);
}
else {
return resizeBicubicFunctorA(context, image, width, height, false, true, output);
}
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeBicubic: Unknown error occured.");
}
static int resizeNeighbor(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
return resizeNeighborFunctor(context, image, width, height, false, true, output);
}
static int resizeArea(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
return resizeAreaFunctor(context, image, width, height, false, output);
}
static int resizeLanczos3(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
auto kernel = std::unique_ptr<IKernelFunc>(new LanczosKernelFunc(3.f));
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel, (kernel.get(), image, (Nd4jLong)width, (Nd4jLong)height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeLanczos3: Unknown error occured.");
}
static int resizeLanczos5(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
auto kernel = std::unique_ptr<IKernelFunc>(new LanczosKernelFunc(5.f));
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel, (kernel.get(), image, (Nd4jLong)width, (Nd4jLong)height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeLanczos5: Unknown error occured.");
}
static int resizeGaussian(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
auto kernel = std::unique_ptr<IKernelFunc>(new GaussianKernelFunc());
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel, (kernel.get(), image, (Nd4jLong)width, (Nd4jLong)height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeGaussian: Unknown error occured.");
}
static int resizeMitchellcubic(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
auto kernel = std::unique_ptr<IKernelFunc>(new MitchellCubicKernelFunc());
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel, (kernel.get(), image, (Nd4jLong)width, (Nd4jLong)height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeMitchelcubic: Unknown error occured.");
}
// ------------------------------------------------------------------------------------------------------------------ //
int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
ImageResizeMethods method, bool alignCorners, NDArray* output) {
switch (method) {
case kResizeBilinear:
return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output);
case kResizeNearest:
return resizeNeighborFunctor(context, image, width, height, alignCorners, false, output);
case kResizeBicubic:
return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output);
case kResizeArea:
return resizeAreaFunctor(context, image, width, height, alignCorners, output);
}
nd4j_printf("helper::resizeImagesFunctor: Wrong resize method %i\n", (int)method);
return Status::CODE(ND4J_STATUS_BAD_INPUT, "helper::resizeImagesFunctor: Wrong resize method");
}
    // ------------------------------------------------------------------------------------------------------------------ //
    int resizeFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
-                     ImageResizeMethods method, bool preserveAspectRatio, bool antialias, NDArray* output) {
+                     ImageResizeMethods method, bool antialias, NDArray* output) {
        switch (method) {
-           case kResizeBilinear: return resizeBilinearFunctor(context, image, width, height, false, false, output); break;
-           case kResizeNearest: return resizeNeighborFunctor(context, image, width, height, false, false, output); break;
-           case kResizeBicubic: return resizeBicubicFunctor(context, image, width, height, preserveAspectRatio, antialias, output); break;
-           case kResizeArea: return resizeAreaFunctor(context, image, width, height, preserveAspectRatio, output);
-           case kResizeLanczos5:
-           case kResizeGaussian:
-           case kResizeMitchelcubic:
-               throw std::runtime_error("helper::resizeFunctor: Non implemented yet.");
+           case kResizeBilinear: return resizeBilinear(context, image, width, height, antialias, output);
+           case kResizeNearest: return resizeNeighbor(context, image, width, height, antialias, output);
+           case kResizeBicubic: return resizeBicubic(context, image, width, height, antialias, output);
+           case kResizeArea: return resizeArea(context, image, width, height, antialias, output);
+           case kResizeLanczos3: return resizeLanczos3(context, image, width, height, antialias, output);
+           case kResizeLanczos5: return resizeLanczos5(context, image, width, height, antialias, output);
+           case kResizeGaussian: return resizeGaussian(context, image, width, height, antialias, output);
+           case kResizeMitchellcubic: return resizeMitchellcubic(context, image, width, height, antialias, output);
        }
-       return ND4J_STATUS_OK;
+       nd4j_printf("helper::resizeFunctor: Wrong resize method %i\n", (int)method);
+       return Status::CODE(ND4J_STATUS_BAD_INPUT, "helper::resizeFunctor: Wrong resize method");
    }


@@ -35,6 +35,7 @@ limitations under the License.
 #include <ops/declarable/helpers/image_resize.h>
 #include <exceptions/cuda_exception.h>
+#include <array/NDArrayFactory.h>
 namespace sd {
 namespace ops {
@@ -1203,20 +1204,22 @@ namespace helpers {
    BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (sd::LaunchContext * context,
        NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
-   ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-   int resizeFunctor(sd::LaunchContext * context, NDArray const* image, int width, int height,
-                     ImageResizeMethods method, bool preserveAspectRatio, bool antialias, NDArray* output) {
+   // ------------------------------------------------------------------------------------------------------------------ //
+   int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
+                           ImageResizeMethods method, bool alignCorners, NDArray* output) {
        switch (method) {
-           case kResizeBilinear: return resizeBilinearFunctor(context, image, width, height, false, false, output); break;
-           case kResizeNearest: return resizeNeighborFunctor(context, image, width, height, false, false, output); break;
-           case kResizeBicubic: return resizeBicubicFunctor(context, image, width, height, preserveAspectRatio, antialias, output); break;
-           case kResizeLanczos5:
-           case kResizeGaussian:
+           case kResizeBilinear:
+               return resizeBilinearFunctor(context, image, width, height, alignCorners, false, output);
+           case kResizeNearest:
+               return resizeNeighborFunctor(context, image, width, height, alignCorners, false, output);
+           case kResizeBicubic:
+               return resizeBicubicFunctor(context, image, width, height, alignCorners, false, output);
            case kResizeArea:
-           case kResizeMitchelcubic:
-               throw std::runtime_error("helper::resizeFunctor: Non implemented yet.");
+               return resizeAreaFunctor(context, image, width, height, alignCorners, output);
+           default:
+               throw std::runtime_error("helper::resizeImagesFunctor: Wrong resize method.");
        }
-       return ND4J_STATUS_OK;
    }
    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


@@ -0,0 +1,497 @@
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <ops/declarable/helpers/image_resize.h>
#include <helpers/PointersManager.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// resize v2 implementation //
// -------------------------------------------------------------------------------------------------------------- //
// A functional interface for a scale kernels.
//struct IKernelFunc {
// _CUDA_HD virtual float operator()(float x) const = 0;
// _CUDA_HD virtual float radius() const = 0;
// _CUDA_HD virtual size_t size() const = 0;
//};
struct LanczosKernelFunc /*: public IKernelFunc*/ {
// Pass 1 for Lanczos1 kernel, 3 for Lanczos3 etc.
explicit LanczosKernelFunc(float const radius) : _radius(radius) {}
_CUDA_HD float operator()(float x) const {
float const kPI = 3.141592653589793f;
x = math::nd4j_abs(x);
if (x > _radius) return 0.f;
// Need to special case the limit case of sin(x) / x when x is zero.
if (x <= 1.e-3f) {
return 1.f;
}
return _radius * std::sin(kPI * x) * std::sin(kPI * x / _radius) / (kPI * kPI * x * x);
}
_CUDA_HD float radius() const { return _radius; }
const float _radius;
};
struct GaussianKernelFunc /*: public IKernelFunc*/ {
static constexpr float kRadiusMultiplier = 3.0f;
// https://en.wikipedia.org/wiki/Gaussian_function
// We use sigma = 0.5, as suggested on p. 4 of Ken Turkowski's "Filters
// for Common Resampling Tasks" for kernels with a support of 3 pixels:
// www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
// This implies a radius of 1.5,
explicit GaussianKernelFunc(float radius = 1.5f)
: _radius(radius), _sigma(radius / kRadiusMultiplier) {}
_CUDA_HD float operator()(float x) const {
x = math::nd4j_abs(x);
if (x >= _radius) return 0.0f;
return std::exp(-x * x / (2.0 * _sigma * _sigma));
}
_CUDA_HD float radius() const { return _radius; }
const float _radius;
const float _sigma; // Gaussian standard deviation
};
struct BoxKernelFunc /*: public IKernelFunc*/ {
_CUDA_HD float operator()(float x) const {
x = math::nd4j_abs(x);
return x < 0.5f ? 1.f : x == 0.5f ? 0.5f : 0.f;
}
_CUDA_HD float radius() const { return 1.f; }
_CUDA_HD size_t size() const { return sizeof(BoxKernelFunc); }
};
struct TriangleKernelFunc /*: public IKernelFunc*/ {
// https://en.wikipedia.org/wiki/Triangle_function
_CUDA_HD float operator()(float x) const {
x = math::nd4j_abs(x);
return x < 1.f ? 1.f - x : 0.f;
}
_CUDA_HD float radius() const { return 1.f; }
};
struct KeysCubicKernelFunc /*: public IKernelFunc*/ {
// http://ieeexplore.ieee.org/document/1163711/
// R. G. Keys. Cubic convolution interpolation for digital image
// processing. IEEE Transactions on Acoustics, Speech, and Signal
// Processing, 29(6):1153-1160, 1981.
_CUDA_HD float operator()(float x) const {
x = math::nd4j_abs(x);
if (x >= 2.0f) {
return 0.0f;
} else if (x >= 1.0f) {
return ((-0.5f * x + 2.5f) * x - 4.0f) * x + 2.0f;
} else {
return ((1.5f * x - 2.5f) * x) * x + 1.0f;
}
}
_CUDA_HD float radius() const { return 2.f; }
};
struct MitchellCubicKernelFunc/* : public IKernelFunc*/ {
// https://doi.org/10.1145/378456.378514
// D. P. Mitchell and A. N. Netravali. Reconstruction filters in computer
// graphics. Computer Graphics (Proceedings of ACM SIGGRAPH 1988),
// 22(4):221-228, 1988.
_CUDA_HD float operator()(float x) const {
x = math::nd4j_abs(x);
if (x >= 2.f) {
return 0.f;
} else if (x >= 1.f) {
return (((-7.f / 18.f) * x + 2.f) * x - 10.f / 3.f) * x + 16.f / 9.f;
} else {
return (((7.f / 6.f) * x - 2.f) * x) * x + 8.f / 9.f;
}
}
_CUDA_HD float radius() const { return 2.f; }
};
// A pre-computed span of pixels along a single dimension.
// The output pixel will be the weighted sum of pixels starting from start.
struct Spans {
// The maximum span size of any output pixel.
int _spanSize;
// int32 tensor with shape {outputSize}.
NDArray _starts;
// float32 tensor of size {outputSize, spanSize}.
// The output pixel at x is computed as:
// dot_product(input[starts[x]:starts[x]+span_size], weights[x]).
NDArray _weights;
};
static inline _CUDA_HD Nd4jLong boundsAmp(Nd4jLong const low, Nd4jLong const high, Nd4jLong const value) {
if (high < value) return high;
if (value < low) return low;
return value;
}
template <typename TKernelFunc>
static __global__ void computeSpansKernel(TKernelFunc* kernel, int* startsVec, float* weightsVector, Nd4jLong outSize, Nd4jLong inSize, float kernelScale, int spanSize, float const invScale, float const invTranslate, float invKernelScale, float* tempWeightsBuf) {
// return value if within bounds or bounds otherwise
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
__shared__ int maxSpanSize;
if (threadIdx.x == 0 && blockIdx.x == 0) {
maxSpanSize = 0;
}
__syncthreads();
for (auto x = tid; x < outSize; x += step) {
const float columnFloat = x + 0.5f;
const float sampleFloat = columnFloat * invScale + invTranslate;
// Don't sample when the sampling location is outside the source image.
if (sampleFloat < 0 || sampleFloat > inSize) {
// Add an empty span.
startsVec[x] = 0;
continue;
}
Nd4jLong spanStart = math::nd4j_ceil<float,float>(sampleFloat - kernel->radius() * kernelScale - 0.5f);
Nd4jLong spanEnd = math::nd4j_floor<float, float>(sampleFloat + kernel->radius() * kernelScale - 0.5f);
spanStart = boundsAmp(0LL, inSize - 1, spanStart);
spanEnd = boundsAmp(0LL, inSize - 1, spanEnd) + 1;
int const spanSize = spanEnd - spanStart;
if (spanSize > spanSize) {
return ; //throw "Exception"; ////return Status::CODE(ND4J_STATUS_BAD_INPUT, "Span is too large: "); // + spanSize + " vs " + spans._spanSize);//, spanSize, spans._spanSize));
}
float totalWeightSum = 0.f;
auto tempWeights = &tempWeightsBuf[x];
auto actualWeights = 0;
for (int source = spanStart; source < spanEnd; ++source) {
float kernelPos = static_cast<float>(source) + 0.5f - sampleFloat;
float weight = (*kernel)(kernelPos * invKernelScale);
totalWeightSum += weight;
tempWeights[actualWeights++] = weight;
}
maxSpanSize = math::nd4j_max(maxSpanSize, spanSize);
if (math::nd4j_abs(totalWeightSum) >= 1000.f * DataTypeUtils::min<float>()) { //
auto totalWeightSumInverted = 1.0f / totalWeightSum;
auto outIndex = spanSize * x;
for (auto weightIndex = 0; weightIndex < actualWeights; ++weightIndex) {
weightsVector[outIndex] = tempWeights[weightIndex] * totalWeightSumInverted;
++outIndex;
}
}
startsVec[x] = spanStart;
}
}
template <typename TKernelFunc>
static int computeSpans(LaunchContext* context, TKernelFunc& kernel, Nd4jLong const outSize, Nd4jLong const inSize, float const scale, float const translate, bool const antialias, Spans& spans) {
// When sampling, we need the inverse scale and translation, to map from an
// output to an input pixel.
float const invScale = 1.f / scale;
float const invTranslate = -invScale * translate;
// When downsampling the kernel should be scaled since we want to low pass
// filter and interpolate, but when upsampling it should not be since we only
// want to interpolate.
float const kernelScale = antialias ? math::nd4j_max(invScale, 1.f) : 1.f;
spans._spanSize = math::nd4j_min(2 * static_cast<int>(std::ceil(kernel.radius() * kernelScale)) + 1, static_cast<int>(inSize));
spans._starts = NDArrayFactory::create<int>('c', {outSize}); spans._starts.syncToHost();
spans._weights = NDArrayFactory::create<float>('c', {outSize, spans._spanSize}); spans._weights.syncToHost();
auto startsVec = reinterpret_cast<int*>(spans._starts.buffer());
auto weightsVector = reinterpret_cast<float*>(spans._weights.buffer());
spans._weights.nullify();
const float invKernelScale = 1.f / kernelScale;
// NDArray tempWeights = NDArrayFactory::create<float>('c', {outSize, spans._spanSize});
// auto tempWeightsBuf = reinterpret_cast<float*>(tempWeights.specialBuffer());
// PointersManager mg(context, "ops::helpers::computeSpans");
// auto specialKernel = reinterpret_cast<TKernelFunc*>(mg.replicatePointer(&kernel, sizeof(TKernelFunc)));
auto stream = context->getCudaStream();
//computeSpansKernel<TKernelFunc><<<1, 1, 128, *stream>>>(specialKernel, startsVec, weightsVector, outSize, inSize, kernelScale, spans._spanSize, invScale, invTranslate, invKernelScale, tempWeightsBuf);
auto maxSpanSize = 0;
std::vector<float> tempWeights;
for (auto x = 0; x < outSize; x ++) {
const float columnFloat = x + 0.5f;
const float sampleFloat = columnFloat * invScale + invTranslate;
// Don't sample when the sampling location is outside the source image.
if (sampleFloat < 0 || sampleFloat > inSize) {
// Add an empty span.
startsVec[x] = 0;
continue;
}
Nd4jLong spanStart = math::nd4j_ceil<float,float>(sampleFloat - kernel.radius() * kernelScale - 0.5f);
Nd4jLong spanEnd = math::nd4j_floor<float, float>(sampleFloat + kernel.radius() * kernelScale - 0.5f);
spanStart = boundsAmp(0LL, inSize - 1, spanStart);
spanEnd = boundsAmp(0LL, inSize - 1, spanEnd) + 1;
int const spanSize = spanEnd - spanStart;
if (spanSize > spans._spanSize) {
return Status::CODE(ND4J_STATUS_BAD_INPUT, "helpers::computeSpans: computed span is larger than the allocated span size");
}
float totalWeightSum = 0.f;
tempWeights.clear();
for (int source = spanStart; source < spanEnd; ++source) {
float kernelPos = static_cast<float>(source) + 0.5f - sampleFloat;
float weight = kernel(kernelPos * invKernelScale);
totalWeightSum += weight;
tempWeights.push_back(weight);
}
maxSpanSize = math::nd4j_max(maxSpanSize, spanSize);
if (math::nd4j_abs(totalWeightSum) >= 1000.f * DataTypeUtils::min<float>()) { //
auto totalWeightSumInverted = 1.0f / totalWeightSum;
auto outIndex = spans._spanSize * x;
for (size_t weightIndex = 0; weightIndex < tempWeights.size(); ++weightIndex) {
    weightsVector[outIndex++] = tempWeights[weightIndex] * totalWeightSumInverted;
}
}
startsVec[x] = spanStart;
}
spans._starts.tickWriteHost(); spans._weights.tickWriteHost();
spans._starts.syncToDevice();
spans._weights.syncToDevice();
// cudaStreamSynchronize(*stream);
return Status::OK();
}
//template int computeSpans(LaunchContext* context, TriangleKernelFunc& kernel, Nd4jLong const outSize, Nd4jLong const inSize, float const scale, float const translate, bool const antialias, Spans& spans);
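    // The resize is separable: gatherRows first resamples each image vertically into an intermediate
    // buffer of shape {batch, outputHeight, inputWidth, channels}, then gatherColumns resamples that
    // intermediate horizontally into the final {batch, outputHeight, outputWidth, channels} output.
    // batchedGatherSpan below applies both passes per image, using a grid-stride loop over the batch.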
template <typename X, typename Z>
static __device__ void gatherRows(int const spanSize, int const* starts, Z const* weights, X const* imagePtr, Nd4jLong const inputHeight, Nd4jLong const inputWidth, Nd4jLong const outputHeight,
Nd4jLong const outputWidth, Nd4jLong const channels, Z* outputPtr) {
auto inRowSize = inputWidth * channels;
auto outRowSize = outputWidth * channels;
auto addScaledVector = [](const X* inVector, int vectorLen, Z weight, Z* outVector) {
Z* outVecEnd = outVector + vectorLen;
for (; outVector != outVecEnd; ++outVector, ++inVector) {
*outVector += weight * static_cast<Z>(*inVector);
}
};
for (int y = 0; y < outputHeight; ++y) {
Z* outRowData = outputPtr + outRowSize * y;
memset(outRowData, '\0', outRowSize * sizeof(Z));// std::fill(outRowData, outRowData + outRowSize, 0.f);
int inRow = starts[y];
auto inRowData = imagePtr + inRowSize * inRow;
auto weightsStart = weights + y * spanSize;
auto realSpanSize = math::nd4j_min(starts[y] + spanSize, static_cast<int>(inputHeight)) - starts[y];
auto weightsEnd = weightsStart + realSpanSize;
for (auto weightPtr = weightsStart; weightPtr != weightsEnd; ++weightPtr) {
addScaledVector(inRowData, inRowSize, *weightPtr, outRowData);
inRowData += inRowSize;
}
}
}
template <typename Z>
static __device__ void gatherColumns(int const spanSize, int const* starts, Z const* weights, Z const* imagesPtr, Nd4jLong const inputHeight, Nd4jLong const inputWidth, Nd4jLong const outputHeight, Nd4jLong const outputWidth, Nd4jLong channels, Z* outputPtr) {
auto inRowSize = inputWidth * channels;
auto outRowSize = outputWidth * channels;
for (auto y = 0LL; y < outputHeight; ++y) {
auto inputRowStart = imagesPtr + inRowSize * y;
auto outPixels = outputPtr + outRowSize * y;
for (auto x = 0LL; x < outputWidth; ++x, outPixels += channels) {
auto inPixels = inputRowStart + starts[x] * channels;
auto weightsStart = weights + x * spanSize;
auto realSpanSize = math::nd4j_min(starts[x] + spanSize, static_cast<int>(inputWidth)) - starts[x];
auto weightsEnd = weightsStart + realSpanSize;
for (int c = 0; c < channels; ++c) {
outPixels[c] = 0.0f;
}
for (auto weightPtr = weightsStart; weightPtr != weightsEnd; ++weightPtr) {
Z w = *weightPtr;
for (int c = 0; c < channels; ++c) {
outPixels[c] += w * static_cast<Z>(inPixels[c]);
}
inPixels += channels;
}
}
}
}
template <typename X, typename Z>
static __global__ void batchedGatherSpan(Nd4jLong batchSize, Nd4jLong inputWidth, Nd4jLong inputHeight, Nd4jLong outputWidth, Nd4jLong outputHeight, Nd4jLong channels, int rowSpanSize, int const* rowStartsBuf, Z const* rowWeightBuf, int columnSpanSize, int const* columnStartsBuf, Z const* columnWeightBuf, X const* pImages, Z* pIntermediate, Z* pOutput,
Nd4jLong inputPixPerBatch, Nd4jLong intermediatePixPerBatch, Nd4jLong outputPixPerBatch) {
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int b = tid; b < batchSize; b += step) {
auto imagePtr = pImages + b * inputPixPerBatch;
auto intermediatePtr = pIntermediate + b * intermediatePixPerBatch;
auto outputPtr = pOutput + b * outputPixPerBatch;
gatherRows<X, Z>(rowSpanSize, rowStartsBuf, rowWeightBuf,
imagePtr, inputHeight, inputWidth, outputHeight,
inputWidth, channels, intermediatePtr);
gatherColumns<Z>(columnSpanSize, columnStartsBuf, columnWeightBuf,
intermediatePtr, outputHeight, inputWidth, outputHeight, outputWidth, channels, outputPtr);
}
}
template <typename X, typename Z>
static void gatherSpans(LaunchContext* context, int const rowSpanSize, NDArray const& rowStarts, NDArray const& rowWeights, int const colSpanSize, NDArray const& columnStarts, NDArray const& columnWeights, NDArray const* images, NDArray& intermediate, NDArray* output) {
auto batchSize = images->sizeAt(0);
auto inputHeight = images->sizeAt(1);
auto inputWidth = images->sizeAt(2);
auto channels = images->sizeAt(3);
auto outputHeight = output->sizeAt(1);
auto outputWidth = output->sizeAt(2);
auto inputPixPerBatch = inputWidth * inputHeight * channels;
auto intermediatePixPerBatch = inputWidth * outputHeight * channels;
auto outputPixPerBatch = outputWidth * outputHeight * channels;
auto intermediatePtr = reinterpret_cast<Z*>(intermediate.specialBuffer());
auto imagePtr = reinterpret_cast<X const*>(images->specialBuffer());
auto outputPtr = reinterpret_cast<Z*>(output->specialBuffer());
auto stream = context->getCudaStream();
auto rowStartsBuf = reinterpret_cast<int const*>(rowStarts.specialBuffer());
auto rowWeightBuf = reinterpret_cast<Z const*>(rowWeights.specialBuffer());
auto columnStartsBuf = reinterpret_cast<int const*>(columnStarts.specialBuffer());
auto columnWeightBuf = reinterpret_cast<Z const*>(columnWeights.specialBuffer());
batchedGatherSpan<X,Z><<<128, 128, 256, *stream>>>(batchSize, inputWidth, inputHeight, outputWidth, outputHeight, channels, rowSpanSize, rowStartsBuf, rowWeightBuf, colSpanSize, columnStartsBuf, columnWeightBuf, imagePtr, intermediatePtr, outputPtr, inputPixPerBatch, intermediatePixPerBatch, outputPixPerBatch);
}
template <typename X, typename Z>
static int resizeKernel(LaunchContext* context, ImageResizeMethods method, NDArray const* input, Nd4jLong outWidth, Nd4jLong outHeight, bool antialias, NDArray* output) {
Nd4jLong const batchSize = input->sizeAt(0);
Nd4jLong const inputHeight = input->sizeAt(1);
Nd4jLong const inputWidth = input->sizeAt(2);
Nd4jLong const channels = input->sizeAt(3);
NDArray::prepareSpecialUse({output}, {input});
Z rowScale = Z(outHeight) / Z(inputHeight);
Z columnScale = Z(outWidth) / Z(inputWidth);
// Return if the output is empty.
if (output->lengthOf() == 0) return Status::OK();
Spans colSpans;
Spans rowSpans;
auto res = Status::OK();
switch(method) {
case kResizeBilinear: {
TriangleKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias,
colSpans);
if (res != Status::OK()) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
}
break;
case kResizeBicubic: {
KeysCubicKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias,
colSpans);
if (res != Status::OK()) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeLanczos3:{
LanczosKernelFunc kernel(3.f);
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias,
colSpans);
if (res != Status::OK()) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeLanczos5: {
LanczosKernelFunc kernel(5.f);
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias,
colSpans);
if (res != Status::OK()) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeGaussian: {
GaussianKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias,
colSpans);
if (res != Status::OK()) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
case kResizeMitchellcubic:{
MitchellCubicKernelFunc kernel;
res = computeSpans(context, kernel, outWidth, inputWidth, columnScale, 0.f, antialias,
colSpans);
if (res != Status::OK()) return res;
res = computeSpans(context, kernel, outHeight, inputHeight, rowScale, 0.f, antialias, rowSpans);
} break;
    default:
        return Status::CODE(ND4J_STATUS_BAD_INPUT, "helpers::resizeKernel: unsupported resize method");
}
if (res != Status::OK()) return res;
NDArray intermediate = NDArrayFactory::create<Z>('c', {batchSize, outHeight, inputWidth, channels});
//const functor::Spans& const_row_spans = row_spans;
//typename TTypes<int32, 1>::ConstTensor row_starts(
//const_row_spans.starts.tensor<int32, 1>());
auto& rowStarts = rowSpans._starts; // shape {outHeight}
auto& rowWeights = rowSpans._weights; // shape {outHeight, spanSize}
auto& columnStarts = colSpans._starts; // shape {outWidth}
auto& columnWeights = colSpans._weights; // shape {outWidth, spanSize}
gatherSpans<X, Z>(context, rowSpans._spanSize, rowStarts, rowWeights, colSpans._spanSize, columnStarts, columnWeights, input, intermediate, output);
NDArray::registerSpecialUse({output}, {input});
return res;
}
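    // resizeKernel serves the kernel-based methods (triangle/bilinear, Keys bicubic, Lanczos3/5, Gaussian,
    // Mitchell cubic). Nearest-neighbor and area resizes, and bicubic without antialiasing, are routed by
    // the wrappers below to the dedicated resizeNeighborFunctor / resizeAreaFunctor / resizeBicubicFunctorA helpers.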
static int resizeTriangle(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
// std::unique_ptr<IKernelFunc> kernel(new TriangleKernelFunc);
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,(context, kResizeBilinear, image, width, height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeTriangle: This resize method is avaliable in future versions");
}
static int resizeLanczos3(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
// std::unique_ptr<IKernelFunc> kernel(new LanczosKernelFunc(3.f));
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,(context, kResizeLanczos3, image, width, height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeLanczos3: This resize method is avaliable in future versions");
}
static int resizeLanczos5(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
// std::unique_ptr<IKernelFunc> kernel(new LanczosKernelFunc(5.f));
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,(context, kResizeLanczos5, image, width, height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeLanczos5: This resize method is avaliable in future versions");
}
static int resizeGaussian(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,(context, kResizeGaussian, image, width, height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeGaussian: This resize method is avaliable in future versions");
}
static int resizeMitchellcubic(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,(context, kResizeMitchellcubic, image, width, height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeMitchelcubic: This resize method is avaliable in future versions");
}
static int resizeKeycubic(sd::LaunchContext * context, NDArray const* image, int const width, int const height, bool const antialias, NDArray* output) {
if (!antialias)
return resizeBicubicFunctorA(context, image, width, height, false, true, output);
BUILD_DOUBLE_SELECTOR(image->dataType(), output->dataType(), return resizeKernel,(context, kResizeBicubic, image, width, height, antialias, output), NUMERIC_TYPES, FLOAT_TYPES_1);
return Status::CODE(ND4J_STATUS_VALIDATION, "helpers::resizeKeycubic: This resize method is avaliable in future versions");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeFunctor(sd::LaunchContext * context, NDArray const* image, int width, int height,
ImageResizeMethods method, bool antialias, NDArray* output) {
switch (method) {
case kResizeBilinear: return resizeTriangle(context, image, width, height, antialias, output);
case kResizeNearest: return resizeNeighborFunctor(context, image, width, height, false, true, output);
case kResizeBicubic: return resizeKeycubic(context, image, width, height, antialias, output);
case kResizeLanczos3: return resizeLanczos3(context, image, width, height, antialias, output);
case kResizeLanczos5: return resizeLanczos5(context, image, width, height, antialias, output);
case kResizeGaussian: return resizeGaussian(context, image, width, height, antialias, output);
case kResizeArea: return resizeAreaFunctor(context, image, width, height, false, output);
case kResizeMitchellcubic: return resizeMitchellcubic(context, image, width, height, antialias, output);
default:
nd4j_printf("helper::resizeFunctor: Wrong resize method %i\n", (int)method);
throw std::runtime_error("helper::resizeFunctor: Wrong resize method.");
}
return ND4J_STATUS_OK;
}
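    // Minimal usage sketch (hypothetical names; assumes an NHWC float32 image NDArray and a valid LaunchContext* ctx):
    //   NDArray resized('c', {batchSize, newHeight, newWidth, channels}, sd::DataType::FLOAT32, ctx);
    //   auto status = resizeFunctor(ctx, &image, newWidth, newHeight, kResizeLanczos5, /*antialias=*/true, &resized);
    //   if (status != Status::OK()) { /* handle the failure */ }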
}
}
}


@ -28,13 +28,17 @@ namespace ops {
namespace helpers {
enum ImageResizeMethods {
-    kResizeBilinear = 1,
-    kResizeBicubic,
+    kResizeBilinear = 0, // as java require
     kResizeNearest,
+    kResizeBicubic,
+    kResizeArea,
     kResizeGaussian,
+    kResizeLanczos3,
     kResizeLanczos5,
-    kResizeMitchelcubic,
-    kResizeArea
+    kResizeMitchellcubic,
+    kResizeFirst = kResizeBilinear,
+    kResizeLast = kResizeMitchellcubic,
+    kResizeOldLast = kResizeArea
};
int resizeBilinearFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
@ -49,7 +53,10 @@ namespace helpers {
                   bool const alignCorners, NDArray* output);
int resizeFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
-                  ImageResizeMethods method, bool preserveAspectRatio, bool antialias, NDArray* output);
+                  ImageResizeMethods method, bool antialias, NDArray* output);
+int resizeImagesFunctor(sd::LaunchContext * context, NDArray const* image, int const width, int const height,
+                  ImageResizeMethods method, bool alignCorners, NDArray* output);
}
}
}


@ -396,6 +396,29 @@ TEST_F(DeclarableOpsTests10, TestMarixBandPart_Test_1) {
    ASSERT_TRUE(exp.equalsTo(results.at(0)));
}
///////////////////////////////////////////////////////////////////
TEST_F(DeclarableOpsTests10, TestMarixBandPart_Test_2) {
auto x = NDArrayFactory::create<double>('c', {2, 3, 3});
auto minD = NDArrayFactory::create<int>(1);
auto maxD = NDArrayFactory::create<int>(1);
auto exp = NDArrayFactory::create<double>('c', {2, 3, 3});
x.linspace(1);
exp.linspace(1);
exp.p(0, 0, 2, 0.);
exp.p(1, 0, 2, 0.);
exp.p(0, 2, 0, 0.);
exp.p(1, 2, 0, 0.);
sd::ops::matrix_band_part op;
auto results = op.evaluate({&x, &minD, &maxD}, {}, {});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
//results.at(0)->printIndexedBuffer("MBP Test1");
//exp.printIndexedBuffer("MBP Expec");
ASSERT_TRUE(exp.equalsTo(results.at(0)));
}
//////////////////////////////////////////////////////////////////////////////
TEST_F(DeclarableOpsTests10, atan2_test1) {
@ -1528,6 +1551,71 @@ TEST_F(DeclarableOpsTests10, ImageResizeBilinear_Test01) {
}
TEST_F(DeclarableOpsTests10, ResizeImages_Test1) {
NDArray input = NDArrayFactory::create<float>('c', {2, 4, 5, 3});
input.linspace(1.);
auto expected = NDArrayFactory::create<float>('c', {2, 7, 9, 3}, {
1.f, 2.f, 3.f, 2.6666667f, 3.6666667f, 4.666667f, 4.3333335f, 5.3333335f, 6.3333335f, 6.f,
7.f, 8.f, 7.666667f, 8.666667f, 9.666667f, 9.333334f, 10.333334f, 11.333334f, 11.f, 12.f,
13.f, 12.666667f, 13.666667f, 14.666667f, 13.f, 14.f, 15.f, 9.571429f, 10.571429f, 11.571429f,
11.238095f, 12.238095f, 13.238095f, 12.904762f, 13.904762f, 14.904762f, 14.571429f, 15.571429f, 16.57143f,
16.238096f, 17.238096f, 18.238096f, 17.904762f, 18.904762f, 19.904762f, 19.57143f, 20.57143f, 21.57143f,
21.238096f, 22.238096f, 23.238096f, 21.57143f, 22.57143f, 23.57143f, 18.142859f, 19.142859f, 20.142859f,
19.809525f, 20.809525f, 21.809525f, 21.476192f, 22.476192f, 23.476192f, 23.142859f, 24.142859f, 25.142859f,
24.809526f, 25.809526f, 26.809526f, 26.476192f, 27.476192f, 28.476192f, 28.142859f, 29.142859f, 30.142859f,
29.809526f, 30.809526f, 31.809526f, 30.142859f, 31.142859f, 32.142857f, 26.714287f, 27.714287f, 28.714287f,
28.380955f, 29.380955f, 30.380955f, 30.04762f, 31.04762f, 32.047623f, 31.714287f, 32.714287f, 33.714287f,
33.380955f, 34.380955f, 35.380955f, 35.047623f, 36.047623f, 37.047623f, 36.714287f, 37.714287f, 38.714287f,
38.380955f, 39.380955f, 40.380955f, 38.714287f, 39.714287f, 40.714287f, 35.285717f, 36.285717f, 37.285717f,
36.952385f, 37.952385f, 38.952385f, 38.61905f, 39.61905f, 40.61905f, 40.285717f, 41.285717f, 42.285717f,
41.952385f, 42.952385f, 43.952385f, 43.61905f, 44.61905f, 45.61905f, 45.285717f, 46.285717f, 47.285717f,
46.952385f, 47.952385f, 48.952385f, 47.285717f, 48.285717f, 49.285717f, 43.857143f, 44.857143f, 45.857143f,
45.52381f, 46.52381f, 47.52381f, 47.190475f, 48.190475f, 49.190475f, 48.857143f, 49.857143f, 50.857143f,
50.52381f, 51.52381f, 52.52381f, 52.190475f, 53.190475f, 54.190475f, 53.857143f, 54.857143f, 55.857143f,
55.52381f, 56.52381f, 57.52381f, 55.857143f, 56.857143f, 57.857143f, 46.f, 47.f, 48.f,
47.666668f, 48.666668f, 49.666668f, 49.333332f, 50.333332f, 51.333332f, 51.f, 52.f, 53.f,
52.666668f, 53.666668f, 54.666668f, 54.333332f, 55.333332f, 56.333332f, 56.f, 57.f, 58.f,
57.666668f, 58.666668f, 59.666668f, 58.f, 59.f, 60.f, 61.f, 62.f, 63.f,
62.666668f, 63.666668f, 64.666664f, 64.333336f, 65.333336f, 66.333336f, 66.f, 67.f, 68.f,
67.666664f, 68.666664f, 69.666664f, 69.333336f, 70.333336f, 71.333336f, 71.f, 72.f, 73.f,
72.666664f, 73.666664f, 74.666664f, 73.f, 74.f, 75.f, 69.57143f, 70.57143f, 71.57143f,
71.2381f, 72.2381f, 73.23809f, 72.90476f, 73.90476f, 74.90476f, 74.57143f, 75.57143f, 76.57143f,
76.23809f, 77.23809f, 78.23809f, 77.90476f, 78.90476f, 79.90476f, 79.57143f, 80.57143f, 81.57143f,
81.23809f, 82.23809f, 83.23809f, 81.57143f, 82.57143f, 83.57143f, 78.14286f, 79.14286f, 80.14286f,
79.809525f, 80.809525f, 81.809525f, 81.4762f, 82.4762f, 83.4762f, 83.14286f, 84.14286f, 85.14286f,
84.809525f, 85.809525f, 86.809525f, 86.4762f, 87.4762f, 88.4762f, 88.14286f, 89.14286f, 90.14286f,
89.809525f, 90.809525f, 91.809525f, 90.14286f, 91.14286f, 92.14286f, 86.71429f, 87.71429f, 88.71429f,
88.38095f, 89.38095f, 90.38095f, 90.04762f, 91.04762f, 92.04762f, 91.71429f, 92.71429f, 93.71429f,
93.38095f, 94.38095f, 95.38095f, 95.04762f, 96.04762f, 97.04762f, 96.71429f, 97.71429f, 98.71429f,
98.38095f, 99.38095f, 100.38095f, 98.71429f, 99.71429f, 100.71429f, 95.28571f, 96.28571f, 97.28571f,
96.95238f, 97.95238f, 98.95238f, 98.61905f, 99.61905f, 100.61905f, 100.28571f, 101.28571f, 102.28571f,
101.95238f, 102.95238f, 103.95238f, 103.61905f, 104.61905f, 105.61905f, 105.28571f, 106.28571f, 107.28571f,
106.95238f, 107.95238f, 108.95238f, 107.28571f, 108.28571f, 109.28571f, 103.85715f, 104.85715f, 105.85715f,
105.5238f, 106.5238f, 107.5238f,107.190475f,108.190475f,109.190475f, 108.85715f, 109.85715f, 110.85715f,
110.5238f, 111.5238f, 112.5238f,112.190475f,113.190475f,114.190475f, 113.85715f, 114.85715f, 115.85715f,
115.5238f, 116.5238f, 117.5238f, 115.85715f, 116.85715f, 117.85715f, 106.f, 107.f, 108.f,
107.666664f,108.666664f,109.666664f,109.333336f,110.333336f,111.333336f, 111.f, 112.f, 113.f,
112.666664f,113.666664f,114.666664f,114.333336f,115.333336f,116.333336f, 116.f, 117.f, 118.f,
117.666664f,118.666664f,119.666664f, 118.f, 119.f, 120.f
});
auto size = NDArrayFactory::create<int>({7, 11});
sd::ops::resize_images op;
auto results = op.evaluate({&input, &size}, {}, {0}, {false, true}); // resize with bilinear method
ASSERT_EQ(ND4J_STATUS_OK, results.status());
NDArray *result = results.at(0);
// result->printBuffer("Resized to 7x9");
// expected.printBuffer("Expect for 7x9");
// result.printShapeInfo("Output shape");
// expected.printShapeInfo("Expect shape");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests10, ImageResizeBilinear_Test02) {
    NDArray input = NDArrayFactory::create<float>('c', {2, 5,5,3}, {


@ -25,6 +25,7 @@
#include <ops/ops.h>
#include <helpers/GradCheck.h>
#include <helpers/MmulHelper.h>
+#include <ops/declarable/helpers/image_resize.h>
using namespace sd;
@ -1346,6 +1347,34 @@ TEST_F(DeclarableOpsTests11, ImageResizeArea_Test8) {
    ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests11, ResizeImages_Test8) {
NDArray input = NDArrayFactory::create<int>('c', {1, 3, 3, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9
});
NDArray expected = NDArrayFactory::create<float>('c', {1, 6, 6, 1}, {
// 1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f, 6.f, 6.f, 4.f, 4.f, 5.f, 5.f,
// 6.f, 6.f, 7.f, 7.f, 8.f, 8.f, 9.f, 9.f, 7.f, 7.f, 8.f, 8.f, 9.f, 9.f
1.f , 1.f , 1.5f, 2.f , 2.f, 3.f, 1.f , 1.f , 1.5f, 2.f , 2.f, 3.f,
2.5f, 2.5f, 3.f, 3.5f, 3.5f, 4.5f, 4.f , 4.f , 4.5f , 5.f, 5.f, 6.f ,
4.f, 4.f, 4.5f , 5.f, 5.f, 6.f, 7.f , 7.f , 7.5f , 8.f , 8.f , 9.f
});
//input.linspace(1);
// auto size = NDArrayFactory::create<int>({6, 6});
sd::ops::resize_images op;
auto results = op.evaluate({&input}, {}, {6, 8, ops::helpers::kResizeArea}, {true, true}); // resize_area to 6x8 with align corners and preserve aspect ratio of input image
ASSERT_EQ(ND4J_STATUS_OK, results.status());
NDArray* result = results.at(0);
// result->printBuffer("Area Resized to 6x6");
// expected.printBuffer("Area Expect for 6x6");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
///////////////////////////////////////////////////////////////////
TEST_F(DeclarableOpsTests11, ImageResizeArea_Test9) {
@ -1354,7 +1383,10 @@ TEST_F(DeclarableOpsTests11, ImageResizeArea_Test9) {
    });
    NDArray expected = NDArrayFactory::create<float>('c', {1, 10, 10, 4}, {
1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333336f, 8.999999f, 9.999999f, 11.000000f, 11.999999f, 8.999999f, 9.999999f, 11.000000f, 11.999999f, 8.999998f, 9.999997f, 10.999997f, 11.999997f, 13.000003f, 14.000004f, 15.000003f, 16.000004f, 13.000003f, 14.000004f, 15.000003f, 16.000004f, 13.000003f, 14.000004f, 15.000003f, 16.000004f, 15.666671f, 16.666672f, 17.666672f, 18.666672f, 17.000006f, 18.000004f, 19.000006f, 20.000004f, 17.000006f, 18.000004f, 19.000006f, 20.000004f, 18.333344f, 19.333344f, 20.333345f, 21.333344f, 21.000006f, 22.000006f, 23.000006f, 24.000006f, 21.000006f, 22.000006f, 23.000006f, 24.000006f, 21.000002f, 22.000000f, 23.000002f, 24.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 15.666667f, 16.666668f, 17.666668f, 18.666668f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 18.333340f, 19.333340f, 20.333342f, 21.333340f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 20.999996f, 21.999996f, 22.999994f, 23.999996f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 15.666667f, 16.666668f, 17.666668f, 18.666668f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 18.333340f, 19.333340f, 20.333342f, 21.333340f, 21.000002f, 
22.000000f, 22.999998f, 24.000000f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 20.999996f, 21.999996f, 22.999994f, 23.999996f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 15.666667f, 16.666668f, 17.666668f, 18.666668f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 18.333340f, 19.333340f, 20.333342f, 21.333340f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 20.999996f, 21.999996f, 22.999994f, 23.999996f, 12.999995f, 13.999995f, 14.999994f, 15.999994f, 12.999995f, 13.999995f, 14.999994f, 15.999994f, 12.999995f, 13.999995f, 14.999994f, 15.999994f, 15.666661f, 16.666662f, 17.666660f, 18.666660f, 16.999994f, 17.999994f, 18.999992f, 19.999992f, 16.999994f, 17.999994f, 18.999992f, 19.999992f, 18.333334f, 19.333332f, 20.333334f, 21.333332f, 20.999992f, 21.999992f, 22.999990f, 23.999992f, 20.999992f, 21.999992f, 22.999990f, 23.999992f, 20.999989f, 21.999989f, 22.999987f, 23.999987f 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f,
3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f,
5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f,
11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333337f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 9.000000f, 10.000000f, 11.000000f, 12.000000f, 8.999998f, 9.999998f, 10.999998f, 11.999998f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 1.000000f, 2.000000f, 3.000000f, 4.000000f, 3.666667f, 4.666667f, 5.666667f, 6.666667f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 5.000000f, 6.000000f, 7.000000f, 8.000000f, 6.333336f, 7.333336f, 8.333336f, 9.333336f, 8.999999f, 9.999999f, 11.000000f, 11.999999f, 8.999999f, 9.999999f, 11.000000f, 11.999999f, 8.999998f, 9.999997f, 10.999997f, 11.999997f, 13.000003f, 14.000004f, 15.000003f, 16.000004f, 13.000003f, 14.000004f, 15.000003f, 16.000004f, 13.000003f, 14.000004f, 15.000003f, 16.000004f, 15.666671f, 16.666672f, 17.666672f, 18.666672f, 17.000006f, 18.000004f, 19.000006f, 20.000004f, 17.000006f, 18.000004f, 19.000006f, 20.000004f, 18.333344f, 19.333344f, 20.333345f, 21.333344f, 21.000006f, 22.000006f, 23.000006f, 24.000006f, 21.000006f, 22.000006f, 23.000006f, 24.000006f, 21.000002f, 22.000000f, 23.000002f, 24.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 15.666667f, 16.666668f, 17.666668f, 18.666668f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 18.333340f, 19.333340f, 20.333342f, 21.333340f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 20.999996f, 21.999996f, 22.999994f, 23.999996f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 15.666667f, 16.666668f, 17.666668f, 18.666668f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 18.333340f, 19.333340f, 20.333342f, 21.333340f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 20.999996f, 21.999996f, 22.999994f, 23.999996f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 13.000000f, 14.000001f, 15.000000f, 16.000000f, 15.666667f, 16.666668f, 17.666668f, 18.666668f, 17.000002f, 
18.000000f, 19.000002f, 20.000000f, 17.000002f, 18.000000f, 19.000002f, 20.000000f, 18.333340f, 19.333340f, 20.333342f, 21.333340f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 21.000002f, 22.000000f, 22.999998f, 24.000000f, 20.999996f, 21.999996f, 22.999994f, 23.999996f, 12.999995f, 13.999995f, 14.999994f, 15.999994f, 12.999995f, 13.999995f, 14.999994f, 15.999994f, 12.999995f, 13.999995f, 14.999994f, 15.999994f, 15.666661f, 16.666662f, 17.666660f, 18.666660f, 16.999994f, 17.999994f, 18.999992f, 19.999992f, 16.999994f, 17.999994f, 18.999992f, 19.999992f, 18.333334f, 19.333332f, 20.333334f, 21.333332f, 20.999992f, 21.999992f, 22.999990f, 23.999992f, 20.999992f, 21.999992f, 22.999990f, 23.999992f, 20.999989f, 21.999989f, 22.999987f, 23.999987f
    });
    //input.linspace(1);


@ -27,6 +27,7 @@
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/MmulHelper.h>
+#include <ops/declarable/helpers/image_resize.h>
using namespace sd;
@ -2821,6 +2822,330 @@ TEST_F(DeclarableOpsTests12, QR_Test_2) {
}
TEST_F(DeclarableOpsTests12, ImageResize_Test1) {
NDArray input = NDArrayFactory::create<float>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
0.628328f, 0.97913796f, 1.8058043f, 2.563919f, 2.844548f,
3.6026628f, 4.4293294f, 4.7801394f, 2.9474494f, 3.2982588f,
4.1249247f, 4.8830395f, 5.1636696f, 5.9217834f, 6.7484493f,
7.09926f, 8.165832f, 8.516642f, 9.3433075f, 10.101422f,
10.382052f, 11.140167f, 11.966835f, 12.317646f, 10.924093f,
11.274903f, 12.10157f, 12.859686f, 13.140315f, 13.898429f,
14.725095f, 15.075906f, 13.682358f, 14.033167f, 14.859833f,
15.617949f, 15.898578f, 16.656693f, 17.48336f, 17.834171f,
18.900742f, 19.251549f, 20.078213f, 20.83633f, 21.11696f,
21.875074f, 22.701742f, 23.052553f, 21.219858f, 21.57067f,
22.397337f, 23.155449f, 23.436079f, 24.194195f, 25.020863f,
25.371672f
});
sd::ops::image_resize op;
// resize with the Lanczos5 kernel, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeLanczos5}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Lancos5 Resized to 7x8");
// expected.printBuffer("Lancos5 Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test2) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
0.628328f, 0.97913796f, 1.8058043f, 2.563919f, 2.844548f,
3.6026628f, 4.4293294f, 4.7801394f, 2.9474494f, 3.2982588f,
4.1249247f, 4.8830395f, 5.1636696f, 5.9217834f, 6.7484493f,
7.09926f, 8.165832f, 8.516642f, 9.3433075f, 10.101422f,
10.382052f, 11.140167f, 11.966835f, 12.317646f, 10.924093f,
11.274903f, 12.10157f, 12.859686f, 13.140315f, 13.898429f,
14.725095f, 15.075906f, 13.682358f, 14.033167f, 14.859833f,
15.617949f, 15.898578f, 16.656693f, 17.48336f, 17.834171f,
18.900742f, 19.251549f, 20.078213f, 20.83633f, 21.11696f,
21.875074f, 22.701742f, 23.052553f, 21.219858f, 21.57067f,
22.397337f, 23.155449f, 23.436079f, 24.194195f, 25.020863f,
25.371672f
});
sd::ops::image_resize op;
// resize with the Lanczos5 kernel, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeLanczos5}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result.printBuffer("Lanczos5 Resized to 8x7");
// expected.printBuffer("Lanczos5 Expect for 8x7");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test3) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
0.6537938f, 1.0309073f, 1.8018917f, 2.4606667f, 2.9888396f, 3.6476145f, 4.418599f,
4.7957115f, 3.1913466f, 3.5684595f, 4.3394437f, 4.998219f, 5.526393f, 6.185168f,
6.956152f, 7.3332644f, 7.626866f, 8.00398f, 8.774965f, 9.433739f, 9.961912f,
10.620688f, 11.391673f, 11.7687845f, 10.929041f, 11.306154f, 12.077138f, 12.735914f,
13.264087f, 13.922862f, 14.693848f, 15.07096f, 14.231217f, 14.60833f, 15.379314f,
16.038086f, 16.56626f, 17.225037f, 17.996023f, 18.373135f, 18.666735f, 19.043848f,
19.814833f, 20.473606f, 21.00178f, 21.660557f, 22.431541f, 22.808653f, 21.204287f,
21.581398f, 22.352386f, 23.01116f, 23.539333f, 24.19811f, 24.969095f, 25.346205f
});
sd::ops::image_resize op;
// resize with the Lanczos3 kernel, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeLanczos3}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result.printBuffer("Lanczos3 Resized to 8x7");
// expected.printBuffer("Lanczos3 Expect for 8x7");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test4) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
1.4150869f, 1.7928237f, 2.4084527f, 3.0680697f, 3.6419308f, 4.301548f, 4.9171767f,
5.294914f, 4.012885f, 4.390622f, 5.0062513f, 5.6658688f, 6.23973f, 6.899347f,
7.514975f, 7.8927126f, 7.358912f, 7.736648f, 8.352278f, 9.011895f, 9.585756f,
10.245375f, 10.861001f, 11.238739f, 11.060086f, 11.437822f, 12.0534525f, 12.713069f,
13.28693f, 13.946548f, 14.562176f, 14.939912f, 14.761261f, 15.138998f, 15.754629f,
16.414246f, 16.988108f, 17.647724f, 18.263351f, 18.641088f, 18.107288f, 18.485023f,
19.100655f, 19.760273f, 20.334133f, 20.993752f, 21.609377f, 21.987114f, 20.705086f,
21.082823f, 21.698452f, 22.35807f, 22.93193f, 23.591549f, 24.207174f, 24.584913f
});
sd::ops::image_resize op;
// resize with the Gaussian kernel, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeGaussian}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result.printBuffer("Lanczos3 Resized to 8x7");
// expected.printBuffer("Lanczos3 Expect for 8x7");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test5) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
0.6372399f, 1.0536414f, 1.7716959f, 2.3966959f, 3.0216959f, 3.6466963f, 4.3647504f, 4.781152f,
3.3926036f, 3.8090053f, 4.5270596f, 5.1520596f, 5.7770596f, 6.4020596f, 7.1201134f, 7.5365143f,
7.358708f, 7.7751093f, 8.493164f, 9.118163f, 9.743165f, 10.368165f, 11.086218f, 11.502619f,
10.928043f, 11.344445f, 12.0625f, 12.6875f, 13.3125f, 13.9375f, 14.655554f, 15.071955f,
14.49738f, 14.913782f, 15.631836f, 16.256836f, 16.881836f, 17.506836f, 18.22489f, 18.64129f,
18.463486f, 18.879889f, 19.597942f, 20.222942f, 20.847942f, 21.472942f, 22.190996f, 22.607397f,
21.218851f, 21.635252f, 22.353308f, 22.978308f, 23.603308f, 24.228308f, 24.946362f, 25.362762f
});
sd::ops::image_resize op;
// resize with the bicubic kernel, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeBicubic}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Bicubic Resized to 7x8");
// expected.printBuffer("Bicubic Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test6) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
0.63678247f, 1.0531839f, 1.7712381f, 2.396238f, 3.021238f , 3.646238f, 4.364292f, 4.780694f,
3.3934183f, 3.8098197f, 4.5278745f, 5.1528745f, 5.7778745f, 6.402874f, 7.1209283f, 7.5373297f,
7.3566165f, 7.7730184f, 8.491073f, 9.116073f, 9.741073f, 10.366074f , 11.084127f , 11.500528f,
10.928043f, 11.344445f, 12.0625f , 12.6875f , 13.3125f , 13.9375f , 14.655554f, 15.071955f , 14.499474f , 14.915876f , 15.633932f, 16.25893f, 16.883932f, 17.508932f, 18.226984f , 18.643385f,
18.46267f, 18.87907f, 19.597128f, 20.222126f , 20.847128f, 21.472126f, 22.190182f , 22.606583f , 21.219305f, 21.635706f ,
22.353762f, 22.978762f , 23.603762f , 24.228764f, 24.946815f , 25.363216f
});
sd::ops::image_resize op;
// resize with the bicubic kernel, with antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeBicubic}, {false, true});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Bicubic Resized to 7x8");
// expected.printBuffer("Bicubic Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test7) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
0.98593485f, 1.3872082f, 2.0625007f, 2.6875007f, 3.3125012f, 3.937501f, 4.612794f, 5.014066f,
3.6096964f, 4.01097f, 4.6862626f, 5.311262f, 5.936263f, 6.561262f, 7.2365556f, 7.637828f,
7.4145045f, 7.8157787f, 8.491071f, 9.116072f, 9.741073f, 10.366072f, 11.041365f, 11.4426365f,
10.985933f, 11.387209f, 12.062499f, 12.687501f, 13.312502f, 13.9375f, 14.612794f, 15.014066f,
14.557361f, 14.958637f, 15.633926f, 16.25893f, 16.88393f, 17.508926f, 18.18422f, 18.585491f,
18.36217f, 18.763443f, 19.438736f, 20.063736f, 20.688738f, 21.313736f, 21.98903f, 22.3903f,
20.985931f, 21.387209f, 22.0625f, 22.6875f, 23.3125f, 23.937498f, 24.612793f, 25.014061f
});
sd::ops::image_resize op;
// resize with the Mitchell cubic kernel, with antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeMitchellcubic}, {false, true});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Mitchell cubic Resized to 7x8");
// expected.printBuffer("Mitchell cubic Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test8) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
1.f , 1.4375f , 2.0625f , 2.6875f , 3.3125f , 3.9375f , 4.5625f , 5.f ,
3.8571427f, 4.2946424f, 4.9196424f, 5.5446424f, 6.1696424f, 6.7946424f, 7.4196424f, 7.8571424f,
7.4285717f, 7.8660717f, 8.491072f , 9.116072f , 9.741072f , 10.366072f , 10.991072f , 11.428572f ,
11.f , 11.4375f , 12.0625f , 12.6875f , 13.3125f , 13.9375f , 14.5625f , 15.f ,
14.571429f , 15.008929f, 15.633929f, 16.25893f , 16.88393f , 17.50893f , 18.13393f , 18.57143f ,
18.142857f , 18.580357f, 19.205357f, 19.830357f , 20.455357f , 21.080357f , 21.705357f , 22.142857f ,
21.f , 21.4375f , 22.0625f , 22.6875f , 23.3125f , 23.9375f , 24.5625f , 25.f
});
sd::ops::image_resize op;
// resize with the bilinear kernel, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeBilinear}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Bilinear Resized to 7x8");
// expected.printBuffer("Bilinear Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test9) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
1.f , 1.4f , 2.f , 2.8f , 3.2f , 4.f , 4.6f , 5.f ,
4.f , 4.4f , 5.f , 5.8f , 6.2f , 7.f , 7.6f , 8.f ,
6.999998f, 7.399998f, 7.999998f, 8.799997f, 9.199997f, 9.999997f, 10.599997f, 10.999996f,
11.f, 11.399999f, 12.f, 12.799999f, 13.199999f, 13.999998f, 14.599998f, 14.999999f,
15.f, 15.4f, 16.f, 16.8f, 17.2f, 18.f, 18.6f, 19.f, 17.999989f,
18.399990f, 18.999989f, 19.799988f, 20.199987f, 20.999989f, 21.599989f, 21.999989f, 21.f,
21.4f, 22.f, 22.8f, 23.2f, 24.f, 24.6f, 25.f
});
sd::ops::image_resize op;
// resize with the area method, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeArea}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Area Resized to 7x8");
// expected.printBuffer("Area Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test10) {
NDArray input = NDArrayFactory::create<float>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<float>('c', {1, 7, 8, 1}, {
1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, 6,
6, 7, 8, 8, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 16,
17, 18, 18, 19, 20, 20, 16, 16, 17, 18, 18, 19, 20, 20, 21, 21, 22,
23, 23, 24, 25, 25
});
sd::ops::image_resize op;
// resize with nearest neighbors, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeNearest}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Nearest neighbor Resized to 7x8");
// expected.printBuffer("Nearest neighbor Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
TEST_F(DeclarableOpsTests12, ImageResize_Test11) {
NDArray input = NDArrayFactory::create<int>('c', {1, 5, 5, 1}, {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
});
auto size = NDArrayFactory::create<int>({7, 8});
NDArray expected = NDArrayFactory::create<int>('c', {1, 7, 8, 1}, {
1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, 6,
6, 7, 8, 8, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 16,
17, 18, 18, 19, 20, 20, 16, 16, 17, 18, 18, 19, 20, 20, 21, 21, 22,
23, 23, 24, 25, 25
});
sd::ops::image_resize op;
// resize with nearest neighbors, without antialiasing and without preserving the aspect ratio
auto results = op.evaluate({&input, &size}, {}, {ops::helpers::kResizeNearest}, {false, false});
ASSERT_EQ(ND4J_STATUS_OK, results.status());
auto result = results[0];///.at(0);
// result->printBuffer("Nearest neighbor Resized to 7x8");
// expected.printBuffer("Nearest neighbor Expect for 7x8");
ASSERT_TRUE(expected.isSameShape(result));
ASSERT_TRUE(expected.equalsTo(result));
}
////////////////////////////////////////////////////////////////////////////////
TEST_F(DeclarableOpsTests12, TriangularSolve_Test_1) {


@ -27,17 +27,12 @@ package org.nd4j.enums;
 * ResizeArea: Anti-aliased resampling with area interpolation. 'antialias' has no effect when used with area interpolation; it always anti-aliases.
 * ResizeMitchelcubic: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp. */
public enum ImageResizeMethod {
-  ResizeBilinear,
-  ResizeBicubic,
+  ResizeBilinear, // as java require
   ResizeNearest,
+  ResizeBicubic,
+  ResizeArea,
   ResizeGaussian,
+  ResizeLanczos3,
   ResizeLanczos5,
-  ResizeMitchelcubic,
-  ResizeArea
+  ResizeMitchellcubic;
}


@ -4417,7 +4417,7 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
/**
 * fill target matrix with given value in one or two directions from main diagonal:
- *   - down from main diagonal starting at subdiagonal number "lower" if direction = 'd' (down) or 'b' (both)
+ *   - down from main diagonal starting at subdiagonal number "lower" if direction = 'l' (down) or 'b' (both)
 *   - up from main diagonal starting at superdiagonal number "upper"if direction = 'u' (up) or 'b' (both)
 * direction - in what direction to fill matrix. There are 3 possible directions:
 *   'u' - fill up, mathematically this corresponds to lower triangular matrix, subdiagonal "lower" unaffected
@ -4830,8 +4830,10 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
// #ifndef __JAVACPP_HACK__
@ -7349,9 +7351,9 @@ public static final int PREALLOC_SIZE = 33554432;
 * Returns the element wise stride for this information
 * buffer
 */
-    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongPointer buffer);
-    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongBuffer buffer);
-    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") long[] buffer);
+    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongPointer shapeInfo);
+    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
+    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") long[] shapeInfo);
/**


@ -4421,7 +4421,7 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
/**
 * fill target matrix with given value in one or two directions from main diagonal:
- *   - down from main diagonal starting at subdiagonal number "lower" if direction = 'd' (down) or 'b' (both)
+ *   - down from main diagonal starting at subdiagonal number "lower" if direction = 'l' (down) or 'b' (both)
 *   - up from main diagonal starting at superdiagonal number "upper"if direction = 'u' (up) or 'b' (both)
 * direction - in what direction to fill matrix. There are 3 possible directions:
 *   'u' - fill up, mathematically this corresponds to lower triangular matrix, subdiagonal "lower" unaffected
@ -4834,8 +4834,10 @@ public native @Cast("bool") boolean isOptimalRequirementsMet();
////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
// #ifndef __JAVACPP_HACK__
@ -7353,9 +7355,9 @@ public static final int PREALLOC_SIZE = 33554432;
 * Returns the element wise stride for this information
 * buffer
 */
-    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongPointer buffer);
-    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongBuffer buffer);
-    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") long[] buffer);
+    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongPointer shapeInfo);
+    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") LongBuffer shapeInfo);
+    @Namespace("shape") public static native @Cast("Nd4jLong") long elementWiseStride(@Cast("const Nd4jLong*") long[] shapeInfo);
/**
@ -21173,214 +21175,6 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
}
// #endif
/**
* This op make bilinear or nearest neighbor interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels) numeric type
* 1 - 2D-Tensor with shape (num_boxes, 4) float type
* 2 - 1D-Tensor with shape (num_boxes) int type
* 3 - 1D-Tensor with 2 values (newWidth, newHeight) (optional) int type
*
* float arguments (optional)
* 0 - exprapolation_value (optional) default 0.f
*
* int arguments: (optional)
* 0 - mode (default 0 - bilinear interpolation)
*
* output array:
* the 4D-Tensor with resized to crop_size images given - float type
*/
// #if NOT_EXCLUDED(OP_crop_and_resize)
@Namespace("sd::ops") public static class crop_and_resize extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public crop_and_resize(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public crop_and_resize(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public crop_and_resize position(long position) {
return (crop_and_resize)super.position(position);
}
public crop_and_resize() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
/**
* This op make bilinear interpolated resize for given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight) (optional)
*
* int arguments: (optional)
* 0 - new width
* 1 - new height
*
* output array:
* the 4D-Tensor with calculated backproped dots
*
* CAUTION: either size tensor or a pair of int params should be provided.
*/
// #if NOT_EXCLUDED(OP_resize_bilinear)
@Namespace("sd::ops") public static class resize_bilinear extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public resize_bilinear(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public resize_bilinear(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public resize_bilinear position(long position) {
return (resize_bilinear)super.position(position);
}
public resize_bilinear() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
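A short sketch of the size-tensor variant, reusing the imports from the crop_and_resize example above; the 5x5 to 8x8 sizes are illustrative, and a pair of int args could be passed instead of the size input.

// Bilinear upscale of one 5x5 RGB image to 8x8 via the optional size tensor.
INDArray image = Nd4j.rand(DataType.FLOAT, 1, 5, 5, 3);
INDArray size = Nd4j.createFromArray(new int[]{8, 8});
INDArray resized = Nd4j.create(DataType.FLOAT, 1, 8, 8, 3);

Nd4j.exec(DynamicCustomOp.builder("resize_bilinear")
        .addInputs(image, size)
        .addOutputs(resized)
        .build());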
/**
* This op makes a nearest neighbor interpolated resize for the given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight) (optional)
*
* int arguments: (optional)
* 0 - new width
* 1 - new height
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
* CAUTION: either size tensor or a pair of int params should be provided.
*/
// #if NOT_EXCLUDED(OP_resize_nearest_neighbor)
@Namespace("sd::ops") public static class resize_nearest_neighbor extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public resize_nearest_neighbor(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public resize_nearest_neighbor(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public resize_nearest_neighbor position(long position) {
return (resize_nearest_neighbor)super.position(position);
}
public resize_nearest_neighbor() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
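The same pattern with the optional int args instead of a size tensor; again an illustrative sketch, with the argument order taken from the javadoc above.

// Nearest neighbor upscale, passing the target width and height as int args.
INDArray image = Nd4j.rand(DataType.FLOAT, 1, 5, 5, 3);
INDArray resized = Nd4j.create(DataType.FLOAT, 1, 8, 8, 3);

Nd4j.exec(DynamicCustomOp.builder("resize_nearest_neighbor")
        .addInputs(image)
        .addIntegerArguments(8, 8)   // new width, new height
        .addOutputs(resized)
        .build());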
/**
* This op makes a bicubic interpolated resize for the given tensor
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
// #if NOT_EXCLUDED(OP_resize_bicubic)
@Namespace("sd::ops") public static class resize_bicubic extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public resize_bicubic(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public resize_bicubic(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public resize_bicubic position(long position) {
return (resize_bicubic)super.position(position);
}
public resize_bicubic() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
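For resize_bicubic the size tensor is required rather than optional, so a sketch under the same assumptions passes it explicitly.

// Bicubic upscale of one 5x5 RGB image to 10x10.
INDArray image = Nd4j.rand(DataType.FLOAT, 1, 5, 5, 3);
INDArray size = Nd4j.createFromArray(new int[]{10, 10});
INDArray resized = Nd4j.create(DataType.FLOAT, 1, 10, 10, 3);

Nd4j.exec(DynamicCustomOp.builder("resize_bicubic")
        .addInputs(image, size)
        .addOutputs(resized)
        .build());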
/**
* This op makes an area interpolated resize (as in the OpenCV INTER_AREA algorithm) for the given tensor
*
* input array:
* 0 - images - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - size - 1D-Tensor with 2 values (newWidth, newHeight) (if missing, a pair of integer args should be provided).
*
* int args: - provided only when the size tensor is missing
* 0 - new height
* 1 - new width
* boolean args:
* 0 - align_corners - optional (default is false)
*
* output array:
* the 4D-Tensor with resized image (shape is {batch, newWidth, newHeight, channels})
*
*/
// #if NOT_EXCLUDED(OP_resize_area)
@Namespace("sd::ops") public static class resize_area extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public resize_area(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public resize_area(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public resize_area position(long position) {
return (resize_area)super.position(position);
}
public resize_area() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
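An illustrative resize_area call; the single boolean argument maps to align_corners per the javadoc, and addBooleanArguments is assumed to be the matching builder hook for it.

// Area (INTER_AREA style) downscale from 8x8 to 4x4, align_corners left false.
INDArray image = Nd4j.rand(DataType.FLOAT, 1, 8, 8, 3);
INDArray size = Nd4j.createFromArray(new int[]{4, 4});
INDArray resized = Nd4j.create(DataType.FLOAT, 1, 4, 4, 3);

Nd4j.exec(DynamicCustomOp.builder("resize_area")
        .addInputs(image, size)
        .addBooleanArguments(false)   // align_corners (optional, default false)
        .addOutputs(resized)
        .build());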
/**
* This op makes an interpolated resize for the given tensor using the given algorithm.
* Supported algorithms are bilinear, bicubic and nearest_neighbor.
* Still to be implemented for full compatibility with TF: lanczos5, gaussian, area and mitchellcubic
*
* input array:
* 0 - 4D-Tensor with shape (batch, sizeX, sizeY, channels)
* 1 - 1D-Tensor with 2 values (newWidth, newHeight)
*
* optional int args:
* 0 - algorithm - bilinear by default
* optional bool args:
* 0 - preserve_aspect_ratio - default False
* 1 - antialias - default False
*
* output array:
* the 4D-Tensor with resized by given algorithm image (shape is {batch, newWidth, newHeight, channels})
*
*/
// #if NOT_EXCLUDED(OP_image_resize)
@Namespace("sd::ops") public static class image_resize extends DeclarableCustomOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public image_resize(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public image_resize(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public image_resize position(long position) {
return (image_resize)super.position(position);
}
public image_resize() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
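A sketch of the generic image_resize entry point; the numeric algorithm index is an assumption (0 is treated as the bilinear default per the javadoc), and the two optional boolean args are left at their defaults.

// Resize one 5x5 RGB image to 3x3 with the default (bilinear) algorithm.
INDArray image = Nd4j.rand(DataType.FLOAT, 1, 5, 5, 3);
INDArray size = Nd4j.createFromArray(new int[]{3, 3});
INDArray resized = Nd4j.create(DataType.FLOAT, 1, 3, 3, 3);

Nd4j.exec(DynamicCustomOp.builder("image_resize")
        .addInputs(image, size)
        .addIntegerArguments(0)   // algorithm: bilinear by default
        .addOutputs(resized)
        .build());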
/**
* Copy a tensor setting everything outside a central band in each innermost matrix
*
@@ -22966,6 +22760,34 @@ public static final int TAD_THRESHOLD = TAD_THRESHOLD();
}
// #endif
/**
* calculates the square root of a matrix such that
* x[..., M, M] = z[..., M, M] x z[..., M, M]
*
* Input array:
* x[..., M, M], the necessary condition is: rank of x >= 2 and the last two dimensions are equal
*
* Output arrays:
* z - same shape as x
*/
// #if NOT_EXCLUDED(OP_sqrtm)
@Namespace("sd::ops") public static class sqrtm extends DeclarableOp {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public sqrtm(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public sqrtm(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public sqrtm position(long position) {
return (sqrtm)super.position(position);
}
public sqrtm() { super((Pointer)null); allocate(); }
private native void allocate();
public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
}
// #endif
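A small hedged example for sqrtm, assuming the op is reachable by name through the same generic executor as the sketches above; the diagonal 2x2 input keeps the expected result easy to check by hand.

// Matrix square root of a diagonal matrix: z x z should reproduce x.
INDArray x = Nd4j.createFromArray(new double[][]{{4.0, 0.0}, {0.0, 9.0}});
INDArray z = Nd4j.create(DataType.DOUBLE, 2, 2);

Nd4j.exec(DynamicCustomOp.builder("sqrtm")
        .addInputs(x)
        .addOutputs(z)
        .build());
// For this input z is expected to be [[2, 0], [0, 3]].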
// #endif


@@ -2107,14 +2107,16 @@ public class TransformOpValidation extends BaseOpValidation {
//TODO: Methods failed ResizeLanczos5, ResizeMitchelcubic, ResizeArea
for (ImageResizeMethod method : ImageResizeMethod.values()) {
if (method==ImageResizeMethod.ResizeLanczos5 || method==ImageResizeMethod.ResizeArea || method==ImageResizeMethod.ResizeMitchellcubic)
{continue;}
log.info("Trying {}", method);
Nd4j.getRandom().setSeed(12345);
SameDiff sd = SameDiff.create();
boolean preserveAspectRatio = true;
boolean antialias = true;
SDVariable inputImage = sd.var(Nd4j.rand(DataType.FLOAT, 1, 5, 5, 3));
// NHWC format
long[] expectedShape = new long[]{1, 3, 3, 3};
SDVariable requestedSize = sd.constant(Nd4j.createFromArray(new long[]{3, 3}));