* platform helpers draft Signed-off-by: raver119 <raver119@gmail.com> * typo Signed-off-by: raver119 <raver119@gmail.com> * disable platform cmake Signed-off-by: raver119 <raver119@gmail.com> * another draft Signed-off-by: raver119 <raver119@gmail.com> * mkldnn convolution refactored Signed-off-by: raver119 <raver119@gmail.com> * minor tweaks Signed-off-by: raver119 <raver119@gmail.com> * one more safety check Signed-off-by: raver119 <raver119@gmail.com> * prototype works Signed-off-by: raver119 <raver119@gmail.com> * meh Signed-off-by: raver119 <raver119@gmail.com> * force static library mode for mkldnn Signed-off-by: raver119 <raver119@gmail.com> * - ismax fix - experimental arg fix - don't enforce openblas on Apple hardware Signed-off-by: raver119 <raver119@gmail.com> * bunch of small fixes Signed-off-by: raver119@gmail.com <raver119@gmail.com> * declare concurrent Signed-off-by: raver119@gmail.com <raver119@gmail.com> * - MKLDNN version upgrade to 1.0.2 - avgpool2d/maxpool2d APIs update Signed-off-by: raver119 <raver119@gmail.com> * - avgpool2d_bp/maxpool2d_bp APIs update Signed-off-by: raver119 <raver119@gmail.com> * - conv2d/batchnorm APIs update Signed-off-by: raver119 <raver119@gmail.com> * - lrn/conv2d_bp/conv3d/conv3d_bp APIs update Signed-off-by: raver119 <raver119@gmail.com> * all ops converted to MKLDNN 1.x Signed-off-by: raver119 <raver119@gmail.com> * bunch of tweaks Signed-off-by: raver119 <raver119@gmail.com> * namespace for platform helpers Signed-off-by: raver119 <raver119@gmail.com> * make sure platform helpers aren't opimized out Signed-off-by: raver119 <raver119@gmail.com> * build cpu_features on x86 systems Signed-off-by: raver119 <raver119@gmail.com> * build cpu_features on x86 systems Signed-off-by: raver119 <raver119@gmail.com> * more of cpu_features Signed-off-by: raver119 <raver119@gmail.com> * - mkldnn removed from java - cpu_features checks in CpuNDArrayFactory Signed-off-by: raver119 <raver119@gmail.com> * F16C definition renamed 
Signed-off-by: raver119 <raver119@gmail.com> * some mkldnn rearrangements Signed-off-by: raver119 <raver119@gmail.com> * check supported instructions before doing anything Signed-off-by: raver119 <raver119@gmail.com> * typo Signed-off-by: raver119 <raver119@gmail.com> * missed impl Signed-off-by: raver119 <raver119@gmail.com> * BUILD_PIC option Signed-off-by: raver119 <raver119@gmail.com> * conv2d fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool3d fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool3d_bp fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool2d_bp leak fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool3d_bp leak fix Signed-off-by: raver119 <raver119@gmail.com> * maxpool bp leaks fixed Signed-off-by: raver119 <raver119@gmail.com> * printf removed Signed-off-by: raver119 <raver119@gmail.com> * batchnorm fix Signed-off-by: raver119 <raver119@gmail.com> * AVX warning/error polishing Signed-off-by: AlexDBlack <blacka101@gmail.com> * Fix Signed-off-by: AlexDBlack <blacka101@gmail.com> * More polish Signed-off-by: AlexDBlack <blacka101@gmail.com> * Polish Signed-off-by: AlexDBlack <blacka101@gmail.com> * remove previous MKL-DNN support layer Signed-off-by: raver119 <raver119@gmail.com> * avx2 tweak Signed-off-by: raver119 <raver119@gmail.com> * allow static for apple Signed-off-by: raver119@gmail.com <raver119@gmail.com> * exclude mkldnn in one more place Signed-off-by: raver119 <raver119@gmail.com> * exclude mkldnn in one more place Signed-off-by: raver119 <raver119@gmail.com> * restore OPENBLAS_PATH use Signed-off-by: raver119 <raver119@gmail.com> * add runtime check for avx/avx2 support Signed-off-by: raver119 <raver119@gmail.com> * convolution_auto Signed-off-by: raver119 <raver119@gmail.com> * Add logic for helper argument * minor test fix Signed-off-by: raver119 <raver119@gmail.com> * few tweaks Signed-off-by: raver119 <raver119@gmail.com> * few tweaks Signed-off-by: raver119 <raver119@gmail.com> * skip OpTracker props
for non-x86 builds Signed-off-by: raver119 <raver119@gmail.com> * linux arm isn't x86 :) Signed-off-by: raver119 <raver119@gmail.com> * avx-512 Signed-off-by: raver119 <raver119@gmail.com> * CUDA presets fix Signed-off-by: raver119 <raver119@gmail.com> * BUILD_PIC Signed-off-by: raver119 <raver119@gmail.com> * prefetchw for avx2 Signed-off-by: raver119 <raver119@gmail.com> * BUILD_PIC again Signed-off-by: raver119 <raver119@gmail.com>
138 lines
4.5 KiB
C++
138 lines
4.5 KiB
C++
/*******************************************************************************
|
|
* Copyright (c) 2015-2018 Skymind, Inc.
|
|
*
|
|
* This program and the accompanying materials are made available under the
|
|
* terms of the Apache License, Version 2.0 which is available at
|
|
* https://www.apache.org/licenses/LICENSE-2.0.
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
* License for the specific language governing permissions and limitations
|
|
* under the License.
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
******************************************************************************/
|
|
|
|
//
|
|
// @author raver119@gmail.com
|
|
//
|
|
|
|
#include <op_boilerplate.h>
|
|
#if NOT_EXCLUDED(OP_dilation2d)
|
|
|
|
#include <ops/declarable/headers/convo.h>
|
|
#include <ops/declarable/helpers/dilation2d.h>
|
|
|
|
namespace nd4j {
|
|
namespace ops {
|
|
//////////////////////////////////////////////////////////////////////////
// Morphological 2D dilation.
// Inputs:  0 - input,   4D, channels-last (channels on dim 3)
//          1 - weights, 3D, channels on dim 2
// Optional inputs 2 and 3 carry rates and strides as arrays; otherwise
// 9 integer args are expected: [isSameMode, r0..r3, s0..s3].
// Output:  0 - dilated result, written by helpers::dilation2d.
// NOTE: removed unused locals bS/iC (computed but never read).
CUSTOM_OP_IMPL(dilation2d, 2, 1, false, 0, 1) {
    auto input = INPUT_VARIABLE(0);
    auto weights = INPUT_VARIABLE(1);

    auto output = OUTPUT_VARIABLE(0);

    REQUIRE_TRUE(input->rankOf() == 4, 0, "Dilation2D: input should be 4D");
    REQUIRE_TRUE(weights->rankOf() == 3, 0, "Dilation2D: weights should be 3D");

    // INT_ARG(0) is the padding-mode flag: 1 => SAME, otherwise VALID
    const bool isSameShape = INT_ARG(0) == 1;

    // channel count of input (dim 3) must match channel count of weights (dim 2)
    REQUIRE_TRUE(input->sizeAt(3) == weights->sizeAt(2), 0, "Dilation2D: number of input channels doesn't match number of channels in weights: %i vs %i", input->sizeAt(3), weights->sizeAt(2));

    std::vector<int> strides(4);
    std::vector<int> rates(4);

    if (block.width() > 2) {
        // rates/strides provided as input arrays 2 and 3
        REQUIRE_TRUE(block.width() >= 4, 0, "Dilation2D: number of input arrays should be 4 at least");

        auto r = INPUT_VARIABLE(2);
        auto s = INPUT_VARIABLE(3);

        strides = s->template asVectorT<int>();
        rates = r->template asVectorT<int>();
    } else {
        // rates/strides provided as integer args: [mode, 4 rates, 4 strides]
        REQUIRE_TRUE(block.numI() >= 9, 0, "Dilation2D: number of Int arguments should be 9 at least");

        int e = 1;
        for (int cnt = 0; cnt < 4; cnt++)
            rates[cnt] = INT_ARG(e++);

        for (int cnt = 0; cnt < 4; cnt++)
            strides[cnt] = INT_ARG(e++);
    }

    int sH = 0, sW = 0;
    int dH = 0, dW = 0;
    int pH = 0, pW = 0;
    int oH = 0, oW = 0;

    // derive 2D strides/paddings/dilations and output spatial dims from the 4-element vectors
    helpers::dilation_hw(block.launchContext(), input->shapeInfo(), weights->shapeInfo(), strides, rates, isSameShape, &sH, &sW, &pH, &pW, &dH, &dW, &oH, &oW);

    REQUIRE_TRUE(oH > 0 && oW > 0, 0, "Dilation2D: outY and outX should have positive values, but got [%i, %i] instead", oH, oW);

    helpers::dilation2d(block.launchContext(), input, weights, output, sH, sW, pH, pW, dH, dW);

    return Status::OK();
}
|
|
|
|
// Type constraints: any input type is accepted, output must be floating point.
DECLARE_TYPES(dilation2d) {
    auto descriptor = getOpDescriptor();
    descriptor->setAllowedInputTypes(nd4j::DataType::ANY);
    descriptor->setAllowedOutputTypes({ALL_FLOATS});
}
|
|
|
|
//////////////////////////////////////////////////////////////////////////
// Output shape: [bS, oH, oW, iC] in 'c' order, with the weights' data type.
// When rates/strides come as int args but fewer than 9 are present, the
// shape cannot be computed yet, so a scalar shape is returned as placeholder.
// Fixes: removed uninitialized `Nd4jLong *newShape;` declared far from use
// (now returned directly); added the width>=4 guard the op implementation
// has, so INPUT_VARIABLE(3) is never read when the 4th array is missing.
DECLARE_SHAPE_FN(dilation2d) {
    auto input = inputShape->at(0);
    auto weights = inputShape->at(1);

    const int bS = shape::sizeAt(input, 0);
    const int iC = shape::sizeAt(input, 3);
    const bool isSameShape = INT_ARG(0) == 1;

    std::vector<int> strides(4);
    std::vector<int> rates(4);

    if (block.width() > 2) {
        // mirror the guard used in the op implementation before touching inputs 2/3
        REQUIRE_TRUE(block.width() >= 4, 0, "Dilation2D: number of input arrays should be 4 at least");

        auto r = INPUT_VARIABLE(2);
        auto s = INPUT_VARIABLE(3);

        strides = s->template asVectorT<int>();
        rates = r->template asVectorT<int>();
    } else {
        // not enough int args to compute the output shape yet
        if (block.numI() < 9)
            return SHAPELIST(ConstantShapeHelper::getInstance()->scalarShapeInfo(block.dataType()));

        int e = 1;
        for (int cnt = 0; cnt < 4; cnt++)
            rates[cnt] = INT_ARG(e++);

        for (int cnt = 0; cnt < 4; cnt++)
            strides[cnt] = INT_ARG(e++);
    }

    int sH = 0, sW = 0;
    int dH = 0, dW = 0;
    int pH = 0, pW = 0;
    int oH = 0, oW = 0;

    // derive output spatial dims (oH, oW) from strides/rates and padding mode
    helpers::dilation_hw(block.launchContext(), input, weights, strides, rates, isSameShape, &sH, &sW, &pH, &pW, &dH, &dW, &oH, &oW);

    std::array<Nd4jLong, 4> shape = {{bS, oH, oW, iC}};
    auto newShape = ConstantShapeHelper::getInstance()->createShapeInfo(ArrayOptions::dataType(weights), 'c', 4, shape.data());
    return SHAPELIST(newShape);
}
|
|
}
|
|
}
|
|
|
|
#endif |