cavis/libnd4j/include/ops/declarable/generic/transforms/merge_add.cpp

/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by raver119 on 24.11.17.
//
#include <system/op_boilerplate.h>
#if NOT_EXCLUDED(OP_mergeadd)
#include <ops/declarable/CustomOperations.h>
#include <ops/declarable/helpers/transforms.h>
namespace sd {
namespace ops {
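// mergeadd: element-wise sum of an arbitrary number of input arrays;
// all inputs are expected to match the shape of the single output.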
OP_IMPL(mergeadd, -1, 1, false) {
    REQUIRE_OK(this->validateInputDimensionsMatch(block));

    auto output = OUTPUT_VARIABLE(0);
    std::vector<const NDArray*> inArrs(block.width());
    for (int i = 0; i < block.width(); ++i)
        inArrs[i] = INPUT_VARIABLE(i);

    helpers::mergeAdd(block.launchContext(), inArrs, *output);

    return Status::OK();
}
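// synonyms registered for this op; add_n / accumulate_n mirror the TensorFlow op names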
DECLARE_SYN(mergesum, mergeadd);
DECLARE_SYN(add_n, mergeadd);
DECLARE_SYN(addn, mergeadd);
DECLARE_SYN(accumulaten, mergeadd);
DECLARE_SYN(accumulate_n, mergeadd);
DECLARE_TYPES(mergeadd) {
    getOpDescriptor()
        ->setAllowedInputTypes(sd::DataType::ANY)
        ->setAllowedOutputTypes(sd::DataType::ANY);
}
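// mergeadd_bp: backprop for mergeadd. Since d(sum)/d(x_i) = 1 for every input,
// the incoming gradient (passed as the last input) is propagated unchanged to
// each of the inSize gradient outputs.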
CUSTOM_OP_IMPL(mergeadd_bp, 2, 1, false, 0, 0) {
    auto inSize = block.width() - 1;

    REQUIRE_OK(this->validateInputDimensionsMatch(block));

    std::vector<NDArray*> outArrs(inSize);
    const auto gradient = INPUT_VARIABLE(inSize);

    for (int i = 0; i < inSize; ++i) {
        outArrs[i] = OUTPUT_VARIABLE(i);
    }

    helpers::mergeAddBp(block.launchContext(), *gradient, outArrs);

    return Status::OK();
}
DECLARE_TYPES(mergeadd_bp) {
    getOpDescriptor()
        ->setAllowedInputTypes(sd::DataType::ANY)
        ->setAllowedOutputTypes(sd::DataType::ANY);
}
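// each gradient output keeps the data type, order and shape of the corresponding forward input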
DECLARE_SHAPE_FN(mergeadd_bp) {
    const int numOfInArrs = block.width() - 1;

    auto shapeList = SHAPELIST();

    for (int e = 0; e < numOfInArrs; e++) {
        auto inShape = inputShape->at(e);
        shapeList->push_back(ConstantShapeHelper::getInstance()->createShapeInfo(
                ShapeDescriptor(ArrayOptions::dataType(inShape), shape::order(inShape), shape::shapeOf(inShape), shape::rank(inShape))));
    }

    return shapeList;
}
}
}
#endif