* Libnd4j: TensorMMul backprop op #8174, raw implementation Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 merge master and some corrections Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 algorithm update, need testing, sync with master * Libnd4j: TensorMMul backprop op #8174 fixed incorrect B axes calculation Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 optimize axes identification and fix bug of indeces overlapping, added first test. need testing with different shapes Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 some fixes and improvements need more testing Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 fixed order of matrix multiply Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 fixed issue of incorrect axes definition, add tests based on TF, need additional testing for case dLdC not equal 1 Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 fixed scalar case add test Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 fixed bp algorithm, axes definition, need some mode testing with different orders combination f,c; c,f f,f and add some checks for inputs Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 some checks and corrections added tests, exists the problem with different input orders support A-f B-c and A-f B-f Signed-off-by: Oleg <oleg.semeniv@gmail.com> * Libnd4j: TensorMMul backprop op #8174 sync master Signed-off-by: Oleg <oleg.semeniv@gmail.com> * - correct bug in MmulHelper::tensorDot(a, b, c, axes_a, axes_b,permutForC) Signed-off-by: Yurii <iuriish@yahoo.com> * Libnd4j: TensorMMul backprop op #8174 code clean up and refactoring Signed-off-by: Oleg <oleg.semeniv@gmail.com> * - add check for linspase ordered permutations in 
ShapeUtils::evalShapeForTensorDot Signed-off-by: Yurii <iuriish@yahoo.com> * - provide additional code in shape::reshape stuff in order to reduce amount of allocation/copy operations during reshaping procedure Signed-off-by: Yurii <iuriish@yahoo.com> * - further work on problem of wrong shape evaluation during permute/reshape procedures Signed-off-by: Yurii <iuriish@yahoo.com> * - still looking for bug reason in reshape/permute stuff Signed-off-by: Yurii <iuriish@yahoo.com> * - correct bug in transform cuda native ops Signed-off-by: Yurii <iuriish@yahoo.com> * - correct bug in NDArray::assign Signed-off-by: Yurii <iuriish@yahoo.com> * - remove old shape::reshape stuff Signed-off-by: Yurii <iuriish@yahoo.com> * - add possibility to disable copy of old buffer to new buffer during reshape operation in NDArray class Signed-off-by: Yurii <iuriish@yahoo.com> * - correct bug in tensorDot which had to do with wrong pointers assigments Signed-off-by: Yurii <iuriish@yahoo.com> Co-authored-by: Oleh <oleg.semeniv@gmail.com>
86 lines
2.8 KiB
C++
86 lines
2.8 KiB
C++
/*******************************************************************************
|
|
* Copyright (c) 2015-2018 Skymind, Inc.
|
|
*
|
|
* This program and the accompanying materials are made available under the
|
|
* terms of the Apache License, Version 2.0 which is available at
|
|
* https://www.apache.org/licenses/LICENSE-2.0.
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
* License for the specific language governing permissions and limitations
|
|
* under the License.
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
******************************************************************************/
|
|
|
|
//
|
|
// @author raver119@gmail.com
|
|
//
|
|
|
|
#include <ops/declarable/helpers/helpers.h>
|
|
#include <execution/Threads.h>
|
|
|
|
namespace nd4j {
|
|
namespace ops {
|
|
namespace helpers {
|
|

// Batched variant of the cross product: forward declaration only here;
// the implementation lives in the platform-specific (cpu/cuda) source.
void crossBatched(nd4j::LaunchContext * context, NDArray *a, NDArray *b, NDArray *o);

|
//////////////////////////////////////////////////////////////////////////
// Computes the 3D cross product o = a x b of two 3-element vectors.
// Real-typed inputs are evaluated in double precision; every other
// type goes through Nd4jLong integer arithmetic.
void FORCEINLINE cross(nd4j::LaunchContext * context, NDArray *a, NDArray *b, NDArray *o) {

    if (a->isR()) {
        // floating-point path: read the three components of each operand as doubles
        const auto ax = a->e<double>(0);
        const auto ay = a->e<double>(1);
        const auto az = a->e<double>(2);

        const auto bx = b->e<double>(0);
        const auto by = b->e<double>(1);
        const auto bz = b->e<double>(2);

        // standard component formulas of the cross product
        o->p(Nd4jLong(0L), ay * bz - az * by);
        o->p(1L, az * bx - ax * bz);
        o->p(2L, ax * by - ay * bx);
    } else {
        // integral path: identical formulas over Nd4jLong
        const auto ax = a->e<Nd4jLong>(0);
        const auto ay = a->e<Nd4jLong>(1);
        const auto az = a->e<Nd4jLong>(2);

        const auto bx = b->e<Nd4jLong>(0);
        const auto by = b->e<Nd4jLong>(1);
        const auto bz = b->e<Nd4jLong>(2);

        o->p(Nd4jLong(0L), ay * bz - az * by);
        o->p(1L, az * bx - ax * bz);
        o->p(2L, ax * by - ay * bx);
    }
}
|
|
|
|
//////////////////////////////////////////////////////////////////////////
// Applies the 3D cross product row-wise: a, b and o are viewed as
// batches of 3-element vectors (flattened to shape [-1, 3]) and
// helpers::cross is invoked on each row triple in parallel.
void FORCEINLINE _crossBatched(nd4j::LaunchContext * context, NDArray *a, NDArray *b, NDArray *o) {
    // flatten every operand down to a [batch, 3] view
    auto aFlat = a->reshape(a->ordering(), {-1, 3});
    auto bFlat = b->reshape(b->ordering(), {-1, 3});
    auto oFlat = o->reshape(o->ordering(), {-1, 3}, false);   // output: old buffer contents not needed

    // one tensor-along-dimension view per length-3 row
    auto tadsA = aFlat.allTensorsAlongDimension({1});
    auto tadsB = bFlat.allTensorsAlongDimension({1});
    auto tadsO = oFlat.allTensorsAlongDimension({1});

    const int numTads = tadsA.size();

    // per-row worker: compute the cross product of the i-th vector pair
    auto func = PRAGMA_THREADS_FOR {
        for (auto i = start; i < stop; i += increment)
            helpers::cross(context, tadsA.at(i), tadsB.at(i), tadsO.at(i));
    };

    samediff::Threads::parallel_tad(func, 0, numTads);
}
|
|

// Forward declaration; implementation lives in the platform-specific source.
void weightedCrossEntropyWithLogitsFunctor(nd4j::LaunchContext * context, NDArray const* targets, NDArray const* input, NDArray const* weights, NDArray* output);

|
}
|
|
}
|
|
} |