/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by Yurii Shyrma on 02.01.2018
//

#include <ops/declarable/helpers/stack.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <execution/Threads.h>

namespace nd4j {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
template <typename T>
static void stack_(const std::vector<const NDArray*>& inArrs, NDArray* outArr, const int dim) {

    if(inArrs[0]->rankOf() == 0) {
        // all inputs are scalars: write each input's single value straight into the output vector
        int inSize = inArrs.size();

        auto func = PRAGMA_THREADS_FOR {
            for (auto i = start; i < stop; i += increment)
                outArr->p(i, inArrs[i]->t<T>(0));
        };
        samediff::Threads::parallel_for(func, 0, inSize);
    }
    else {
        // general case: view the output as a set of sub-arrays along every dimension
        // except dim, then copy each input array into its corresponding sub-array
        std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(outArr->rankOf(), {dim});
        auto list = outArr->allTensorsAlongDimension(dimsToExclude);        // list->size() == inArrs.size()
        int listSize = list->size();

        auto func = PRAGMA_THREADS_FOR {
            for (auto i = start; i < stop; i += increment)
                list->at(i)->assign(inArrs[i]);
        };
        samediff::Threads::parallel_tad(func, 0, listSize);

        delete list;
    }
}

void stack(nd4j::LaunchContext * context, const std::vector<const NDArray*>& inArrs, NDArray* outArr, const int dim) {
    BUILD_SINGLE_SELECTOR(outArr->dataType(), stack_, (inArrs, outArr, dim), LIBND4J_TYPES);
}

BUILD_SINGLE_TEMPLATE(template void stack_, (const std::vector<const NDArray*>& inArrs, NDArray* outArr, const int dim), LIBND4J_TYPES);

}
}
}
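
// Illustrative usage (a sketch, not part of the original file): how this helper is
// expected to be driven by a caller. NDArrayFactory::create and
// LaunchContext::defaultContext() are assumed from the surrounding codebase, and the
// output must be pre-allocated with the stacked shape. Stacking two {2,3} arrays
// along dim 0 produces a {2,2,3} output:
//
//     auto a   = NDArrayFactory::create<float>('c', {2, 3});
//     auto b   = NDArrayFactory::create<float>('c', {2, 3});
//     auto out = NDArrayFactory::create<float>('c', {2, 2, 3});
//     stack(LaunchContext::defaultContext(), {&a, &b}, &out, 0);
//     // afterwards out(0,i,j) == a(i,j) and out(1,i,j) == b(i,j)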