/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018
//

#include <ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
#include <exceptions/cuda_exception.h>
#include <numeric>

namespace sd {
namespace ops {
namespace helpers {

///////////////////////////////////////////////////////////////////
// x - input, y - indices, z - output
template<typename X, typename Y>
__global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo,
                                    const void *vy, const Nd4jLong *yShapeInfo,
                                          void *vz, const Nd4jLong *zShapeInfo) {

    const auto x = reinterpret_cast<const X*>(vx);
    const auto y = reinterpret_cast<const Y*>(vy);
          auto z = reinterpret_cast<X*>(vz);

    __shared__ int xRank, yRank, zRank, maxRank, yLastDim;
    __shared__ Nd4jLong zLen, totalThreads, *sharedMem;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);

        xRank   = shape::rank(xShapeInfo);
        yRank   = shape::rank(yShapeInfo);
        zRank   = shape::rank(zShapeInfo);
        maxRank = sd::math::nd4j_max<int>(yRank, sd::math::nd4j_max<int>(xRank, zRank));

        zLen     = shape::length(zShapeInfo);
        yLastDim = yShapeInfo[yRank];            // last dimension of indices

        totalThreads = gridDim.x * blockDim.x;
    }
    __syncthreads();

    // each thread owns maxRank Nd4jLong slots of shared memory for its coordinates
    auto coord = sharedMem + threadIdx.x * maxRank;

    Nd4jLong *zCoordStart, *xCoordStart;

    if (yLastDim == xRank) {
        zCoordStart = coord;
        xCoordStart = coord;
    }
    else if (zRank >= xRank) {
        zCoordStart = coord;
        xCoordStart = coord + zRank - xRank;
    }
    else {
        zCoordStart = coord + xRank - zRank;
        xCoordStart = coord;
    }

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (Nd4jLong i = tid; i < zLen; i += totalThreads) {

        shape::index2coords(i, zShapeInfo, zCoordStart);

        const auto zOffset = shape::getOffset(zShapeInfo, zCoordStart);

        // save the last y coordinate
        int coordToRestore;
        if (yLastDim != xRank)
            coordToRestore = static_cast<int>(zCoordStart[yRank - 1]);

        zCoordStart[yRank - 1] = 0;              // zero out the last y coordinate
        const auto yOffset = shape::getOffset(yShapeInfo, zCoordStart);

        // restore z coordinate
        if (yLastDim != xRank)
            zCoordStart[yRank - 1] = coordToRestore;

        // construct coordinates for x
        for (uint j = 0; j < yLastDim; ++j)
            xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]];   // last stride of y

        const auto xOffset = shape::getOffset(xShapeInfo, xCoordStart);

        z[zOffset] = x[xOffset];
        // printf("z[%lld] = x[%lld] = %f\n", zOffset, xOffset, (float) z[zOffset]);
    }
}

///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                 const void *vx, const Nd4jLong *xShapeInfo,
                                 const void *vy, const Nd4jLong *yShapeInfo,
                                       void *vz, const Nd4jLong *zShapeInfo) {

    gatherNDCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
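///////////////////////////////////////////////////////////////////
// Illustration of the index mapping gatherNDCuda performs (a sketch only; the
// shapes and values below are hypothetical, not taken from any test). With
//     x of shape [2, 3]      (xRank = 2)
//     y of shape [3, 2]      (yRank = 2, yLastDim = 2 == xRank)
//     z of shape [3]         (zRank = 1)
// each output element is z[i] = x[ y[i, 0], y[i, 1] ].
// When yLastDim < xRank, only the leading x coordinates come from y and the
// trailing ones are taken from the output coordinate itself, e.g. with
//     x of shape [2, 3, 4], y of shape [5, 1], z of shape [5, 3, 4]
// the kernel computes z[i, j, k] = x[ y[i, 0], j, k ].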
///////////////////////////////////////////////////////////////////
void gatherND(sd::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) {

    const int maxRank = sd::math::nd4j_max<int>(indices.rankOf(), sd::math::nd4j_max<int>(input.rankOf(), output.rankOf()));

    const int threadsPerBlock = 256;
    const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
    const int sharedMem = 8 * threadsPerBlock * maxRank + 128;   // maxRank Nd4jLong coordinates per thread

    const auto xType = input.dataType();
    const auto yType = indices.dataType();

    PointersManager manager(context, "gatherND");

    NDArray::prepareSpecialUse({&output}, {&input, &indices});
    BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES);
    NDArray::registerSpecialUse({&output}, {&input, &indices});

    manager.synchronize();
}

}
}
}
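///////////////////////////////////////////////////////////////////
// Minimal host-side usage sketch, kept as a comment so it is not compiled into
// this translation unit; it assumes the NDArrayFactory helpers and the default
// LaunchContext used elsewhere in libnd4j:
//
//   auto x = NDArrayFactory::create<float>('c', {2, 3}, {1, 2, 3, 4, 5, 6});  // [[1,2,3],[4,5,6]]
//   auto y = NDArrayFactory::create<int>('c', {3, 2}, {0, 0,  1, 2,  0, 1});  // three [row, col] pairs
//   auto z = NDArrayFactory::create<float>('c', {3});
//
//   sd::ops::helpers::gatherND(sd::LaunchContext::defaultContext(), x, y, z);
//   // expected: z = {1, 6, 2}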