/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
// @author Yurii Shyrma, created on 28.11.2018
//

#include <ops/specials_cuda.h>

//////////////////////////////////////////////////////////////////////////
// One step of a bitonic sorting network over the VALUES array y; the keys
// array x is carried along (both are swapped together, compare on y).
//
// Layout: 1D grid, 1D blocks; one thread per element index. Threads past
// `length` exit immediately. `j` is the current sub-stage distance (partner
// is i ^ j), `k` is the current stage size, whose bit decides the local sort
// direction. The `!descending == (cmp)` idiom flips the comparison result
// when a globally descending order is requested.
//
// NOTE(review): offsets are computed through yShapeInfo while the cached
// length comes from xShapeInfo — assumes x and y have identical length and
// element ordering; confirm against callers.
template <typename X, typename Y>
__global__ void bitonicSortStepKernelValue(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {

    auto x = static_cast<X*>(vx);
    auto y = static_cast<Y*>(vy);

    unsigned int i, ixj; /* Sorting partners: i and ixj */
    i = threadIdx.x + blockDim.x * blockIdx.x;

    // cache the array length once per block
    __shared__ Nd4jLong xLength;
    if (threadIdx.x == 0)
        xLength = shape::length(xShapeInfo);

    __syncthreads();

    if (i >= length)
        return;

    ixj = i^j;

    /* The threads with the lowest ids sort the array. */
    if ((ixj)>i) {
        int posI = shape::getIndexOffset(i, yShapeInfo, xLength);
        int posIXJ = shape::getIndexOffset(ixj, yShapeInfo, xLength);

        if ((i&k)==0) {
            /* Sort ascending */
            if (!descending == (y[posI]>y[posIXJ])) {
                /* exchange(i,ixj); */
                X temp = x[posI];
                x[posI] = x[posIXJ];
                x[posIXJ] = temp;

                Y ytemp = y[posI];
                y[posI] = y[posIXJ];
                y[posIXJ] = ytemp;
            }
        } else if ((i&k)!=0) {
            /* Sort descending */
            if (!descending == (y[posI]<y[posIXJ])) {
                /* exchange(i,ixj); */
                X temp = x[posI];
                x[posI] = x[posIXJ];
                x[posIXJ] = temp;

                Y ytemp = y[posI];
                y[posI] = y[posIXJ];
                y[posIXJ] = ytemp;
            }
        }
    }
}

//////////////////////////////////////////////////////////////////////////
// One step of a bitonic sorting network over the KEYS array x; the values
// array y is carried along (both are swapped together, compare on x).
// Same grid/block contract as bitonicSortStepKernelValue above.
template <typename X, typename Y>
__global__ void bitonicSortStepKernelKey(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {

    auto x = static_cast<X*>(vx);
    auto y = static_cast<Y*>(vy);

    unsigned int i, ixj; /* Sorting partners: i and ixj */
    i = threadIdx.x + blockDim.x * blockIdx.x;

    // cache the array length once per block
    __shared__ Nd4jLong xLength;
    if (threadIdx.x == 0)
        xLength = shape::length(xShapeInfo);

    __syncthreads();

    if (i >= length)
        return;

    ixj = i^j;

    /* The threads with the lowest ids sort the array. */
    if ((ixj)>i) {
        int posI = shape::getIndexOffset(i, xShapeInfo, xLength);
        int posIXJ = shape::getIndexOffset(ixj, xShapeInfo, xLength);

        if ((i&k)==0) {
            /* Sort ascending */
            if (!descending == (x[posI]>x[posIXJ])) {
                /* exchange(i,ixj); */
                X temp = x[posI];
                x[posI] = x[posIXJ];
                x[posIXJ] = temp;

                Y ytemp = y[posI];
                y[posI] = y[posIXJ];
                y[posIXJ] = ytemp;
            }
        } else if ((i&k)!=0) {
            /* Sort descending */
            if (!descending == (x[posI]<x[posIXJ])) {
                /* exchange(i,ixj); */
                X temp = x[posI];
                x[posI] = x[posIXJ];
                x[posIXJ] = temp;

                Y ytemp = y[posI];
                y[posI] = y[posIXJ];
                y[posIXJ] = ytemp;
            }
        }
    }
}

//////////////////////////////////////////////////////////////////////////
// One step of a bitonic sorting network over a single array x.
// Same grid/block contract as the keyed variants above.
template <typename T>
__global__ void bitonicSortStepKernel(void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending) {

    auto x = static_cast<T*>(vx);

    unsigned int i, ixj; /* Sorting partners: i and ixj */
    i = threadIdx.x + blockDim.x * blockIdx.x;

    // cache the array length once per block
    __shared__ Nd4jLong xLength;
    if (threadIdx.x == 0)
        xLength = shape::length(xShapeInfo);

    __syncthreads();

    if (i >= length)
        return;

    ixj = i^j;

    /* The threads with the lowest ids sort the array. */
    if ((ixj)>i) {
        int posI = shape::getIndexOffset(i, xShapeInfo, xLength);
        int posIXJ = shape::getIndexOffset(ixj, xShapeInfo, xLength);

        if ((i&k)==0) {
            /* Sort ascending */
            if (!descending == (x[posI]>x[posIXJ])) {
                /* exchange(i,ixj); */
                T temp = x[posI];
                x[posI] = x[posIXJ];
                x[posIXJ] = temp;
            }
        } else if ((i&k)!=0) {
            /* Sort descending */
            if (!descending == (x[posI]<x[posIXJ])) {
                /* exchange(i,ixj); */
                T temp = x[posI];
                x[posI] = x[posIXJ];
                x[posIXJ] = temp;
            }
        }
    }
}

//////////////////////////////////////////////////////////////////////////
// Host launcher for the single-array bitonic step.
// launchDims: x = grid size, y = block size, z = dynamic shared memory bytes.
template <typename T>
__host__ void bitonicSortStepGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending) {
    bitonicSortStepKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, j, k, length, descending);
}

//////////////////////////////////////////////////////////////////////////
// Host launcher for the key-compared (x) pair sort step.
template <typename X, typename Y>
__host__ void bitonicSortStepGenericKey(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
    bitonicSortStepKernelKey<X, Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, j, k, length, descending);
}

//////////////////////////////////////////////////////////////////////////
// Host launcher for the value-compared (y) pair sort step.
template <typename X, typename Y>
__host__ void bitonicSortStepGenericValue(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
    bitonicSortStepKernelValue<X, Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, j, k, length, descending);
}

// Explicit instantiations over the library's supported type lists.
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGenericKey, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGenericValue, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);