/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//

#include <ops/declarable/helpers/adjust_hue.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>

namespace sd      {
namespace ops     {
namespace helpers {

///////////////////////////////////////////////////////////////////
// grid-stride loop over TADs: each TAD is one RGB triplet whose hue is shifted by delta
template <typename T>
static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets,
                                        void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets,
                                  const Nd4jLong numOfTads, const T delta, const int dimC) {

    const T* x = reinterpret_cast<const T*>(vx);
          T* z = reinterpret_cast<T*>(vz);

    __shared__ int rank;
    __shared__ Nd4jLong xDimCstride, zDimCstride;

    if (threadIdx.x == 0) {
        rank = shape::rank(xShapeInfo);
        xDimCstride = shape::stride(xShapeInfo)[dimC];
        zDimCstride = shape::stride(zShapeInfo)[dimC];
    }
    __syncthreads();

    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) {

        const T* xTad = x + xTadOffsets[i];
              T* zTad = z + zTadOffsets[i];

        T h, s, v;

        rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v);

        // rotate the hue and wrap it back into [0, 1]
        h += delta;
        if (h > 1)
            h -= 1;
        else if (h < 0)
            h += 1;

        hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]);
    }
}

///////////////////////////////////////////////////////////////////
template <typename T>
static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream,
                                          const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets,
                                                void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets,
                                          const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) {

    adjustHueCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC);
}

////////////////////////////////////////////////////////////////////////
void adjustHue(sd::LaunchContext* context, const NDArray* input, const NDArray* deltaScalarArr, NDArray* output, const int dimC) {

    // build TADs along the channel dimension, so every TAD is a single RGB triplet
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {dimC});
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {dimC});

    const Nd4jLong numOfTads = packX.numberOfTads();

    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock;

    PointersManager manager(context, "adjustHue");

    NDArray::prepareSpecialUse({output}, {input, deltaScalarArr});
    BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES);
    NDArray::registerSpecialUse({output}, {input, deltaScalarArr});

    manager.synchronize();
}
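
////////////////////////////////////////////////////////////////////////
// Minimal host-side usage sketch for adjustHue(). Illustrative only: the
// NDArrayFactory / LaunchContext calls below are assumptions about the
// surrounding libnd4j API, not something defined in this file.
//
//   auto input  = NDArrayFactory::create<float>('c', {32, 32, 3});  // HWC image with values in [0, 1]
//   auto delta  = NDArrayFactory::create<float>(0.25f);             // hue shift, expected in [-1, 1]
//   auto output = input.ulike();
//
//   // dimC = 2: the channel dimension is the last axis of this input
//   helpers::adjustHue(sd::LaunchContext::defaultContext(), &input, &delta, &output, 2);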
/*
// older NHWC/NCHW implementation, kept commented out for reference

template <typename T>
static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) {
    int numChannels = 3;
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;

    auto bIn  = reinterpret_cast<T*>(xBuffer);
    auto bOut = reinterpret_cast<T*>(zBuffer);
    static const int kChannelRange = 6;

    for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) {
        auto i = bIn  + e * numChannels;
        auto o = bOut + e * numChannels;

        T h, v_min, v_max;
        helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max);

        h += delta * kChannelRange;
        while (h < (T) 0.)
            h += (T) kChannelRange;

        while (h >= (T) kChannelRange)
            h -= (T) kChannelRange;

        helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2);
    }
}

template <typename T>
static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) {
    int numChannels = 3;
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    static const int kChannelRange = 6;

    auto bufferR = reinterpret_cast<T*>(xBuffer) + xOffsets[0];
    auto bufferG = reinterpret_cast<T*>(xBuffer) + xOffsets[1];
    auto bufferB = reinterpret_cast<T*>(xBuffer) + xOffsets[2];

    auto outputR = reinterpret_cast<T*>(zBuffer) + zOffsets[0];
    auto outputG = reinterpret_cast<T*>(zBuffer) + zOffsets[1];
    auto outputB = reinterpret_cast<T*>(zBuffer) + zOffsets[2];

    for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) {
        auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo);
        auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo);
        auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo);

        auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo);
        auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo);
        auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo);

        T h, v_min, v_max;
        helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max);

        h += delta * kChannelRange;
        while (h < (T) 0)
            h += (T) kChannelRange;

        while (h >= (T) kChannelRange)
            h -= (T) kChannelRange;

        helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo);
    }
}

template <typename T>
static void _adjust_hue_single(sd::LaunchContext* context, NDArray *array, NDArray *output, float delta, bool isNHWC) {
    // numChannels is always 3
    auto tuples = array->lengthOf() / 3;
    if (isNHWC) {
        adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), tuples, delta);
    } else {
        // TODO: check this one
        auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(array->shapeInfo(), {1, 2});
        auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {1, 2});

        auto tadLength = shape::length(packX.primaryShapeInfo());

        adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta);
    }
}

template <typename T>
static void _adjust_hue_batch(sd::LaunchContext* context, NDArray *array, NDArray *output, float delta, bool isNHWC) {
    auto xType = array->dataType();

    // numChannels is always 3
    auto tuples = array->lengthOf() / 3;

    if (isNHWC) {
        // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values
        BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES);
    } else {
        // TODO: check this one
        auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(array->shapeInfo(), {0, 2, 3});
        auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {0, 2, 3});

        auto tadLength = shape::length(packX.primaryShapeInfo());

        adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta);
    }
}

void _adjust_hue(sd::LaunchContext* context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) {
    auto xType = array->dataType();

    float d = delta->e<float>(0);
    if (array->rankOf() == 4) {
        BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES);
    } else {
        BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES);
    }
}
*/

}
}
}
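
/*
// Standalone CPU reference for the hue rotation performed by adjustHueCuda, kept
// here as a sketch for sanity-checking results on small inputs. It assumes the
// conventional normalized formulation (h, s, v and r, g, b all in [0, 1]); the
// actual device-side rgbToHsv / hsvToRgb used above are declared in adjust_hue.h
// and may differ in details.

#include <algorithm>
#include <cmath>

static void referenceAdjustHue(float r, float g, float b, float delta, float& ro, float& go, float& bo) {
    const float mx = std::max({r, g, b});
    const float mn = std::min({r, g, b});
    const float c  = mx - mn;                       // chroma; unchanged by a pure hue rotation

    // RGB -> HSV with hue normalized to [0, 1)
    float h = 0.f;
    if (c > 0.f) {
        if      (mx == r) h = std::fmod((g - b) / c, 6.f) / 6.f;
        else if (mx == g) h = ((b - r) / c + 2.f) / 6.f;
        else              h = ((r - g) / c + 4.f) / 6.f;
        if (h < 0.f) h += 1.f;
    }
    const float v = mx;

    // rotate hue and wrap it back into [0, 1], mirroring the kernel above
    h += delta;
    if (h > 1.f) h -= 1.f;
    else if (h < 0.f) h += 1.f;

    // HSV -> RGB (saturation and value are untouched, so chroma c can be reused)
    const float hh = h * 6.f;
    const float x  = c * (1.f - std::fabs(std::fmod(hh, 2.f) - 1.f));
    float rr = 0.f, gg = 0.f, bb = 0.f;
    if      (hh < 1.f) { rr = c;  gg = x; }
    else if (hh < 2.f) { rr = x;  gg = c; }
    else if (hh < 3.f) { gg = c;  bb = x; }
    else if (hh < 4.f) { gg = x;  bb = c; }
    else if (hh < 5.f) { rr = x;  bb = c; }
    else               { rr = c;  bb = x; }

    const float m = v - c;
    ro = rr + m;  go = gg + m;  bo = bb + m;
}
*/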