/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

#ifndef REDUCE_SAME_H
#define REDUCE_SAME_H

#include <system/dll.h>
//#include <string>
#include <stdio.h>
#include <helpers/shape.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math/templatemath.h>
#include <system/nd4jmalloc.h>
#include <system/pairwise_util.h>
#include <ops/ops.h>
#include <system/op_boilerplate.h>

#pragma once
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif

#include "legacy_ops.h"

//an op for the kernel
namespace functions {
namespace reduce {

/**
 * A reduce function
 * reduces a vector down to
 * a subset of itself
 * via aggregating member
 * elements.
 *
 * "Same" refers to the output element type being the same type X as the
 * input. This class only declares the dispatch interface; the per-op
 * implementations live elsewhere (see "legacy_ops.h" for the op list).
 */
template<typename X>
class ReduceSameFunction {
public:
#ifdef __CUDACC__

    // Aggregates `numItems` partial results held in `sPartials` for thread
    // `tid`. NOTE(review): `sPartials` is presumably shared memory — confirm
    // against the implementation.
    template<typename OpType>
    static __device__ void aggregatePartials(void *sPartials, Nd4jLong tid, Nd4jLong numItems, void *extraParams);

    // Device-side reduction of the whole input `vx` down to a scalar written
    // to `vz`. `reductionBuffer` is scratch storage for partial results.
    template<typename OpType>
    static __device__ void execScalarCuda( void const* vx, Nd4jLong const *xShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo);

    // Runtime-dispatch variant of execScalarCuda: the concrete op is selected
    // from `opNum` instead of a compile-time OpType.
    static __device__ void execScalarCudaLegacy(int opNum, void const* vx, Nd4jLong const* xShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo);

    // Device-side reduction along the dimensions in `dimension`; TAD
    // (tensor-along-dimension, per helpers/shape.h) layout is described by
    // `tadOnlyShapeInfo`/`tadOffsets`.
    template<typename OpType>
    static __device__ void transformCudaXD( void const* vx, Nd4jLong const* xShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets);

    // Host-side launcher for the scalar reduction (compile-time op).
    // `hXShapeInfo`/`hZShapeInfo` are the host-side copies of the shape info.
    template<typename OpType>
    static __host__ void intermediateScalar(dim3 launchDims, cudaStream_t *stream, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo);

    // Host-side launcher for the dimensional reduction (compile-time op).
    template<typename OpType>
    static __host__ void intermediateXD(dim3 launchDims, cudaStream_t *stream, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets);

    // Runtime-dispatch host entry point for scalar reduction (op selected by
    // `opNum`).
    static __host__ void execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer, Nd4jLong const* tadOnlyShapeInfo);

    // Runtime-dispatch host entry point for dimensional reduction.
    static __host__ void execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, void const* vx, Nd4jLong const* xShapeInfo, Nd4jLong const* hXShapeInfo, void *extraParams, void *vz, Nd4jLong const* zShapeInfo, Nd4jLong const* hZShapeInfo, int *dimension, int dimensionLength, void *reductionPointer, Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets);

#else

    /**
     * Reduce down to 1 number
     * @param x the input
     * @param xShapeInfo the shape information
     * for the input
     * @param extraParams the extra params
     * @return the reduced scalar of type X
     */
    template<typename OpType>
    static _CUDA_H X execScalar(const void *x, const Nd4jLong *xShapeInfo,
                                void *extraParams);

    // As above, but writes the scalar result into `z` instead of returning it.
    template<typename OpType>
    static _CUDA_H void execScalar(const void *x, const Nd4jLong *xShapeInfo,
                                   void *extraParams,
                                   void *z, const Nd4jLong *zShapeInfo);

    // Runtime-dispatch scalar reduction (op selected by `opNum`), returning
    // the result.
    static X execScalar(int opNum,
                        const void *x, const Nd4jLong *xShapeInfo,
                        void *extraParams);

    // Runtime-dispatch scalar reduction, writing the result into `z`.
    static void execScalar(int opNum,
                           const void *x, const Nd4jLong *xShapeInfo,
                           void *extraParams,
                           void *z, const Nd4jLong *zShapeInfo);

    // Runtime-dispatch dimensional reduction over the index range
    // [start, stop). NOTE(review): presumably a TAD index range used for
    // work-splitting across threads — confirm against callers.
    static void exec(int opNum,
                     const void *x, const Nd4jLong *xShapeInfo,
                     void *extraParams,
                     void *result, const Nd4jLong *resultShapeInfoBuffer,
                     int *dimension, int dimensionLength,
                     const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffset,
                     int64_t start, int64_t stop);

    /**
     * Execute on the cpu
     * @param x the input data
     * @param xShapeInfo the shape information for x
     * @param extraParams the extra parameters
     * @param result the result buffer
     * @param resultShapeInfoBuffer the shape information
     * @param dimension the dimension to perform
     * the reduce along long
     * @param dimensionLength the length of the dimension buffer
     */
    template<typename OpType>
    static void _CUDA_H exec(const void *x, const Nd4jLong *xShapeInfo,
                             void *extraParams,
                             void *result, const Nd4jLong *resultShapeInfoBuffer,
                             int *dimension, int dimensionLength,
                             const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffset,
                             int64_t start, int64_t stop);

    /**
     * CPU implementation
     * @param x the input data
     * @param xShapeInfo the shape information for
     * the input data
     * @param extraParams the extra parameters for the problem
     * @param result the result buffer
     * @param resultShapeInfo the shape information
     */
    template<typename OpType>
    static void _CUDA_H exec(const void *x, const Nd4jLong *xShapeInfo,
                             void *extraParams,
                             void *result, const Nd4jLong *resultShapeInfo);

    /**
     * Reduce down to 1 number
     * (flat variant for an input addressed by element-wise stride)
     * @param x the input
     * @param xElementWiseStride the stride between consecutive elements
     * @param length the number of elements to reduce
     * @param extraParams the extra params
     * @return the reduced scalar of type X
     */
    template<typename OpType>
    static X _CUDA_H execScalar(const void *x, Nd4jLong xElementWiseStride,
                                Nd4jLong length,
                                void *extraParams);

#endif
};


#ifdef __CUDACC__
/**
 * Initializes shared storage for partial results.
 * @param extraParams op extra parameters
 * @param sPartials receives the partials pointer
 * @param sMemSize size of the storage in bytes
 * NOTE(review): declaration only here — confirm the shared-memory contract
 * against the .cu implementation.
 */
template<typename T>
__device__ void initializeShared(T *extraParams, T **sPartials, int sMemSize);
#endif

}
}
#endif // REDUCE_SAME_H