* platform helpers draft Signed-off-by: raver119 <raver119@gmail.com> * typo Signed-off-by: raver119 <raver119@gmail.com> * disable platform cmake Signed-off-by: raver119 <raver119@gmail.com> * another draft Signed-off-by: raver119 <raver119@gmail.com> * mkldnn convolution refactored Signed-off-by: raver119 <raver119@gmail.com> * minor tweaks Signed-off-by: raver119 <raver119@gmail.com> * one more safety check Signed-off-by: raver119 <raver119@gmail.com> * prototype works Signed-off-by: raver119 <raver119@gmail.com> * meh Signed-off-by: raver119 <raver119@gmail.com> * force static library mode for mkldnn Signed-off-by: raver119 <raver119@gmail.com> * - ismax fix - experimental arg fix - don't enforce openblas on Apple hardware Signed-off-by: raver119 <raver119@gmail.com> * bunch of small fixes Signed-off-by: raver119@gmail.com <raver119@gmail.com> * declare concurrent Signed-off-by: raver119@gmail.com <raver119@gmail.com> * - MKLDNN version upgrade to 1.0.2 - avgpool2d/maxpool2d APIs update Signed-off-by: raver119 <raver119@gmail.com> * - avgpool2d_bp/maxpool2d_bp APIs update Signed-off-by: raver119 <raver119@gmail.com> * - conv2d/batchnorm APIs update Signed-off-by: raver119 <raver119@gmail.com> * - lrn/conv2d_bp/conv3d/conv3d_bp APIs update Signed-off-by: raver119 <raver119@gmail.com> * all ops converted to MKLDNN 1.x Signed-off-by: raver119 <raver119@gmail.com> * bunch of tweaks Signed-off-by: raver119 <raver119@gmail.com> * namespace for platform helpers Signed-off-by: raver119 <raver119@gmail.com> * make sure platform helpers aren't opimized out Signed-off-by: raver119 <raver119@gmail.com> * build cpu_features on x86 systems Signed-off-by: raver119 <raver119@gmail.com> * build cpu_features on x86 systems Signed-off-by: raver119 <raver119@gmail.com> * more of cpu_features Signed-off-by: raver119 <raver119@gmail.com> * - mkldnn removed from java - cpu_features checks in CpuNDArrayFactory Signed-off-by: raver119 <raver119@gmail.com> * F16C definition renamed 
Signed-off-by: raver119 <raver119@gmail.com> * some mkldnn rearrangements Signed-off-by: raver119 <raver119@gmail.com> * check supported instructions before doing anything Signed-off-by: raver119 <raver119@gmail.com> * typo Signed-off-by: raver119 <raver119@gmail.com> * missied impl Signed-off-by: raver119 <raver119@gmail.com> * BUILD_PIC option Signed-off-by: raver119 <raver119@gmail.com> * conv2d fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool3d fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool3d_bp fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool2d_bp leak fix Signed-off-by: raver119 <raver119@gmail.com> * avgpool3d_bp leak fix Signed-off-by: raver119 <raver119@gmail.com> * maxpool bp leaks fixed Signed-off-by: raver119 <raver119@gmail.com> * printf removed Signed-off-by: raver119 <raver119@gmail.com> * batchnorm fix Signed-off-by: raver119 <raver119@gmail.com> * AVX warning/error polishing Signed-off-by: AlexDBlack <blacka101@gmail.com> * Fix Signed-off-by: AlexDBlack <blacka101@gmail.com> * More polish Signed-off-by: AlexDBlack <blacka101@gmail.com> * Polish Signed-off-by: AlexDBlack <blacka101@gmail.com> * remove previous MKL-DNN support layer Signed-off-by: raver119 <raver119@gmail.com> * avx2 tweak Signed-off-by: raver119 <raver119@gmail.com> * allow static for apple Signed-off-by: raver119@gmail.com <raver119@gmail.com> * exclude mkldnn in one more place Signed-off-by: raver119 <raver119@gmail.com> * exclude mkldnn in one more place Signed-off-by: raver119 <raver119@gmail.com> * restore OPENBLAS_PATH use Signed-off-by: raver119 <raver119@gmail.com> * add runtime check for avx/avx2 support Signed-off-by: raver119 <raver119@gmail.com> * convolution_auto Signed-off-by: raver119 <raver119@gmail.com> * Add logic for helper argument * minor test fix Signed-off-by: raver119 <raver119@gmail.com> * few tweaks Signed-off-by: raver119 <raver119@gmail.com> * few tweaks Signed-off-by: raver119 <raver119@gmail.com> * skip OpTracker props 
for non-x86 builds Signed-off-by: raver119 <raver119@gmail.com> * linux arm isn't x86 :) Signed-off-by: raver119 <raver119@gmail.com> * avx-512 Signed-off-by: raver119 <raver119@gmail.com> * CUDA presets fix Signed-off-by: raver119 <raver119@gmail.com> * BUILD_PIC Signed-off-by: raver119 <raver119@gmail.com> * prefetchw for avx2 Signed-off-by: raver119 <raver119@gmail.com> * BUILD_PIC again Signed-off-by: raver119 <raver119@gmail.com>
176 lines
5.8 KiB
Plaintext
/*******************************************************************************
|
|
* Copyright (c) 2015-2018 Skymind, Inc.
|
|
*
|
|
* This program and the accompanying materials are made available under the
|
|
* terms of the Apache License, Version 2.0 which is available at
|
|
* https://www.apache.org/licenses/LICENSE-2.0.
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
* License for the specific language governing permissions and limitations
|
|
* under the License.
|
|
*
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
******************************************************************************/
|
|
|
|
//
|
|
// Created by raver119 on 30.11.17.
|
|
//
|
|
|
|
#include <execution/LaunchContext.h>
#include <execution/AffinityManager.h>
#include <exceptions/cuda_exception.h>
#include <helpers/cublasHelper.h>
#include <logger.h>

#include <mutex>
#include <thread>
|
|
|
// One ContextBuffers instance per thread. The LaunchContext accessors below
// serve per-thread resources (reduction/scalar/allocation buffers, streams)
// out of this object rather than storing them on the context itself.
thread_local nd4j::ContextBuffers contextBuffers = nd4j::ContextBuffers();
|
|
|
|
namespace nd4j {
|
|
|
|
// Lazily-populated pool of per-device contexts; filled by defaultContext().
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();

// Serializes one-time initialization of _contexts in defaultContext().
std::mutex LaunchContext::_mutex;
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// Legacy constructor. The stream/pointer arguments are accepted but no longer
// stored here — per-thread stream and buffer state now lives in the
// thread_local contextBuffers, so this only resets the local members.
LaunchContext::LaunchContext(cudaStream_t *cudaStream, cudaStream_t& specialCudaStream, void* reductionPointer, void* scalarPointer, int* allocationPointer)
        : _workspace(nullptr), _isAllocated(false) {
    // arguments intentionally unused
}
|
|
|
|
// Destructor. The context owns no resources directly anymore (buffers live in
// the thread_local ContextBuffers), so there is nothing to free. The previous
// empty `if (_isAllocated) {}` branch was dead code and has been removed.
LaunchContext::~LaunchContext() {
    // no-op
}
|
|
|
|
////////////////////////////////////////////////////////////////////////
|
|
// Default constructor: no workspace, device 0, marked as allocated.
// (Kept trivial so clang/ranlib have a concrete definition to anchor.)
LaunchContext::LaunchContext()
        : _workspace(nullptr), _deviceID(0), _isAllocated(true) {
}
|
|
|
|
// Legacy constructor taking opaque Nd4jPointer handles. The handles are
// currently ignored — stream/buffer state is served from the thread_local
// contextBuffers instead.
LaunchContext::LaunchContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer, Nd4jPointer scalarPointer, Nd4jPointer allocationPointer)
        : _isAllocated(false) {
    // arguments intentionally unused
}
|
|
|
|
////////////////////////////////////////////////////////////////////////
/**
 * Returns the LaunchContext for the calling thread's current device.
 *
 * The context aggregates multiple entities:
 *  1) temporary buffers  — per-thread (served via thread_local ContextBuffers)
 *  2) CUDA stream        — per-thread or per-device
 *  3) cuBLAS handle      — per-device
 *
 * The per-device pool is created lazily on first call. Fix: initialization is
 * now guarded by std::lock_guard instead of manual _mutex.lock()/unlock(),
 * so the mutex is released even if context construction throws — the manual
 * pair would otherwise leave _mutex locked and deadlock every later caller.
 */
LaunchContext* LaunchContext::defaultContext() {
    const auto deviceId = AffinityManager::currentDeviceId();

    {
        // we need this block synchronous, to avoid double initialization etc
        std::lock_guard<std::mutex> lock(_mutex);
        if (_contexts.empty()) {
            // create one context per device
            const auto numDevices = AffinityManager::numberOfDevices();

            _contexts.resize(numDevices);
            for (int e = 0; e < numDevices; e++) {
                AffinityManager::setCurrentNativeDevice(e);

                _contexts[e] = std::make_shared<LaunchContext>();
            }

            // don't forget to restore device back again
            AffinityManager::setCurrentNativeDevice(deviceId);
        }
    }

    // return context for current device
    return _contexts[deviceId].get();
}
|
|
|
|
|
|
void* LaunchContext::getReductionPointer () const {
|
|
return contextBuffers.reductionBuffer();
|
|
};
|
|
|
|
void* LaunchContext::getScalarPointer() const {
|
|
return contextBuffers.scalarBuffer();
|
|
};
|
|
|
|
int* LaunchContext::getAllocationPointer() const {
|
|
return reinterpret_cast<int*>(contextBuffers.allocationBuffer());
|
|
};
|
|
|
|
void* LaunchContext::getCublasHandle() const {
|
|
return CublasHelper::getInstance()->handle();
|
|
};
|
|
|
|
void* LaunchContext::getCusolverHandle() const {
|
|
return CublasHelper::getInstance()->solver();
|
|
};
|
|
|
|
// Main execution CUDA stream for the calling thread.
cudaStream_t* LaunchContext::getCudaStream() const {
    auto stream = contextBuffers.execStream();
    return reinterpret_cast<cudaStream_t*>(stream);
}
|
|
|
|
// Secondary ("special") CUDA stream for the calling thread.
// Fix: removed a stray duplicate semicolon (`;;`) after the return statement.
cudaStream_t* LaunchContext::getCudaSpecialStream() const {
    return reinterpret_cast<cudaStream_t*>(contextBuffers.specialStream());
}
|
|
|
|
|
|
// Installs a reduction scratch buffer for the calling thread.
// Storage is delegated to the thread_local contextBuffers.
void LaunchContext::setReductionPointer (void* reductionPointer) {
    contextBuffers.setReductionBuffer(reductionPointer);
}
|
|
|
|
// Installs a scalar scratch buffer for the calling thread.
// Storage is delegated to the thread_local contextBuffers.
void LaunchContext::setScalarPointer(void* scalarPointer) {
    contextBuffers.setScalarBuffer(scalarPointer);
}
|
|
|
|
// Installs an allocation counter buffer for the calling thread.
// Storage is delegated to the thread_local contextBuffers.
void LaunchContext::setAllocationPointer(int* allocationPointer) {
    contextBuffers.setAllocationBuffer(allocationPointer);
}
|
|
|
|
// Intentionally a no-op: the execution stream now lives in the thread_local
// contextBuffers, so this setter silently ignores its argument.
// NOTE(review): callers may still expect this to take effect — confirm no
// call sites rely on overriding the stream through this API.
void LaunchContext::setCudaStream(cudaStream_t* cudaStream) {
    //_cudaStream = cudaStream;
};
|
|
|
|
// Intentionally a no-op: the special stream now lives in the thread_local
// contextBuffers, so this setter silently ignores its argument.
// NOTE(review): confirm no call sites rely on overriding the special stream
// through this API.
void LaunchContext::setCudaSpecialStream(cudaStream_t* cudaStream) {
    //_cudaSpecialStream = cudaStream;
};
|
|
|
|
void LaunchContext::setCublasHandle(void *handle) {
|
|
_cublasHandle = handle;
|
|
};
|
|
|
|
// Replaces this thread's ContextBuffers with the contents of `buffers`.
// NOTE(review): despite the name, this is an assignment, not a swap — the
// caller's `buffers` does not receive the old per-thread state. Confirm
// ContextBuffers::operator= implements the intended ownership transfer.
void LaunchContext::swapContextBuffers(ContextBuffers &buffers) {
    contextBuffers = buffers;
};
|
|
|
|
// Releases the calling thread's context buffers back to the system.
void LaunchContext::releaseBuffers() {
    contextBuffers.release();
}
|
|
|
|
bool LaunchContext::isInitialized() {
|
|
return contextBuffers.isInitialized();
|
|
}
|
|
|
|
// Per-thread error slot used to propagate failures across the API boundary.
sd::ErrorReference* LaunchContext::errorReference() {
    auto ref = contextBuffers.errorReference();
    return ref;
}
|
|
|
|
void* LaunchContext::engine() {
|
|
return _engine;
|
|
}
|
|
} |