Fix unsorted segment ops

parent 41498c9b69
commit e88d0fe96c
@@ -551,6 +551,31 @@ public class JavaSourceArgDescriptorSource implements ArgDescriptorSource {
            }
        }

        if(name.contains("fill")) {
            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("java")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("shape")
                            .setIsArray(false)
                            .setArgIndex(0)
                            .build()).build());

            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("java")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("result")
                            .setIsArray(false)
                            .setArgIndex(1)
                            .build()).build());
        }

        if(name.contains("loop_cond")) {
            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("java")
@@ -855,6 +855,63 @@ public class Libnd4jArgDescriptorSource implements ArgDescriptorSource {
                    .build()).build());
        }

        if(name.contains("fill")) {
            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("java")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("shape")
                            .setIsArray(false)
                            .setArgIndex(0)
                            .build()).build());

            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("java")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("result")
                            .setIsArray(false)
                            .setArgIndex(1)
                            .build()).build());
        }

        if(name.contains("unsorted_")) {
            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("c++")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("input")
                            .setIsArray(false)
                            .setArgIndex(0)
                            .build()).build());

            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("c++")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("idxSegments")
                            .setIsArray(false)
                            .setArgIndex(1)
                            .build()).build());

            argDescriptorProposals.add(ArgDescriptorProposal.builder()
                    .sourceOfProposal("c++")
                    .proposalWeight(Double.MAX_VALUE)
                    .descriptor(OpNamespace.ArgDescriptor.newBuilder()
                            .setArgType(OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR)
                            .setName("numSegments")
                            .setIsArray(false)
                            .setArgIndex(2)
                            .build()).build());
        }

        if(name.equals("lin_space")) {
            argDescriptorProposals.add(ArgDescriptorProposal.builder()
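For context on the three-tensor signature these proposals declare (input, idxSegments, numSegments), here is a minimal, self-contained Java sketch of the unsorted-segment semantics; it is illustrative only, not the libnd4j implementation:

    // Illustrative only: segment ids are unsorted and each must lie in [0, numSegments).
    static double[] unsortedSegmentMax(double[] input, int[] idxSegments, int numSegments) {
        double[] out = new double[numSegments];
        java.util.Arrays.fill(out, Double.NEGATIVE_INFINITY);
        for (int i = 0; i < input.length; i++) {
            out[idxSegments[i]] = Math.max(out[idxSegments[i]], input[i]);
        }
        return out;
    }
    // unsortedSegmentMax(new double[]{1, 3, 2, 5, 4}, new int[]{0, 0, 1, 1, 0}, 2) -> [4.0, 5.0]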
[File diff suppressed because it is too large]
@@ -33,7 +33,8 @@ namespace sd {
        _offset = offset;

        if (_offset + length > _dataBuffer->getLenInBytes()) {
            throw std::runtime_error("offset + length is higher than original length");
            this->expand(length);
            nd4j_debug("Expanding data buffer length by %d\n",length);
        }
    }
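The hunk above replaces a hard failure with expand-on-demand: when a requested view would overrun the backing buffer, the buffer is grown instead of throwing. A hedged Java analogue of the same pattern (hypothetical helper, not nd4j API):

    // Grow the backing storage rather than rejecting the oversized view.
    static byte[] ensureCapacity(byte[] buffer, int offset, int length) {
        if (offset + length > buffer.length) {
            return java.util.Arrays.copyOf(buffer, offset + length);
        }
        return buffer;
    }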
@@ -28,42 +28,50 @@ namespace sd {
    CUSTOM_OP_IMPL(unsorted_segment_max, 2, 1, false, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto idxSegments = INPUT_VARIABLE(1);
        auto reshapedSegments = *idxSegments;
        if(!idxSegments->isVector() && idxSegments->rankOf() > 1) {
            reshapedSegments = idxSegments->reshape('c',{idxSegments->lengthOf()},false);
        }

        auto segmentedOutput = OUTPUT_NULLIFIED(0);
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
        REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_max: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_max: segment indexes array length should be equal to the input first dimension, but %ld != %ild.", idxSegments->lengthOf(), input->sizeAt(0));

        Nd4jLong wrong;

        REQUIRE_TRUE(helpers::unsortedSegmentIndicesValidate(block.launchContext(), idxSegments, numOfClasses, wrong), 0, "unsorted_segment_max: segment indices should be in range [0, %ld), but %ld != %ld",
                     numOfClasses, wrong, numOfClasses);

        helpers::unsortedSegmentMaxFunctor(block.launchContext(), input, idxSegments, numOfClasses, segmentedOutput);
        REQUIRE_TRUE(reshapedSegments.isVector(), 0, "unsorted_segment_max: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        helpers::unsortedSegmentMaxFunctor(block.launchContext(), input, &reshapedSegments, numOfClasses, segmentedOutput);

        return ND4J_STATUS_OK;
    }
    DECLARE_TYPES(unsorted_segment_max) {
        getOpDescriptor()
                ->setAllowedOutputTypes({ALL_FLOATS, ALL_INTS})
                ->setAllowedInputTypes(0, {ALL_FLOATS, ALL_INTS})
                ->setAllowedInputTypes(1, {ALL_INTS})
                ->setSameMode(true);
                ->setAllowedOutputTypes({ALL_FLOATS, ALL_INTS})
                ->setAllowedInputTypes(0, {ALL_FLOATS, ALL_INTS})
                ->setAllowedInputTypes(1, {ALL_INTS})
                ->setSameMode(true);
    }
    DECLARE_SHAPE_FN(unsorted_segment_max) {

        auto in = inputShape->at(0);
        int outRank = shape::rank(in);
        Nd4jLong* outputShape = nullptr;
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
        Nd4jLong* outputShape;

        ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
        if(INPUT_VARIABLE(0)->rankOf() >= 2) {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; i++)
                outputShape[i + 1] = shape::sizeAt(in, i);

            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; ++i)
                outputShape[i + 1] = shape::sizeAt(in, i);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        } else {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(1), Nd4jLong);
            outputShape[0] = 1;
            outputShape[1] = numOfClasses;
            shape::printShapeInfo(outputShape);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
        }

        ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        return SHAPELIST(CONSTANT(outputShape));
    }
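The shape function now branches on input rank: for rank >= 2 the segment axis is replaced by numOfClasses and trailing dimensions are kept; for rank-1 input the output is a vector of length numOfClasses. A small Java sketch of that rule (illustrative, not the libnd4j shape descriptor):

    static long[] unsortedSegmentOutputShape(long[] inShape, long numOfClasses) {
        if (inShape.length >= 2) {
            long[] out = inShape.clone();
            out[0] = numOfClasses;           // e.g. [5, 3] with 2 classes -> [2, 3]
            return out;
        }
        return new long[]{numOfClasses};     // rank-1 input -> [numOfClasses]
    }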
@@ -75,7 +83,7 @@ namespace sd {
    DECLARE_TYPES(unsorted_segment_max_bp) {
        getOpDescriptor()
                ->setAllowedOutputTypes(0, {ALL_FLOATS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(0, {ALL_FLOATS})
                ->setAllowedInputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(2, {ALL_FLOATS})
@@ -27,19 +27,21 @@ namespace sd {
namespace ops {
    CUSTOM_OP_IMPL(unsorted_segment_mean, 2, 1, false, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto reshapedInput = *input;
        /* if(!input->isVector()) {
            reshapedInput = input->reshape('c',{input->lengthOf()},false);
        }*/

        auto idxSegments = INPUT_VARIABLE(1);
        auto reshapedSegments = *idxSegments;
        if(!idxSegments->isVector() && idxSegments->rankOf() > 1) {
            reshapedSegments = idxSegments->reshape('c',{idxSegments->lengthOf()},false);
        }

        auto segmentedOutput = OUTPUT_NULLIFIED(0);
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

        REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_mean: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_mean: segment indexes array length should be equal to the input first dimension, but %ld != %ld.", idxSegments->lengthOf(), input->sizeAt(0));

        Nd4jLong wrong;

        REQUIRE_TRUE(helpers::unsortedSegmentIndicesValidate(block.launchContext(), idxSegments, numOfClasses, wrong), 0, "unsorted_segment_mean: segment indices should be in range [0, %ld), but %ld != %ld",
                     numOfClasses, wrong, numOfClasses);

        helpers::unsortedSegmentMeanFunctor(block.launchContext(), input, idxSegments, numOfClasses, segmentedOutput);
        REQUIRE_TRUE(reshapedSegments.isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        helpers::unsortedSegmentMeanFunctor(block.launchContext(), &reshapedInput, &reshapedSegments, numOfClasses, segmentedOutput);

        return ND4J_STATUS_OK;
    }
@@ -58,14 +60,23 @@ namespace sd {
        Nd4jLong* outputShape = nullptr;
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

        ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
        if(INPUT_VARIABLE(0)->rankOf() >= 2) {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; i++)
                outputShape[i + 1] = shape::sizeAt(in, i);

            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; ++i)
                outputShape[i + 1] = shape::sizeAt(in, i);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        } else {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(1), Nd4jLong);
            outputShape[0] = 1;
            outputShape[1] = numOfClasses;
            shape::printShapeInfo(outputShape);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
        }

        ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        return SHAPELIST(CONSTANT(outputShape));
    }
@@ -27,37 +27,49 @@ namespace sd {
namespace ops {
    CUSTOM_OP_IMPL(unsorted_segment_min, 2, 1, false, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto reshapedInput = *input;


        auto idxSegments = INPUT_VARIABLE(1);
        auto reshapedSegments = *idxSegments;
        if(!idxSegments->isVector() && idxSegments->rankOf() > 1) {
            reshapedSegments = idxSegments->reshape('c',{idxSegments->lengthOf()},false);
        }

        auto segmentedOutput = OUTPUT_NULLIFIED(0);
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
        REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_min: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_min: segment indexes array length should be equal to the input first dimension, but %ld != %ld.", idxSegments->lengthOf(), input->sizeAt(0));

        Nd4jLong wrong;

        REQUIRE_TRUE(helpers::unsortedSegmentIndicesValidate(block.launchContext(), idxSegments, numOfClasses, wrong), 0, "unsorted_segment_min: segment indices should be in range [0, %ld), but %ld > %ld",
                     numOfClasses, wrong, numOfClasses);

        helpers::unsortedSegmentMinFunctor(block.launchContext(), input, idxSegments, numOfClasses, segmentedOutput);
        REQUIRE_TRUE(reshapedSegments.isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());

        helpers::unsortedSegmentMinFunctor(block.launchContext(), &reshapedInput, &reshapedSegments, numOfClasses, segmentedOutput);
        return ND4J_STATUS_OK;

    }

    DECLARE_SHAPE_FN(unsorted_segment_min) {

        auto in = inputShape->at(0);
        int outRank = shape::rank(in);
        Nd4jLong* outputShape = nullptr;
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

        ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
        if(INPUT_VARIABLE(0)->rankOf() >= 2) {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; i++)
                outputShape[i + 1] = shape::sizeAt(in, i);

            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; ++i)
                outputShape[i + 1] = shape::sizeAt(in, i);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        } else {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(1), Nd4jLong);
            outputShape[0] = 1;
            outputShape[1] = numOfClasses;
            shape::printShapeInfo(outputShape);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
        }

        ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        return SHAPELIST(CONSTANT(outputShape));
    }
@@ -77,7 +89,7 @@ namespace sd {
    DECLARE_TYPES(unsorted_segment_min_bp) {
        getOpDescriptor()
                ->setAllowedOutputTypes(0, {ALL_FLOATS, ALL_INTS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(0, {ALL_FLOATS, ALL_INTS})
                ->setAllowedInputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(2, {ALL_FLOATS, ALL_INTS})
@@ -27,18 +27,21 @@ namespace sd {
namespace ops {
    CUSTOM_OP_IMPL(unsorted_segment_prod, 2, 1, false, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto reshapedInput = *input;
        /* if(!input->isVector()) {
            reshapedInput = input->reshape('c',{input->lengthOf()},false);
        }*/

        auto idxSegments = INPUT_VARIABLE(1);
        auto reshapedSegments = *idxSegments;
        if(!idxSegments->isVector() && idxSegments->rankOf() > 1) {
            reshapedSegments = idxSegments->reshape('c',{idxSegments->lengthOf()},false);
        }

        auto segmentedOutput = OUTPUT_NULLIFIED(0);
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
        REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_prod: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_prod: segment indexes array length should be equal to the input first dimension, but %ld != %ld.", idxSegments->lengthOf(), input->sizeAt(0));

        Nd4jLong wrong = 0;

        REQUIRE_TRUE(helpers::unsortedSegmentIndicesValidate(block.launchContext(), idxSegments, numOfClasses, wrong), 0, "unsorted_segment_prod: segment indices should be in range [0, %ld), but %ld != %ld",
                     numOfClasses, wrong, numOfClasses);

        helpers::unsortedSegmentProdFunctor(block.launchContext(), input, idxSegments, numOfClasses, segmentedOutput);
        REQUIRE_TRUE(reshapedSegments.isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        helpers::unsortedSegmentProdFunctor(block.launchContext(), &reshapedInput, &reshapedSegments, numOfClasses, segmentedOutput);

        return ND4J_STATUS_OK;
    }
@@ -50,14 +53,23 @@ namespace sd {
        Nd4jLong* outputShape = nullptr;
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

        ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
        if(INPUT_VARIABLE(0)->rankOf() >= 2) {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; i++)
                outputShape[i + 1] = shape::sizeAt(in, i);

            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; ++i)
                outputShape[i + 1] = shape::sizeAt(in, i);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        } else {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(1), Nd4jLong);
            outputShape[0] = 1;
            outputShape[1] = numOfClasses;
            shape::printShapeInfo(outputShape);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
        }

        ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        return SHAPELIST(CONSTANT(outputShape));
    }
@@ -90,7 +102,7 @@ namespace sd {
    DECLARE_TYPES(unsorted_segment_prod_bp) {
        getOpDescriptor()
                ->setAllowedOutputTypes(0, {ALL_FLOATS})
                ->setAllowedOutputTypes(1, {ALL_INDICES})
                ->setAllowedOutputTypes(1, {ALL_INDICES})
                ->setAllowedInputTypes(0, {ALL_FLOATS})
                ->setAllowedInputTypes(1, {ALL_INDICES})
                ->setAllowedInputTypes(2,{ALL_FLOATS, ALL_INTS})
@@ -27,18 +27,18 @@ namespace sd {
namespace ops {
    CUSTOM_OP_IMPL(unsorted_segment_sqrt_n, 2, 1, false, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto reshapedInput = *input;

        auto idxSegments = INPUT_VARIABLE(1);
        auto reshapedSegments = *idxSegments;
        if(!idxSegments->isVector() && idxSegments->rankOf() > 1) {
            reshapedSegments = idxSegments->reshape('c',{idxSegments->lengthOf()},false);
        }

        auto segmentedOutput = OUTPUT_NULLIFIED(0);
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
        REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_sqrt_n: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_sqrt_n: segment indexes array length should be equal to the input first dimension, but %ld != %ld.", idxSegments->lengthOf(), input->sizeAt(0));

        Nd4jLong wrong;

        REQUIRE_TRUE(helpers::unsortedSegmentIndicesValidate(block.launchContext(), idxSegments, numOfClasses, wrong), 0, "unsorted_segment_sqrt_n: segment indices should be in range [0, %ld), but %ld != %ld",
                     numOfClasses, wrong, numOfClasses);

        helpers::unsortedSegmentSqrtNFunctor(block.launchContext(), input, idxSegments, numOfClasses, segmentedOutput);
        REQUIRE_TRUE(reshapedSegments.isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        helpers::unsortedSegmentSqrtNFunctor(block.launchContext(), &reshapedInput, &reshapedSegments, numOfClasses, segmentedOutput);

        return ND4J_STATUS_OK;
    }
@@ -50,14 +50,23 @@ namespace sd {
        Nd4jLong* outputShape = nullptr;
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

        ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
        if(INPUT_VARIABLE(0)->rankOf() >= 2) {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; i++)
                outputShape[i + 1] = shape::sizeAt(in, i);

            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; ++i)
                outputShape[i + 1] = shape::sizeAt(in, i);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        } else {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(1), Nd4jLong);
            outputShape[0] = 1;
            outputShape[1] = numOfClasses;
            shape::printShapeInfo(outputShape);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
        }

        ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        return SHAPELIST(CONSTANT(outputShape));
    }
@@ -75,7 +84,7 @@ namespace sd {
    DECLARE_TYPES(unsorted_segment_sqrt_n_bp) {
        getOpDescriptor()
                ->setAllowedOutputTypes(0, {ALL_FLOATS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(0, {ALL_FLOATS})
                ->setAllowedInputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(2, {ALL_FLOATS})
@@ -27,18 +27,19 @@ namespace sd {
namespace ops {
    CUSTOM_OP_IMPL(unsorted_segment_sum, 2, 1, false, 0, 0) {
        auto input = INPUT_VARIABLE(0);
        auto reshapedInput = *input;


        auto idxSegments = INPUT_VARIABLE(1);
        auto reshapedSegments = *idxSegments;
        if(!idxSegments->isVector() || idxSegments->rankOf() > 1) {
            reshapedSegments = idxSegments->reshape('c',{idxSegments->lengthOf()},false);
        }

        auto segmentedOutput = OUTPUT_NULLIFIED(0);
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);
        REQUIRE_TRUE(idxSegments->isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        REQUIRE_TRUE(idxSegments->lengthOf() == input->sizeAt(0), 0, "unsorted_segment_sum: segment indexes array length should be equal to the input first dimension, but %ld != %ld", idxSegments->lengthOf(), input->sizeAt(0));

        Nd4jLong wrong;

        REQUIRE_TRUE(helpers::unsortedSegmentIndicesValidate(block.launchContext(), idxSegments, numOfClasses, wrong), 0, "unsorted_segment_sum: segment indices should be in range [0, %ld), but %ld > %ld",
                     numOfClasses, wrong, numOfClasses);

        helpers::unsortedSegmentSumFunctor(block.launchContext(), input, idxSegments, numOfClasses, segmentedOutput);
        REQUIRE_TRUE(reshapedSegments.isVector(), 0, "unsorted_segment_sum: segment indexes array should be a vector, but it rank is %i.", idxSegments->rankOf());
        helpers::unsortedSegmentSumFunctor(block.launchContext(), &reshapedInput, &reshapedSegments, numOfClasses, segmentedOutput);

        return ND4J_STATUS_OK;
    }
@@ -57,14 +58,23 @@ namespace sd {
        Nd4jLong* outputShape = nullptr;
        Nd4jLong numOfClasses = block.width() == 3 ? INPUT_VARIABLE(2)->e<Nd4jLong>(0) : INT_ARG(0);

        ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
        if(INPUT_VARIABLE(0)->rankOf() >= 2) {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(outRank), Nd4jLong);
            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; i++)
                outputShape[i + 1] = shape::sizeAt(in, i);

            outputShape[0] = outRank;
            outputShape[1] = numOfClasses;
            for(int i = 1; i < outRank; ++i)
                outputShape[i + 1] = shape::sizeAt(in, i);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        } else {
            ALLOCATE(outputShape, block.getWorkspace(), shape::shapeInfoLength(1), Nd4jLong);
            outputShape[0] = 1;
            outputShape[1] = numOfClasses;
            shape::printShapeInfo(outputShape);
            ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));
        }

        ShapeUtils::updateStridesAndType(outputShape, in, shape::order(in));

        return SHAPELIST(CONSTANT(outputShape));
    }
@@ -86,7 +96,7 @@ namespace sd {
    DECLARE_TYPES(unsorted_segment_sum_bp) {
        getOpDescriptor()
                ->setAllowedOutputTypes(0, {ALL_FLOATS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedOutputTypes(1, {ALL_INTS})
                ->setAllowedInputTypes(sd::DataType::ANY)
                ->setSameMode(false);
    }
@@ -36,17 +36,24 @@ namespace sd {
     * uniform distribution
     * takes 1 ndarray
     *
     * T argumens map:
     * T arguments map:
     * TArgs[0] - min for rng
     * TArgs[1] - max for rng
     */
    CUSTOM_OP_IMPL(randomuniform, 1, 1, true, 0, 0) {
    CUSTOM_OP_IMPL(randomuniform, -1, 1, true, 0, -1) {
        // uniform distribution
        auto rng = block.randomGenerator();
        auto dtype = DataType::FLOAT32;
        if (block.getIArguments()->size())
            dtype = (DataType)INT_ARG(0);

        if(block.getIArguments()->size() > 1) {
            auto seed = INT_ARG(1);
            rng.setStates(seed,seed ^ 0xdeadbeef);
            nd4j_debug("randomuniform: Setting seed %d\n",seed);
            //rng.setSeed(seed);
        }

        auto min = block.width() > 1 ? INPUT_VARIABLE(1) : (NDArray*) nullptr;
        auto max = block.width() > 2 ? INPUT_VARIABLE(2) : (NDArray*) nullptr;
        bool disposable = false;
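The new seed handling derives two generator state words from one user seed: the seed itself and the seed XORed with a constant. A trivial Java sketch of that derivation (hypothetical helper name):

    static long[] seedToStates(long seed) {
        return new long[]{seed, seed ^ 0xdeadbeefL};   // second word decorrelated by XOR
    }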
@@ -50,7 +50,7 @@ namespace sd {
         * 0 - uniformly distributed values of given type (between min and max)
         */
        #if NOT_EXCLUDED(OP_randomuniform)
        DECLARE_CUSTOM_OP(randomuniform, 1, 1, false, 0, 0);
        DECLARE_CUSTOM_OP(randomuniform, 1, 1, false, 0, -1);
        #endif
        /*
         * multinomial (categorical) random generator draws samples from a multinomial distribution
[File diff suppressed because it is too large]
@@ -672,6 +672,7 @@ public class InferenceSession extends AbstractSession<INDArray, Pair<SameDiffOp,
            if (tArr == null && allIterInputs != null) {
                tArr = lookup(inTensorArray.name(), allIterInputs, false);
            }

            List<INDArray> l = tensorArrays.get(tArr);
            Preconditions.checkState(l != null, "Could not find TensorArray: %s", tArr);
@@ -703,6 +704,14 @@ public class InferenceSession extends AbstractSession<INDArray, Pair<SameDiffOp,
            if (valuesArr.rank() == 1 && get.rank() > 0) {
                get = get.reshape();
            }

            //reflect the expanded storage
            if(outIdx >= l.size()) {
                while(l.size() <= outIdx) {
                    l.add(null);
                }
            }

            l.set(outIdx, get);

            //Add dependency for values array until end of execution
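The expansion step pads the backing list with nulls until index outIdx is addressable before calling set(). A self-contained Java sketch of the same pattern (hypothetical helper name):

    static <T> void setExpanding(java.util.List<T> l, int outIdx, T value) {
        while (l.size() <= outIdx) {
            l.add(null);               // grow storage so outIdx is in range
        }
        l.set(outIdx, value);
    }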
|
@@ -146,6 +146,10 @@ public class Concat extends DynamicCustomOp {

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> dataTypes){
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }

        DataType first = dataTypes.get(0);

        for( int i = 1; i < dataTypes.size() - (isDynamicAxis ? 1 : 0); i++) {
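Several op classes in this commit gain the same guard: an explicitly requested output datatype (dArguments) overrides datatype inference from the inputs. The recurring shape of the pattern, sketched in Java using the names from the diff above:

    public List<DataType> calculateOutputDataTypes(List<DataType> inputTypes) {
        if (!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));   // explicit dtype wins
        }
        return Collections.singletonList(inputTypes.get(0));       // else inherit from input 0
    }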
|
@@ -89,7 +89,7 @@ public class BinCount extends DynamicCustomOp {
                inputTypes, getClass());

        //If weights present, same type as weights. Otherwise specified dtype
        if(inputTypes.size() == 2 || inputTypes.size() == 4) {
        if(inputTypes.size() >= 2) {
            //weights available case or TF import case (args 2/3 are min/max)
            return Collections.singletonList(inputTypes.get(1));
        } else {
@@ -138,7 +138,10 @@ public class Fill extends DynamicCustomOp {
    }

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> dataTypes){
    public List<DataType> calculateOutputDataTypes(List<DataType> dataTypes) {
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }
        //1 or 2 possible: 2 for TF import (fill with specified value)
        Preconditions.checkState(dataTypes != null && (dataTypes.size() == 1 || dataTypes.size() == 2),
                "Expected 1 or 2 input datatypes for %s, got %s", getClass(), dataTypes);
@@ -27,6 +27,7 @@ import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.api.ops.impl.transforms.BaseDynamicTransformOp;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -73,8 +74,10 @@ public class Identity extends BaseDynamicTransformOp {
    }

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> dataTypes){
    public List<DataType> calculateOutputDataTypes(List<DataType> dataTypes) {
        Preconditions.checkState(dataTypes != null && dataTypes.size() == 1, "Expected exactly 1 input datatype for %s, got input %s", getClass(), dataTypes);
        if(!dArguments.isEmpty())
            return Arrays.asList(dArguments.get(0));
        return dataTypes;
    }
@@ -65,6 +65,9 @@ public class UnsortedSegmentMax extends DynamicCustomOp {

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes){
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }
        Preconditions.checkState(inputDataTypes != null && (inputDataTypes.size() == 2 || inputDataTypes.size() == 3),
                "Expected exactly 2 input data types for %s, got %s", getClass(), inputDataTypes);
        return Collections.singletonList(inputDataTypes.get(0));
@@ -62,6 +62,9 @@ public class UnsortedSegmentMean extends DynamicCustomOp {

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes){
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }
        Preconditions.checkState(inputDataTypes != null && (inputDataTypes.size() == 2 || inputDataTypes.size() == 3),
                "Expected exactly 2 input data types for %s, got %s", getClass(), inputDataTypes);
        return Collections.singletonList(inputDataTypes.get(0));
@@ -66,6 +66,9 @@ public class UnsortedSegmentMin extends DynamicCustomOp {

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes){
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }
        Preconditions.checkState(inputDataTypes != null && (inputDataTypes.size() == 2 || inputDataTypes.size() == 3),
                "Expected exactly 2 input data types for %s, got %s", getClass(), inputDataTypes);
        return Collections.singletonList(inputDataTypes.get(0));
@@ -66,8 +66,11 @@ public class UnsortedSegmentProd extends DynamicCustomOp {

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes){
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }
        Preconditions.checkState(inputDataTypes != null && (inputDataTypes.size() == 2 || inputDataTypes.size() == 3),
                "Expected exactly 2 input data types for %s, got %s", getClass(), inputDataTypes);
                "Expected at least 2 input data types for %s, got %s", getClass(), inputDataTypes);
        return Collections.singletonList(inputDataTypes.get(0));
    }
}
@@ -30,6 +30,7 @@ import org.nd4j.linalg.api.ops.DynamicCustomOp;
import org.nd4j.linalg.api.ops.impl.transforms.segment.bp.UnsortedSegmentSqrtNBp;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

@NoArgsConstructor
@@ -61,10 +62,14 @@ public class UnsortedSegmentSqrtN extends DynamicCustomOp {

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes){
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }

        Preconditions.checkState(inputDataTypes != null && (inputDataTypes.size() == 2 || inputDataTypes.size() == 3),
                "Expected exactly 2 input data types for %s, got %s", getClass(), inputDataTypes);
        List<DataType> out = new ArrayList<>();
        for( int i=0; i<numSegments; i++ ){
        for( int i = 0; i < numSegments; i++) {
            out.add(inputDataTypes.get(0));
        }
        return out;
@@ -66,7 +66,10 @@ public class UnsortedSegmentSum extends DynamicCustomOp {
    }

    @Override
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes){
    public List<DataType> calculateOutputDataTypes(List<DataType> inputDataTypes) {
        if(!dArguments.isEmpty()) {
            return Collections.singletonList(dArguments.get(0));
        }
        Preconditions.checkState(inputDataTypes != null && (inputDataTypes.size() == 2 || inputDataTypes.size() == 3),
                "Expected exactly 2 input data types for %s, got %s", getClass(), inputDataTypes);
        //TODO Allow customizing output type
@@ -87,7 +87,8 @@ public class RandomFactory {
    }

    /**
     * This method returns new onject implementing Random interface, initialized with seed value, with size of elements in buffer
     * This method returns a new object implementing {@link Random}
     * interface, initialized with seed value, with size of elements in buffer
     *
     * @param seed rng seed
     * @param size size of underlying buffer
@@ -69,28 +69,40 @@ public class TFGraphTestAllSameDiff {   //Note: Can't extend BaseNd4jTest here a
     * the status of the test failing. No tests will run.
     */
    public final static List<String> EXECUTE_ONLY_MODELS = Arrays.asList(
            "conv2d_transpose/channels_last_b1_k2_s2_SAME",
            "conv2d_transpose/channels_last_b1_k2_s1_SAME",
            "bincount/rank1",
            "bincount/rank1_weights",
            "bincount/rank1_max5",
            "emptyArrayTests/zeros/ones_rank3",
            "conv2d_transpose/channels_last_b2_k2_s1_SAME_nobias",
            "emptyArrayTests/identity_n/rank3.",
            "emptyReduceAxisTests/reduce_sum/rank1",
            "emptyReduceAxisTests/reduce_sum/rank1_keep",
            "emptyReduceAxisTests/reduce_sum/rank3",
            "emptyReduceAxisTests/reduce_any/rank2",
            "embedding_lookup/rank2_multiple_div_nomaxnorm",
            "emptyReduceAxisTests/reduce_all/rank2_keep",
            "conv2d_transpose/channels_first_b1_k2_s1_SAME_sigmoid",
            "conv2d_transpose/channels_first_b1_k2_s1_SAME_elu",
            "emptyReduceAxisTests/reduce_prod/rank1",
            "conv2d_transpose/channels_first_b2_k2_s1_SAME_nobias",
            "conv2d_transpose/channels_last_b2_k2_s1_SAME_regularizers",
            "conv2d_transpose/channels_last_b1_k2_s1_SAME_elu",
            "conv2d_transpose/channels_first_b1_k2_s1_SAME_selu_nobias",
            "embedding_lookup/rank2_multiple_mod_maxnorm1",
            /*"layers_dropout/rank2_d01_train",
            "layers_dropout/rank4_d05_train",
            "layers_dropout/rank3_d05_train_mask2",
            "layers_dropout/rank4_d05_train_mask",
            "layers_dropout/rank3_d05_train_mask1",
            "layers_dropout/rank2_d09_train",
            "layers_dropout/rank2_d05_train",*/
            /* "primitive_gru_dynamic",
            "layers_dropout/rank4_d05_train",
            "fused_batch_norm/float16_nhwc",
            "rnn/lstmblockcell/dynamic_b1_n5-3_ts4_noPH_noClip_fB1_noIS_withTM",
            "rnn/lstmcell/dynamic_b1_nIn5_nOut3_ts4_noPH_noClip_fB1_Tanh_noIS_float_withTM",
            "rnn/grublockcellv2/dynamic_b1_n3-2_ts1_noIS_noTM"*/
            /* "unsorted_segment/unsorted_segment_mean_rank3",
            "unsorted_segment/unsorted_segment_sqrt_n_rank2",
            "unsorted_segment/unsorted_segment_mean_rank2",
            "unsorted_segment/unsorted_segment_mean_rank3",
            "unsorted_segment/unsorted_segment_sum_rank3",
            "unsorted_segment/unsorted_segment_min_rank2",
            "unsorted_segment/unsorted_segment_prod_rank2",
            "unsorted_segment/unsorted_segment_max_rank2",*/
            "bincount/rank0_weights",
            "bincount/rank2_weights"
            /* "compare_and_bitpack/bool",
            "compare_and_bitpack/float32",
            "compare_and_bitpack/float64",
            "compare_and_bitpack/half",
            "compare_and_bitpack/int32",
            "compare_and_bitpack/int8",
            "compare_and_bitpack/int64",
            "compare_and_bitpack/int16"*/



    );

    public static final String[] IGNORE_REGEXES = new String[]{
@@ -98,7 +110,12 @@ public class TFGraphTestAllSameDiff {   //Note: Can't extend BaseNd4jTest here a
            // Still failing 2020/04/27 java.lang.IllegalStateException: Requested output variable Bincount does not exist in SameDiff instance
            //Invalid test cases. Verified by running graph against actual TF.
            "slogdet/.*",

            //IGNORE THIS: the TF results from comparing against an actual TF java run compared to this seem to be different.
            "fused_batch_norm/float16_nhwc",
            //Don't bother to test RNG. We can test subsets of ops with dropout to make sure they are consistent
            //These tests have random uniform and other RNG in them that don't need to be perfectly compatible to be acceptable.
            //We need different test cases here.
            "layers_dropout/.*",
            //TODO floormod and truncatemod behave differently - i.e., "c" vs. "python" semantics. Need to check implementations too
            // Still failing 2020/04/27 java.lang.IllegalStateException: Could not find class for TF Ops: TruncateMod
            "truncatemod/.*",
@@ -109,15 +126,11 @@ public class TFGraphTestAllSameDiff {   //Note: Can't extend BaseNd4jTest here a

            //2019/09/11 - Couple of tests failing (InferenceSession issues)
            // Still failing 2020/04/27 Requested output variable concat does not exist in SameDiff instance
            "rnn/bstack/d_.*",

            //2019/05/21 - Failing on AVX2/512 intermittently (Linux, OSX), passing elsewhere
            //"unsorted_segment/.*",

            //2019/05/21 - Failing on windows-x86_64-cuda-9.2 only -
            "conv_4",
            "g_09",
            //"unsorted_segment/unsorted_segment_mean_rank2",

            //2019/05/28 - JVM crash on ppc64le only - See issue 7657
            "g_11",
@@ -130,13 +143,10 @@ public class TFGraphTestAllSameDiff {   //Note: Can't extend BaseNd4jTest here a
            // Still failing 2020/04/27 java.lang.IllegalStateException: Could not find descriptor for op: deconv3d_tf - class: org.nd4j.linalg.api.ops.impl.layers.convolution.DeConv3DTF
            "conv3d_transpose.*",

            //2019/11/15 - mapping is not present yet https://github.com/eclipse/deeplearning4j/issues/8397
            //2019/11/15 - mapping is not present yet https://github.com/eclipse/deeplearning4j/issues/8397
            // Still failing 2020/04/27 java.lang.AssertionError: Predictions do not match on ragged/reduce_mean/2d_a1, node RaggedReduceMean/truediv
            "ragged/reduce_mean/.*",

            // 01.05.2020 - https://github.com/eclipse/deeplearning4j/issues/8898
            "primitive_gru",


            //08.05.2020 - https://github.com/eclipse/deeplearning4j/issues/8927
            "random_gamma/.*",
@@ -144,15 +154,14 @@ public class TFGraphTestAllSameDiff {   //Note: Can't extend BaseNd4jTest here a
            //08.05.2020 - https://github.com/eclipse/deeplearning4j/issues/8928
            "Conv3DBackpropInputV2/.*",

            //12.05.2020 - https://github.com/eclipse/deeplearning4j/issues/8940
            "compare_and_bitpack/.*",


            //12.05.2020 - https://github.com/eclipse/deeplearning4j/issues/8946
            "non_max_suppression_v4/.*","non_max_suppression_v5/.*",


            // 18.05.2020 - https://github.com/eclipse/deeplearning4j/issues/8963
            // 18.05.2020 - :wq:wq

            "random_uniform_int/.*",
            "random_uniform/.*",
            "random_poisson_v2/.*"
@@ -163,10 +172,11 @@ public class TFGraphTestAllSameDiff {   //Note: Can't extend BaseNd4jTest here a
       If a test name matches any regex here, an ExecPrintListener will be added to the listeners, and all output
       arrays will be printed during execution
     */
    private final List<String> debugModeRegexes = null; //Arrays.asList("resize_nearest_neighbor/.*", "add_n.*");
    private final List<String> debugModeRegexes = Arrays.asList("fused_batch_norm/float16_nhwc");

    @BeforeClass
    public static void beforeClass() {
        Nd4j.scalar(1.0);
        Nd4j.setDataType(DataType.FLOAT);
        Nd4j.getExecutioner().setProfilingMode(OpExecutioner.ProfilingMode.SCOPE_PANIC);
    }
@@ -1,3 +1,3 @@
in_0/read,in_0/read
MaxPoolWithArgmax,MaxPoolWithArgmax
MaxPoolWithArgmax:1,MaxPoolWithArgmax
in_1/read,in_1/read
UnsortedSegmentSum,UnsortedSegmentSum
@@ -449,7 +449,7 @@ fun loadDataBufferFromRawData(inputTensor: TensorNamespace.TensorProto): INDArra
    val rawDataBuffer = Nd4j.createBuffer(byteBuffer, dtype, totalLen, 0)
    if(shape.isNotEmpty() && totalLen > 0) {
        if(rawDataBuffer.length() > 1)
            return Nd4j.create(rawDataBuffer).reshape(*shape)
            return Nd4j.create(rawDataBuffer).reshape('c',*shape)
        return Nd4j.empty(dtype)
    }
    return Nd4j.create(rawDataBuffer)
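The one-character change above pins the reshape to 'c' (row-major) order so raw protobuf bytes are always interpreted the same way. A Java-side illustration, assuming the standard Nd4j reshape(char, long...) overload; the values are examples only:

    INDArray flat = Nd4j.createFromArray(1f, 2f, 3f, 4f, 5f, 6f);
    INDArray matrix = flat.reshape('c', 2, 3);   // rows [1,2,3] and [4,5,6]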
@@ -443,6 +443,7 @@ open class ImportGraph <GRAPH_TYPE: GeneratedMessageV3,
            //a common example is when ops convert input ndarrays to integers or float inputs
            val resolvedArgInputs = importInfo[name]!!.second.argDescriptorList.filter {input -> input.argType == OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR}
                .sortedBy { argDescriptor -> argDescriptor.argIndex }

            val numInputsToTake = resolvedArgInputs.size

            if(numInputsToTake != inNames.size) {
@@ -496,17 +497,6 @@ open class ImportGraph <GRAPH_TYPE: GeneratedMessageV3,
                val dt2 = if (v2 == null) v1!!.dataType() else v2.dataType()
                newInDtypes.add(dt1)
                newInDtypes.add(dt2)
            } else if(df is Concat) {
                //note we use the nd4j data types here so we only have input data types indexed by the actual
                //output from nd4j. A common scenario on import is dimensions being converted to ints
                //Dimensions are converted from inputs in the input framework to plain integers elsewhere.
                //This lets the import process dictate the actual ordering of the data types.
                for (s in inputNames) {
                    val v = sd.getVariable(s)
                    newInDtypes.add(v.dataType())
                }

                op.inputsToOp = inputNames
            }
            else {
                for (s in newInNames) {
[File diff suppressed because it is too large]
@@ -338,19 +338,9 @@ val binCount = TensorflowMappingProcess(
        opMappingRegistry = tensorflowOpRegistry,
        opName = "bincount",
        inputFrameworkOpName = "Bincount",
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("weights" to "weights","values" to "arr","min" to "size","max" to "size"))),
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("weights" to "weights","values" to "arr","min" to "size"))),
        attributeMappingRules = listOf(
                argDescriptorConstant(listOf(
                    ArgDescriptor {
                        name = "minLength"
                        argIndex = 0
                        argType = OpNamespace.ArgDescriptor.ArgType.INT64
                        int64Value = 0
                    }
                )),
                convertNDArrayInputToNumericalAttr(mutableMapOf("maxLength" to "size")),
                valueMapping(mutableMapOf("outputType" to "T"))),
        inputIndexOverrides = mapOf(1 to 2,2 to 1))
                valueMapping(mutableMapOf("outputType" to "T"))))


val bitCast = TensorflowMappingProcess(
@@ -495,15 +485,13 @@ val clipByValue = TensorflowMappingProcess(


//TODO: our compare and bit pack operation seems to do something different than TFs?
/*
val compareAndBitPack = TensorflowMappingProcess(
        opName = "compare_and_bitpack",
        opMappingRegistry = tensorflowOpRegistry,
        inputFrameworkOpName = "CompareAndBitpack",
        attributeMappingRules = listOf(convertNDArrayInputToNumericalAttr(mutableMapOf("threshold" to "threshold"))),
        attributeMappingRules = listOf(valueMapping(mutableMapOf("dtype" to "T"))),
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("input" to "input","y" to "threshold")))
)
*/


val concat = TensorflowMappingProcess(
@@ -556,7 +544,7 @@ val mergeAdd = TensorflowMappingProcess(
        opMappingRegistry = tensorflowOpRegistry,
        opName = "concat",
        inputFrameworkOpName = "ConcatV2",
        tensorMappingRules = listOf(mappingListNDArrays(mutableMapOf("input" to "values","concatDimension" to "axis"))),
        tensorMappingRules = listOf(passThroughNDArrayInputs()),
        attributeMappingRules = listOf(convertNDArrayInputToNumericalAttr(mutableMapOf("concatDimension" to "axis")),
                booleanConstant(inputName = "isDynamicAxis",constantValue = true,argumentIndex = 0)[0]))
@@ -779,7 +767,7 @@ val deconv2d = TensorflowMappingProcess(
        inputFrameworkOpName = "Conv2DBackpropInput",
        opName = "deconv2d_tf",
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf(
                "gradIShape" to "input_sizes","weights" to "filter"))),
                "gradIShape" to "input_sizes","weights" to "filter","gradO" to "out_backprop"))),
        attributeMappingRules = listOf(
                intConstant(inputName = "pH",constantValue = 0 ,argumentIndex = 4)[0],
                intConstant(inputName = "pW",constantValue = 0 ,argumentIndex = 5)[0],
@@ -1032,7 +1020,8 @@ val identity = multipleNameMapping(
        opName = "identity",
        inputFrameworkOpNames = listOf("DeepCopy"),
        tensorNames = mutableMapOf("input" to "x"),
        attributeMappingRules = booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0)
        attributeMappingRules = listOf(booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0)[0],
                valueMapping(mutableMapOf("dataType" to "T")))
        ,tensorflowOpRegistry = tensorflowOpRegistry)
@@ -1040,14 +1029,15 @@ val identityCopyToHost = multipleNameMapping(
        opName = "identity",
        inputFrameworkOpNames = listOf("CopyHost"),
        tensorNames = mutableMapOf("input" to "input"),
        attributeMappingRules = booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0),
        attributeMappingRules = listOf(booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0)[0],
                valueMapping(mutableMapOf("dataType" to "T"))),
        tensorflowOpRegistry = tensorflowOpRegistry)

val identityN = TensorflowMappingProcess(
        opName = "identity_n",
        inputFrameworkOpName = "IdentityN",
        opMappingRegistry = tensorflowOpRegistry,
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("input" to "input")))
        tensorMappingRules = listOf(passThroughNDArrayInputs())
)

val ifOp = TensorflowMappingProcess(
@@ -1071,9 +1061,8 @@ val fill = TensorflowMappingProcess(
        inputFrameworkOpName = "Fill",
        opMappingRegistry = tensorflowOpRegistry,
        attributeMappingRules = listOf(convertNDArrayInputToNumericalAttr(mutableMapOf("value" to "value")),
                dataTypeToInt(mutableMapOf("dtype" to "T")),
                valueMapping(mutableMapOf("dtype" to "T"))),
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("shapeArray" to "dims")))
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("shape" to "dims","outputs" to "value")))
)
@@ -1382,7 +1371,7 @@ val maxPoolArgmax = multipleNameMapping(
                intConstant(inputName = "extraParam0",constantValue = 0 ,argumentIndex = 9)[0],
                intConstant(inputName = "isNHWC",argumentIndex = 10,constantValue = 1 )[0],
                intConstant(inputName = "sameMode",argumentIndex = 8,constantValue = 8 )[0],
                valueMapping(mutableMapOf("dtype" to "T"))
                valueMapping(mutableMapOf("dtype" to "Targmax"))
        )
        ,tensorflowOpRegistry = tensorflowOpRegistry
)
@@ -1451,10 +1440,7 @@ val mirrorPadding = mapTensorNamesWithOp(inputFrameworkOpName = "MirrorPad",opNa
        booleanConstant(inputName = "isSymmetric",constantValue = true,argumentIndex = 0)[0])
        ,tensorflowOpRegistry = tensorflowOpRegistry)

/**
 * TODO: Need to add a constant mapping or something for NonMaxSuppression
 * v1 and 2 which do not have a scoreThreshold to map. V3 does.
 */

val matrixBandPart = mapTensorNamesWithOp(inputFrameworkOpName = "MatrixBandPart",opName = "matrix_band_part",
        tensorNames = mutableMapOf("input" to "input","minLowerT" to "num_lower",
@@ -1476,7 +1462,7 @@ val nonMaxSuppressionV1 = multipleNameMapping(inputFrameworkOpNames = listOf("No
                        argIndex = 1
                    }
                )),
                valueMapping(mutableMapOf("iouThreshold" to "iou_threshold")),
                valueMapping(mutableMapOf("overlayThreshold" to "iou_threshold")),
                convertNDArrayInputToNumericalAttr(mutableMapOf("maxOutputSize" to "max_output_size")))
        ,tensorflowOpRegistry = tensorflowOpRegistry)
@@ -1485,7 +1471,7 @@ val nonMaxSuppressionV1 = multipleNameMapping(inputFrameworkOpNames = listOf("No
val nonMaxSuppressionV2 = multipleNameMapping(inputFrameworkOpNames = listOf("NonMaxSuppressionV2"),
        opName = "non_max_suppression",
        tensorNames = mutableMapOf("boxes" to "boxes","scales" to "scores",
                "iouThreshold" to "iou_threshold","maxOutputSize" to "max_output_size"),
                "overlayThreshold" to "iou_threshold","maxOutputSize" to "max_output_size"),
        attributeMappingRules = listOf(
                argDescriptorConstant(listOf(
                    ArgDescriptor {
@@ -1711,34 +1697,10 @@ val randomUniform = multipleNameMapping(
        opName = "randomuniform",
        tensorNames = mutableMapOf("shape" to "shape"),
        attributeMappingRules = listOf(
                doubleConstant(inputName = "max",constantValue = 1.0,argumentIndex = 1)[0],
                doubleConstant(inputName = "min",constantValue = 0.0,argumentIndex = 0)[0],
                dataTypeToInt(mutableMapOf("dtype" to "dtype")),
                valueMapping(mutableMapOf("dataType" to "dtype")),
                argDescriptorConstant(listOf(
                    ArgDescriptor {
                        name = "min"
                        doubleValue = 0.0
                        argType = OpNamespace.ArgDescriptor.ArgType.DOUBLE
                        argIndex = 0
                    },
                    ArgDescriptor {
                        name = "max"
                        doubleValue = 1.0
                        argType = OpNamespace.ArgDescriptor.ArgType.DOUBLE
                        argIndex = 1
                    },
                    ArgDescriptor {
                        name = "min"
                        argIndex = 1
                        inputValue = nameSpaceTensorFromNDarray(Nd4j.scalar(1.0))
                        argType = OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR
                    },
                    ArgDescriptor {
                        name = "max"
                        argIndex = 2
                        inputValue = nameSpaceTensorFromNDarray(Nd4j.scalar(1.0))
                        argType = OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR
                    }
                )))
                valueMapping(mutableMapOf("dataType" to "dtype","seed" to "seed")))
        ,tensorflowOpRegistry = tensorflowOpRegistry
)
@@ -1748,34 +1710,11 @@ val statelessRandomUniform = multipleNameMapping(
        opName = "randomuniform",
        tensorNames = mutableMapOf("shape" to "shape"),
        attributeMappingRules = listOf(
                doubleConstant(inputName = "max",constantValue = 1.0,argumentIndex = 1)[0],
                doubleConstant(inputName = "min",constantValue = 0.0,argumentIndex = 0)[0],
                ndarrayToIntList(mutableMapOf("seed" to "seed")),
                dataTypeToInt(mutableMapOf("dtype" to "dtype")),
                valueMapping(mutableMapOf("dataType" to "dtype")),
                argDescriptorConstant(listOf(
                    ArgDescriptor {
                        name = "min"
                        doubleValue = 0.0
                        argType = OpNamespace.ArgDescriptor.ArgType.DOUBLE
                        argIndex = 0
                    },
                    ArgDescriptor {
                        name = "max"
                        doubleValue = 1.0
                        argType = OpNamespace.ArgDescriptor.ArgType.DOUBLE
                        argIndex = 1
                    },
                    ArgDescriptor {
                        name = "min"
                        argIndex = 1
                        inputValue = nameSpaceTensorFromNDarray(Nd4j.scalar(1.0))
                        argType = OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR
                    },
                    ArgDescriptor {
                        name = "max"
                        argIndex = 2
                        inputValue = nameSpaceTensorFromNDarray(Nd4j.scalar(1.0))
                        argType = OpNamespace.ArgDescriptor.ArgType.INPUT_TENSOR
                    }
                )))
                valueMapping(mutableMapOf("dataType" to "dtype")))
        ,tensorflowOpRegistry = tensorflowOpRegistry
)
@@ -1783,8 +1722,9 @@ val statelessRandomUniform = multipleNameMapping(
val randomUniformInt = TensorflowMappingProcess(
        inputFrameworkOpName = "RandomUniformInt",
        opName = "randomuniform",
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("shape" to "shape","min" to "minval","max" to "maxval"))),
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("shape" to "shape"))),
        attributeMappingRules = listOf(
                valueMapping(mutableMapOf("seed" to "seed")),
                convertNDArrayInputToNumericalAttr(mutableMapOf("min" to "minval","max" to "maxval")),
                dataTypeToInt(mutableMapOf("dtype" to "Tout")),valueMapping(mutableMapOf("dataType" to "Tout"))
        ),
@@ -1818,7 +1758,7 @@ val resizeBiCubic = multipleNameMapping(inputFrameworkOpNames = listOf("ResizeBi
        tensorNames = mutableMapOf("image" to "images","size" to "size"),tensorflowOpRegistry = tensorflowOpRegistry)

val resizeBiLinear = multipleNameMapping(inputFrameworkOpNames = listOf("ResizeBilinear"),opName = "resize_bilinear",
        attributeMappingRules = listOf(valueMapping(mutableMapOf("alignCorners" to "align_corners","halfPixelCenter" to "half_pixel_centers"))),
        attributeMappingRules = listOf(valueMapping(mutableMapOf("alignCorners" to "align_corners","halfPixelCenters" to "half_pixel_centers"))),
        tensorNames = mutableMapOf("image" to "images","newImageSize" to "size"),tensorflowOpRegistry = tensorflowOpRegistry)

val resizeNearestNeighbor = multipleNameMapping(inputFrameworkOpNames = listOf("ResizeNearestNeighbor"),opName = "resize_nearest_neighbor",
@@ -1837,7 +1777,7 @@ val reverseSequence = multipleNameMapping(inputFrameworkOpNames = listOf("Revers

val roll = multipleNameMapping(inputFrameworkOpNames = listOf("Roll"),opName = "roll",
        attributeMappingRules = listOf(ndarrayToIntList(mutableMapOf("shift" to "shift"))),
        tensorNames = mutableMapOf("input" to "input","dimensions" to "axis")
        tensorNames = mutableMapOf("input" to "input","dimensions" to "axis","shiftsI" to "shift")
        ,tensorflowOpRegistry = tensorflowOpRegistry)

//TODO: verify usingLocking property, it's not showing up in descriptors
@@ -1978,8 +1918,9 @@ val softPlus = mapTensorNamesWithOp(inputFrameworkOpName = "Softplus",opName = "
val softSign = mapTensorNamesWithOp(inputFrameworkOpName = "Softsign",opName = "softsign",tensorNames = mutableMapOf("input" to "features"),
        attributeMappingRules = booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0),tensorflowOpRegistry = tensorflowOpRegistry)

val shapeN = mapTensorNamesWithOp(inputFrameworkOpName = "ShapeN",opName = "shapes_of",tensorNames = mutableMapOf("input" to "input"),
        attributeMappingRules = booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0),tensorflowOpRegistry = tensorflowOpRegistry)
val shapeN = TensorflowMappingProcess(inputFrameworkOpName = "ShapeN",opName = "shapes_of",tensorMappingRules = listOf(
        passThroughNDArrayInputs()),
        attributeMappingRules = booleanConstant(inputName = "inPlace",constantValue = false,argumentIndex = 0),opMappingRegistry = tensorflowOpRegistry)

val softMax = mapTensorNamesWithOp(inputFrameworkOpName = "Softmax",opName = "softmax",tensorNames = mutableMapOf("input" to "logits"),attributeMappingRules =
        listOf(argDescriptorConstant(
@@ -2118,7 +2059,6 @@ val squeeze = TensorflowMappingProcess(
        inputFrameworkOpName = "Squeeze",
        tensorMappingRules = listOf(mappingNDArrayInputs(mutableMapOf("input" to "input"))),
        attributeMappingRules = listOf(
                listNumberToNDarray(mutableMapOf("a" to "squeeze_dims")),
                listNumberToListNumber(outputAttributeValue = "_a",inputAttributeValue = "squeeze_dims")),
        opMappingRegistry = tensorflowOpRegistry
)
@ -2347,27 +2287,27 @@ val unpack = multipleNameMapping(inputFrameworkOpNames = listOf("Unpack"),
|
|||
val unsortedSegmentMax = mapTensorNamesWithOp(inputFrameworkOpName = "UnsortedSegmentMax",
|
||||
opName = "unsorted_segment_max",
|
||||
attributeMappingRules = listOf(
|
||||
convertNDArrayInputToNumericalAttr(mutableMapOf("numSegments" to "num_segments","numSegments" to "num_segments"))),
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids")
|
||||
convertNDArrayInputToNumericalAttr(mutableMapOf("numSegments" to "num_segments"))),
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids","numSegments" to "num_segments")
|
||||
,tensorflowOpRegistry = tensorflowOpRegistry)
|
||||
|
||||
val unsortedSegmentMin = mapTensorNamesWithOp(inputFrameworkOpName = "UnsortedSegmentMin",
|
||||
opName = "unsorted_segment_min",
|
||||
attributeMappingRules = listOf(convertNDArrayInputToNumericalAttr(mutableMapOf("numSegments" to "num_segments"))),
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids")
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids","numSegments" to "num_segments")
|
||||
,tensorflowOpRegistry = tensorflowOpRegistry)
|
||||
|
||||
val unsortedSegmentProd = mapTensorNamesWithOp(inputFrameworkOpName = "UnsortedSegmentProd",
|
||||
opName = "unsorted_segment_prod",
|
||||
attributeMappingRules = listOf(
|
||||
convertNDArrayInputToNumericalAttr(mutableMapOf("numSegments" to "num_segments"))),
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids"),tensorflowOpRegistry = tensorflowOpRegistry)
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids","numSegments" to "num_segments"),tensorflowOpRegistry = tensorflowOpRegistry)
|
||||
|
||||
|
||||
val unsortedSegmentSum = mapTensorNamesWithOp(inputFrameworkOpName = "UnsortedSegmentSum",
|
||||
opName = "unsorted_segment_sum",
|
||||
attributeMappingRules = listOf(convertNDArrayInputToNumericalAttr(mutableMapOf("numSegments" to "num_segments"))),
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids")
|
||||
tensorNames = mutableMapOf("input" to "data","idxSegments" to "segment_ids","numSegments" to "num_segments")
|
||||
,tensorflowOpRegistry = tensorflowOpRegistry)
|
||||
|
||||
//TODO: Figure out if need to map
|
||||
|
|
|
@@ -1250,6 +1250,47 @@ mappings {
ruleType: "tensor"
inputFrameworkOpName: "StatelessRandomUniform"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputFloatName: "max"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
}
inputFrameworkOpName: "StatelessRandomUniform"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputFloatName: "min"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
}
inputFrameworkOpName: "StatelessRandomUniform"
}
rule {
ruleName: "ndarraytointattributevalue"
functionName: "ndarraytointattributevalue"
outputIntName: "seed"
inputToOutput {
key: "seed"
value: "seed"
}
ruleType: "attribute"
inputFrameworkOpName: "StatelessRandomUniform"
}
rule {
ruleName: "datatypetoint"
functionName: "datatypetoint"
@@ -1274,140 +1315,6 @@ mappings {
ruleType: "attribute"
inputFrameworkOpName: "StatelessRandomUniform"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputFloatName: "min"
inputFloatName: "max"
inputTensorName: "min"
inputTensorName: "max"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
inputFrameworkOpName: "StatelessRandomUniform"
}
}
mappings {
frameworkName: "tensorflow"
@@ -1721,16 +1628,6 @@ mappings {
ruleType: "tensor"
inputFrameworkOpName: "Squeeze"
}
rule {
ruleName: "listnumbertondarray"
functionName: "listnumbertondarray"
inputToOutput {
key: "a"
value: "squeeze_dims"
}
ruleType: "attribute"
inputFrameworkOpName: "Squeeze"
}
rule {
ruleName: "listnumbertolistnumber"
functionName: "listnumbertolistnumber"
@@ -1787,8 +1684,10 @@ mappings {
functionName: "ndarraymapping"
inputTensorName: "data"
inputTensorName: "segment_ids"
inputTensorName: "num_segments"
outputTensorName: "input"
outputTensorName: "idxSegments"
outputTensorName: "numSegments"
inputToOutput {
key: "input"
value: "data"
@@ -1797,13 +1696,16 @@ mappings {
key: "idxSegments"
value: "segment_ids"
}
inputToOutput {
key: "numSegments"
value: "num_segments"
}
ruleType: "tensor"
inputFrameworkOpName: "UnsortedSegmentProd"
}
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputIntName: "numSegments"
inputToOutput {
key: "numSegments"
value: "num_segments"
@@ -6547,6 +6449,36 @@ mappings {
ruleType: "tensor"
inputFrameworkOpName: "RandomUniform"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputFloatName: "max"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
}
inputFrameworkOpName: "RandomUniform"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputFloatName: "min"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
}
inputFrameworkOpName: "RandomUniform"
}
rule {
ruleName: "datatypetoint"
functionName: "datatypetoint"
@@ -6562,149 +6494,21 @@ mappings {
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
inputIntName: "seed"
outputIntName: "seed"
inputDataTypeName: "dtype"
outputDataTypeName: "dataType"
inputToOutput {
key: "dataType"
value: "dtype"
}
inputToOutput {
key: "seed"
value: "seed"
}
ruleType: "attribute"
inputFrameworkOpName: "RandomUniform"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputFloatName: "min"
inputFloatName: "max"
inputTensorName: "min"
inputTensorName: "max"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
transformerArgs {
key: "value"
transformerArgs {
name: "min"
argType: DOUBLE
}
transformerArgs {
name: "max"
doubleValue: 1.0
argType: DOUBLE
argIndex: 1
}
transformerArgs {
name: "min"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 1
}
transformerArgs {
name: "max"
inputValue {
data_type: 11
double_data: 1.0
}
argType: INPUT_TENSOR
argIndex: 2
}
}
inputFrameworkOpName: "RandomUniform"
}
}
mappings {
frameworkName: "tensorflow"
@@ -6847,8 +6651,10 @@ mappings {
functionName: "ndarraymapping"
inputTensorName: "input"
inputTensorName: "axis"
inputTensorName: "shift"
outputTensorName: "input"
outputTensorName: "dimensions"
outputTensorName: "shiftsI"
inputToOutput {
key: "input"
value: "input"
@@ -6857,6 +6663,10 @@ mappings {
key: "dimensions"
value: "axis"
}
inputToOutput {
key: "shiftsI"
value: "shift"
}
ruleType: "tensor"
inputFrameworkOpName: "Roll"
}
@@ -6972,8 +6782,10 @@ mappings {
functionName: "ndarraymapping"
inputTensorName: "data"
inputTensorName: "segment_ids"
inputTensorName: "num_segments"
outputTensorName: "input"
outputTensorName: "idxSegments"
outputTensorName: "numSegments"
inputToOutput {
key: "input"
value: "data"
@@ -6982,13 +6794,16 @@ mappings {
key: "idxSegments"
value: "segment_ids"
}
inputToOutput {
key: "numSegments"
value: "num_segments"
}
ruleType: "tensor"
inputFrameworkOpName: "UnsortedSegmentMin"
}
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputIntName: "numSegments"
inputToOutput {
key: "numSegments"
value: "num_segments"
@@ -7239,7 +7054,6 @@ mappings {
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputDoubleName: "start"
outputDoubleName: "stop"
inputToOutput {
key: "start"
@@ -7255,8 +7069,8 @@ mappings {
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
outputIntName: "dataType"
inputDataTypeName: "T"
outputDataTypeName: "dataType"
inputToOutput {
key: "dataType"
value: "T"
@@ -7380,8 +7194,10 @@ mappings {
functionName: "ndarraymapping"
inputTensorName: "data"
inputTensorName: "segment_ids"
inputTensorName: "num_segments"
outputTensorName: "input"
outputTensorName: "idxSegments"
outputTensorName: "numSegments"
inputToOutput {
key: "input"
value: "data"
@@ -7390,13 +7206,16 @@ mappings {
key: "idxSegments"
value: "segment_ids"
}
inputToOutput {
key: "numSegments"
value: "num_segments"
}
ruleType: "tensor"
inputFrameworkOpName: "UnsortedSegmentSum"
}
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputIntName: "numSegments"
inputToOutput {
key: "numSegments"
value: "num_segments"
@@ -9065,7 +8884,7 @@ mappings {
inputTensorName: "max_output_size"
outputTensorName: "boxes"
outputTensorName: "scales"
outputTensorName: "iouThreshold"
outputTensorName: "overlayThreshold"
outputTensorName: "maxOutputSize"
inputToOutput {
key: "boxes"
@@ -9076,7 +8895,7 @@ mappings {
value: "scores"
}
inputToOutput {
key: "iouThreshold"
key: "overlayThreshold"
value: "iou_threshold"
}
inputToOutput {
@@ -9184,8 +9003,6 @@ mappings {
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputDoubleName: "on"
outputDoubleName: "off"
inputToOutput {
key: "on"
value: "on_value"
@@ -9298,6 +9115,41 @@ mappings {
inputFrameworkOpName: "Square"
}
}
mappings {
frameworkName: "tensorflow"
opName: "compare_and_bitpack"
inputFrameworkOpName: "CompareAndBitpack"
rule {
ruleName: "ndarraymapping"
functionName: "ndarraymapping"
inputTensorName: "input"
inputTensorName: "threshold"
outputTensorName: "input"
outputTensorName: "y"
inputToOutput {
key: "input"
value: "input"
}
inputToOutput {
key: "y"
value: "threshold"
}
ruleType: "tensor"
inputFrameworkOpName: "CompareAndBitpack"
}
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
inputDataTypeName: "T"
outputDataTypeName: "dtype"
inputToOutput {
key: "dtype"
value: "T"
}
ruleType: "attribute"
inputFrameworkOpName: "CompareAndBitpack"
}
}
mappings {
frameworkName: "tensorflow"
opName: "segment_min"
@@ -9353,8 +9205,10 @@ mappings {
functionName: "ndarraymapping"
inputTensorName: "data"
inputTensorName: "segment_ids"
inputTensorName: "num_segments"
outputTensorName: "input"
outputTensorName: "idxSegments"
outputTensorName: "numSegments"
inputToOutput {
key: "input"
value: "data"
@@ -9363,13 +9217,16 @@ mappings {
key: "idxSegments"
value: "segment_ids"
}
inputToOutput {
key: "numSegments"
value: "num_segments"
}
ruleType: "tensor"
inputFrameworkOpName: "UnsortedSegmentMax"
}
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputIntName: "numSegments"
inputToOutput {
key: "numSegments"
value: "num_segments"
@@ -9429,13 +9286,13 @@ mappings {
inputBooleanName: "align_corners"
inputBooleanName: "half_pixel_centers"
outputBooleanName: "alignCorners"
outputBooleanName: "halfPixelCenter"
outputBooleanName: "halfPixelCenters"
inputToOutput {
key: "alignCorners"
value: "align_corners"
}
inputToOutput {
key: "halfPixelCenter"
key: "halfPixelCenters"
value: "half_pixel_centers"
}
ruleType: "attribute"
@@ -9833,7 +9690,7 @@ mappings {
functionName: "valuemapping"
inputFloatName: "iou_threshold"
inputToOutput {
key: "iouThreshold"
key: "overlayThreshold"
value: "iou_threshold"
}
ruleType: "attribute"
@@ -10185,11 +10042,9 @@ mappings {
inputTensorName: "weights"
inputTensorName: "arr"
inputTensorName: "size"
inputTensorName: "size"
outputTensorName: "weights"
outputTensorName: "values"
outputTensorName: "min"
outputTensorName: "max"
inputToOutput {
key: "weights"
value: "weights"
@@ -10202,38 +10057,9 @@ mappings {
key: "min"
value: "size"
}
inputToOutput {
key: "max"
value: "size"
}
ruleType: "tensor"
inputFrameworkOpName: "Bincount"
}
rule {
ruleName: "argdescriptorconstant"
functionName: "argdescriptorconstant"
inputIntName: "minLength"
ruleType: "attribute"
transformerArgs {
key: "value"
transformerArgs {
name: "minLength"
argType: INT64
}
}
inputFrameworkOpName: "Bincount"
}
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputIntName: "maxLength"
inputToOutput {
key: "maxLength"
value: "size"
}
ruleType: "attribute"
inputFrameworkOpName: "Bincount"
}
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
@@ -10246,14 +10072,6 @@ mappings {
ruleType: "attribute"
inputFrameworkOpName: "Bincount"
}
indexOverrides {
key: 1
value: 2
}
indexOverrides {
key: 2
value: 1
}
}
mappings {
frameworkName: "tensorflow"
@@ -10483,31 +10301,29 @@ mappings {
ruleName: "ndarraymapping"
functionName: "ndarraymapping"
inputTensorName: "shape"
inputTensorName: "minval"
inputTensorName: "maxval"
outputTensorName: "shape"
outputTensorName: "min"
outputTensorName: "max"
inputToOutput {
key: "shape"
value: "shape"
}
inputToOutput {
key: "min"
value: "minval"
}
inputToOutput {
key: "max"
value: "maxval"
}
ruleType: "tensor"
inputFrameworkOpName: "RandomUniformInt"
}
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
inputIntName: "seed"
outputIntName: "seed"
inputToOutput {
key: "seed"
value: "seed"
}
ruleType: "attribute"
inputFrameworkOpName: "RandomUniformInt"
}
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputDoubleName: "min"
outputDoubleName: "max"
inputToOutput {
key: "min"
value: "minval"
@@ -10822,14 +10638,8 @@ mappings {
opName: "shapes_of"
inputFrameworkOpName: "ShapeN"
rule {
ruleName: "ndarraymapping"
functionName: "ndarraymapping"
inputTensorName: "input"
outputTensorName: "input"
inputToOutput {
key: "input"
value: "input"
}
ruleName: "passthrough"
functionName: "passthrough"
ruleType: "tensor"
inputFrameworkOpName: "ShapeN"
}
@@ -10943,8 +10753,10 @@ mappings {
functionName: "ndarraymapping"
inputTensorName: "input_sizes"
inputTensorName: "filter"
inputTensorName: "out_backprop"
outputTensorName: "gradIShape"
outputTensorName: "weights"
outputTensorName: "gradO"
inputToOutput {
key: "gradIShape"
value: "input_sizes"
@@ -10953,6 +10765,10 @@ mappings {
key: "weights"
value: "filter"
}
inputToOutput {
key: "gradO"
value: "out_backprop"
}
ruleType: "tensor"
inputFrameworkOpName: "Conv2DBackpropInput"
}
@@ -11629,6 +11445,18 @@ mappings {
}
inputFrameworkOpName: "CopyHost"
}
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
inputDataTypeName: "T"
outputDataTypeName: "dataType"
inputToOutput {
key: "dataType"
value: "T"
}
ruleType: "attribute"
inputFrameworkOpName: "CopyHost"
}
}
mappings {
frameworkName: "tensorflow"
@@ -12011,11 +11839,17 @@ mappings {
ruleName: "ndarraymapping"
functionName: "ndarraymapping"
inputTensorName: "dims"
outputTensorName: "shapeArray"
inputTensorName: "value"
outputTensorName: "shape"
outputTensorName: "outputs"
inputToOutput {
key: "shapeArray"
key: "shape"
value: "dims"
}
inputToOutput {
key: "outputs"
value: "value"
}
ruleType: "tensor"
inputFrameworkOpName: "Fill"
}
@@ -12030,18 +11864,6 @@ mappings {
ruleType: "attribute"
inputFrameworkOpName: "Fill"
}
rule {
ruleName: "datatypetoint"
functionName: "datatypetoint"
outputIntName: "dtype"
inputDataTypeName: "T"
inputToOutput {
key: "dtype"
value: "T"
}
ruleType: "attribute"
inputFrameworkOpName: "Fill"
}
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
@@ -12306,11 +12128,11 @@ mappings {
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
inputDataTypeName: "T"
inputDataTypeName: "Targmax"
outputDataTypeName: "dtype"
inputToOutput {
key: "dtype"
value: "T"
value: "Targmax"
}
ruleType: "attribute"
inputFrameworkOpName: "MaxPoolWithArgmax"
@@ -13288,14 +13110,8 @@ mappings {
opName: "identity_n"
inputFrameworkOpName: "IdentityN"
rule {
ruleName: "ndarraymapping"
functionName: "ndarraymapping"
inputTensorName: "input"
outputTensorName: "input"
inputToOutput {
key: "input"
value: "input"
}
ruleName: "passthrough"
functionName: "passthrough"
ruleType: "tensor"
inputFrameworkOpName: "IdentityN"
}
@@ -13379,9 +13195,6 @@ mappings {
rule {
ruleName: "ndarrayinputtonumericalattribute"
functionName: "ndarrayinputtonumericalattribute"
outputDoubleName: "from"
outputDoubleName: "to"
outputDoubleName: "step"
inputToOutput {
key: "from"
value: "start"
@@ -14760,20 +14573,8 @@ mappings {
opName: "concat"
inputFrameworkOpName: "ConcatV2"
rule {
ruleName: "multiinputindex"
functionName: "multiinputindex"
inputTensorName: "values"
inputTensorName: "axis"
outputTensorName: "input"
outputTensorName: "concatDimension"
inputToOutput {
key: "input"
value: "values"
}
inputToOutput {
key: "concatDimension"
value: "axis"
}
ruleName: "passthrough"
functionName: "passthrough"
ruleType: "tensor"
inputFrameworkOpName: "ConcatV2"
}
@@ -15641,6 +15442,18 @@ mappings {
}
inputFrameworkOpName: "DeepCopy"
}
rule {
ruleName: "valuemapping"
functionName: "valuemapping"
inputDataTypeName: "T"
outputDataTypeName: "dataType"
inputToOutput {
key: "dataType"
value: "T"
}
ruleType: "attribute"
inputFrameworkOpName: "DeepCopy"
}
}
mappings {
frameworkName: "tensorflow"
@@ -108,6 +108,14 @@ class TestTensorflowIR {
val importedGraph = TFGraphMapper.importGraph(textGraph)
val graph = tfImporter.importFromGraph(textGraph,inputMap)
val tfOutput = tfGraphRunner.run(inputMap)

/**
* TODO: UnsortedSegmentSum: solution is almost there; need to figure out how to
* output the correct shape.
*
* Shape in TF is 5 x 5 but the actual output seems to be 1 x 10.
* We need to change the output shape to work like TF does.
*/
val output2 = importedGraph.outputAll(inputMap)
val output = graph.outputAll(inputMap)

@@ -117,10 +125,12 @@ class TestTensorflowIR {
val names = tensorflowIRGraph.nodeList().map { input -> input.nodeName() }
val skipValidation = setOf("parallel_stack/ExpandDims/dim")
//assertEquals(output.keys,output2.keys)
/* val notEquals = HashSet<String>()
val notEquals = HashSet<String>()
val notEqualsTf = HashSet<String>()
names.forEach {
val value = output[it]
val value2 = output2[it]
val tfValue = tfOutput[it]
if(value!! != (value2!!)) {
val oldOps = importedGraph.ops[it]
val newOps = graph.ops[it]
@@ -128,10 +138,19 @@ class TestTensorflowIR {
val newVar = graph.variables[it]
notEquals.add(it)
}
}*/

//println(notEquals)
if(tfValue!! != (value!!)) {
val oldOps = importedGraph.ops[it]
val newOps = graph.ops[it]
val oldVar = importedGraph.variables[it]
val newVar = graph.variables[it]
notEqualsTf.add(it)
}
}

println(notEquals)
println(notEqualsTf)
println()
// assertEquals(output,output2)
//assertEquals(tfOutput,output)
}
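
The shape discrepancy flagged in the TODO above is the defining property of the unsorted segment ops this commit rewires: the output row count is fixed by the explicit numSegments input, not inferred from the data or the segment ids. The sketch below is plain Kotlin written for illustration only; it is not the libnd4j kernel and assumes 2-D double data. With a 10 x 5 input and numSegments = 5 it yields the 5 x 5 result TF expects, rather than a flattened 1 x 10.

// Illustrative sketch of TF's unsorted_segment_sum shape rule (assumption:
// 2-D data; the min/max/prod variants differ only in the combine step and
// the identity value each output row starts from).
fun unsortedSegmentSum(
    data: Array<DoubleArray>,   // shape [n, cols]
    segmentIds: IntArray,       // shape [n], each id in [0, numSegments)
    numSegments: Int
): Array<DoubleArray> {
    require(data.size == segmentIds.size) { "one segment id per data row" }
    val cols = if (data.isEmpty()) 0 else data[0].size
    // Output shape is [numSegments, cols], determined by numSegments alone,
    // even if some segment ids never occur in the input.
    val out = Array(numSegments) { DoubleArray(cols) }
    for (i in data.indices) {
        val seg = segmentIds[i]
        require(seg in 0 until numSegments) { "segment id out of range: $seg" }
        for (j in 0 until cols) out[seg][j] += data[i][j]
    }
    return out
}

fun main() {
    val data = Array(10) { DoubleArray(5) { 1.0 } }  // 10 x 5 input
    val ids = IntArray(10) { it % 5 }                // segment ids 0..4
    val sum = unsortedSegmentSum(data, ids, numSegments = 5)
    println("${sum.size} x ${sum[0].size}")          // prints: 5 x 5
}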