commit 552c5d0b72
@@ -7,5 +7,5 @@ if test "$#" -eq 0; then
     echo "Usage example 2 (all namespaces): ./generate.sh all"
 else
     mvn clean package -DskipTests
-    java -cp target/codegen-1.0.0-SNAPSHOT-shaded.jar org.nd4j.codegen.cli.CLI -dir ../../ -namespaces "$@"
+    java -cp target/codegen-1.0.0-SNAPSHOT-shaded.jar org.nd4j.codegen.cli.CLI -dir ../../../ -namespaces "$@"
 fi
@@ -102,7 +102,7 @@
         <dependency>
             <groupId>org.nd4j</groupId>
             <artifactId>nd4j-api</artifactId>
-            <version>1.0.0-SNAPSHOT</version>
+            <version>${project.version}</version>
         </dependency>
     </dependencies>

@@ -29,7 +29,6 @@ import org.nd4j.codegen.api.doc.DocScope
 import org.nd4j.codegen.dsl.*
 import org.nd4j.codegen.api.DataType.*
 import org.nd4j.codegen.mixins.*
-import org.nd4j.linalg.api.buffer.DataType
 import java.lang.Boolean.FALSE

 fun SDBaseOps() = Namespace("BaseOps"){
@@ -773,6 +772,19 @@ fun SDBaseOps() = Namespace("BaseOps"){
         useMixin(keepDimsDoc)
     }

+    Op("split") {
+        javaPackage = "org.nd4j.linalg.api.ops.impl.shape"
+        javaOpClass = "Split"
+        Input(NUMERIC,"input") {description = "Input to split"}
+        Arg(INT, "numSplit") { description = "Number of splits" }
+        Arg(INT, "splitDim") { description = "The dimension to split on" }
+        Doc(Language.ANY, DocScope.ALL){
+            """
+            Split a value in to a list of ndarrays.
+            """.trimIndent()
+        }
+    }
+
     Op("oneHot") {
         javaPackage = "org.nd4j.linalg.api.ops.impl.shape"
         Input(NUMERIC, "indices") { description = "Indices - value 0 to depth-1" }
@@ -780,7 +792,7 @@ fun SDBaseOps() = Namespace("BaseOps"){
         Arg(INT, "axis") { description = "" }
         Arg(NUMERIC, "on") { description = "" }
         Arg(NUMERIC, "off") { description = "" }
-        Arg(DATA_TYPE, "dataType") { description = "Output data type"; defaultValue = DataType.FLOAT }
+        Arg(DATA_TYPE, "dataType") { description = "Output data type"; defaultValue = org.nd4j.linalg.api.buffer.DataType.FLOAT }
         Output(NUMERIC, "output"){ description = "Output variable" }

         Doc(Language.ANY, DocScope.ALL){
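
Since the DSL file no longer imports org.nd4j.linalg.api.buffer.DataType (see the import hunk above), the oneHot default value must now be written fully qualified. As a minimal sketch of the generated API this default feeds into — assuming a recent nd4j build where SameDiff exposes the generated SDBaseOps methods; class and variable names here are illustrative, not part of the commit:

    import org.nd4j.autodiff.samediff.SDVariable;
    import org.nd4j.autodiff.samediff.SameDiff;
    import org.nd4j.linalg.api.buffer.DataType;
    import org.nd4j.linalg.factory.Nd4j;

    public class OneHotSketch {
        public static void main(String[] args) {
            SameDiff sd = SameDiff.create();
            SDVariable indices = sd.constant("indices", Nd4j.createFromArray(0, 1, 2));
            // The DSL defaultValue above means generated overloads that omit the
            // dataType argument behave as if DataType.FLOAT were passed here.
            SDVariable oneHot = sd.oneHot(indices, 3, -1, 1.0, 0.0, DataType.FLOAT);
            System.out.println(oneHot.eval()); // 3x3 one-hot FLOAT matrix
        }
    }
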
@@ -77,7 +77,6 @@ import static org.junit.Assert.*;
  * @author dave@skymind.io, Max Pumperla
  */
 @Slf4j
-@Ignore
 public class KerasModelEndToEndTest extends BaseDL4JTest {
     private static final String GROUP_ATTR_INPUTS = "inputs";
     private static final String GROUP_ATTR_OUTPUTS = "outputs";
@@ -197,7 +197,6 @@
                     <directory>deeplearning4j-modelimport</directory>
                     <directory>deeplearning4j-modelexport-solr</directory>
                     <directory>deeplearning4j-zoo</directory>
-                    <directory>deeplearning4j-nearestneighbors-parent</directory>
                 </directories>
             </configuration>
         </plugin>
@@ -3799,6 +3799,32 @@ public class SDBaseOps {
     return sd.updateVariableNameAndReference(out, name);
   }

+  /**
+   * Split a value in to a list of ndarrays.<br>
+   *
+   * @param input Input to split (NUMERIC type)
+   * @param numSplit Number of splits
+   * @param splitDim The dimension to split on
+   */
+  public SDVariable[] split(SDVariable input, int numSplit, int splitDim) {
+    SDValidation.validateNumerical("split", "input", input);
+    return new org.nd4j.linalg.api.ops.impl.shape.Split(sd,input, numSplit, splitDim).outputVariables();
+  }
+
+  /**
+   * Split a value in to a list of ndarrays.<br>
+   *
+   * @param names names May be null. Arrays of names for the output variables.
+   * @param input Input to split (NUMERIC type)
+   * @param numSplit Number of splits
+   * @param splitDim The dimension to split on
+   */
+  public SDVariable[] split(String[] names, SDVariable input, int numSplit, int splitDim) {
+    SDValidation.validateNumerical("split", "input", input);
+    SDVariable[] out = new org.nd4j.linalg.api.ops.impl.shape.Split(sd,input, numSplit, splitDim).outputVariables();
+    return sd.updateVariableNamesAndReferences(out, names);
+  }
+
   /**
    * Squared L2 norm: see norm2(String, SDVariable, boolean, int...)<br>
    *
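
A minimal usage sketch of the generated methods above, assuming SameDiff extends SDBaseOps as in current nd4j; the shapes are illustrative:

    import org.nd4j.autodiff.samediff.SDVariable;
    import org.nd4j.autodiff.samediff.SameDiff;
    import org.nd4j.linalg.api.buffer.DataType;
    import org.nd4j.linalg.factory.Nd4j;

    public class SameDiffSplitSketch {
        public static void main(String[] args) {
            SameDiff sd = SameDiff.create();
            SDVariable in = sd.var("in", Nd4j.linspace(1, 24, 24, DataType.FLOAT).reshape(4, 6));
            // numSplit=2 along splitDim=1: two SDVariables of shape [4, 3]
            SDVariable[] parts = sd.split(in, 2, 1);
            System.out.println(parts[0].eval());
        }
    }
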
@@ -5411,14 +5411,12 @@ public final class TensorNamespace {
      * Serializations can either use one of the fields above, or use this
      * raw bytes field. The only exception is the string case, where one is
      * required to store the content in the repeated bytes string_data field.
-     *
      * When this raw_data field is used to store tensor value, elements MUST
      * be stored in as fixed-width, little-endian order.
      * Floating-point data types MUST be stored in IEEE 754 format.
      * Complex64 elements must be written as two consecutive FLOAT values, real component first.
      * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
      * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
-     *
      * Note: the advantage of specific field rather than the raw_data field is
      * that in some cases (e.g. int data), protobuf does a better packing via
      * variable length storage, and may lead to smaller binary footprint.
@@ -5657,7 +5655,6 @@ public final class TensorNamespace {
   /**
    * <pre>
    * Tensors
-   *
    * A serialized tensor value.
    * </pre>
    *
@@ -7013,14 +7010,12 @@ public final class TensorNamespace {
      * Serializations can either use one of the fields above, or use this
      * raw bytes field. The only exception is the string case, where one is
      * required to store the content in the repeated bytes string_data field.
-     *
      * When this raw_data field is used to store tensor value, elements MUST
      * be stored in as fixed-width, little-endian order.
      * Floating-point data types MUST be stored in IEEE 754 format.
      * Complex64 elements must be written as two consecutive FLOAT values, real component first.
      * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
      * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
-     *
      * Note: the advantage of specific field rather than the raw_data field is
      * that in some cases (e.g. int data), protobuf does a better packing via
      * variable length storage, and may lead to smaller binary footprint.
@@ -7771,7 +7766,6 @@ public final class TensorNamespace {
   /**
    * <pre>
    * Tensors
-   *
    * A serialized tensor value.
    * </pre>
    *
@@ -9086,14 +9080,12 @@ public final class TensorNamespace {
      * Serializations can either use one of the fields above, or use this
      * raw bytes field. The only exception is the string case, where one is
      * required to store the content in the repeated bytes string_data field.
-     *
      * When this raw_data field is used to store tensor value, elements MUST
      * be stored in as fixed-width, little-endian order.
      * Floating-point data types MUST be stored in IEEE 754 format.
      * Complex64 elements must be written as two consecutive FLOAT values, real component first.
      * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
      * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
-     *
      * Note: the advantage of specific field rather than the raw_data field is
      * that in some cases (e.g. int data), protobuf does a better packing via
      * variable length storage, and may lead to smaller binary footprint.
@@ -9110,14 +9102,12 @@ public final class TensorNamespace {
      * Serializations can either use one of the fields above, or use this
      * raw bytes field. The only exception is the string case, where one is
      * required to store the content in the repeated bytes string_data field.
-     *
      * When this raw_data field is used to store tensor value, elements MUST
      * be stored in as fixed-width, little-endian order.
      * Floating-point data types MUST be stored in IEEE 754 format.
      * Complex64 elements must be written as two consecutive FLOAT values, real component first.
      * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
      * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
-     *
      * Note: the advantage of specific field rather than the raw_data field is
      * that in some cases (e.g. int data), protobuf does a better packing via
      * variable length storage, and may lead to smaller binary footprint.
@@ -9140,14 +9130,12 @@ public final class TensorNamespace {
      * Serializations can either use one of the fields above, or use this
      * raw bytes field. The only exception is the string case, where one is
      * required to store the content in the repeated bytes string_data field.
-     *
      * When this raw_data field is used to store tensor value, elements MUST
      * be stored in as fixed-width, little-endian order.
      * Floating-point data types MUST be stored in IEEE 754 format.
      * Complex64 elements must be written as two consecutive FLOAT values, real component first.
      * Complex128 elements must be written as two consecutive DOUBLE values, real component first.
      * Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
-     *
      * Note: the advantage of specific field rather than the raw_data field is
      * that in some cases (e.g. int data), protobuf does a better packing via
      * variable length storage, and may lead to smaller binary footprint.
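
The raw_data contract repeated in these javadoc blocks is mechanical. For illustration only (plain JDK code, not part of the commit), a minimal sketch of a conforming writer for the float and boolean cases described above:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class RawDataEncoding {
        static byte[] encodeFloats(float[] values) {
            ByteBuffer buf = ByteBuffer.allocate(values.length * Float.BYTES)
                    .order(ByteOrder.LITTLE_ENDIAN);   // elements MUST be little-endian
            for (float v : values) {
                buf.putFloat(v);                       // IEEE 754 single precision
            }
            return buf.array();
        }

        static byte[] encodeBooleans(boolean[] values) {
            byte[] out = new byte[values.length];      // one byte per tensor element
            for (int i = 0; i < values.length; i++) {
                out[i] = (byte) (values[i] ? 1 : 0);   // 00000001 true, 00000000 false
            }
            return out;
        }
    }
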
@@ -58,6 +58,11 @@ public class Split extends DynamicCustomOp {
     super(null, new INDArray[]{in}, wrapOrNull(out), null, (List<Integer>)null);
   }

+  public Split(INDArray input, int numSplit, int splitDim) {
+    super(null,input,null,Collections.emptyList(),new int[0]);
+    addIArgument(numSplit,splitDim);
+  }
+

   @Override
   public String opName() {
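
A minimal sketch of what the new convenience constructor enables — constructing and executing the op directly, mirroring what the generated NDBase wrapper in the next hunk does internally; shapes are illustrative:

    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.api.ops.impl.shape.Split;
    import org.nd4j.linalg.factory.Nd4j;

    public class SplitOpSketch {
        public static void main(String[] args) {
            INDArray input = Nd4j.rand(4, 6);
            // numSplit and splitDim travel as integer args (addIArgument above)
            INDArray[] parts = Nd4j.exec(new Split(input, 2, 1)); // two 4x3 arrays
            System.out.println(parts.length);
        }
    }
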
@@ -1800,6 +1800,18 @@ public class NDBase {
     return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.shape.Slice(input, begin, size))[0];
   }

+  /**
+   * Split a value in to a list of ndarrays.<br>
+   *
+   * @param input Input to split (NUMERIC type)
+   * @param numSplit Number of splits
+   * @param splitDim The dimension to split on
+   */
+  public INDArray[] split(INDArray input, int numSplit, int splitDim) {
+    NDValidation.validateNumerical("split", "input", input);
+    return Nd4j.exec(new org.nd4j.linalg.api.ops.impl.shape.Split(input, numSplit, splitDim));
+  }
+
   /**
    * Squared L2 norm: see norm2(String, SDVariable, boolean, int...)<br>
    *
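
The same call through the generated ND API — a sketch assuming NDBase lives in org.nd4j.linalg.factory.ops and can be instantiated directly, as the other generated namespace classes are:

    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.factory.Nd4j;
    import org.nd4j.linalg.factory.ops.NDBase;

    public class NDBaseSplitSketch {
        public static void main(String[] args) {
            INDArray input = Nd4j.rand(4, 6);
            // validateNumerical rejects non-numeric inputs before the op runs
            INDArray[] parts = new NDBase().split(input, 2, 1);
            System.out.println(parts[0].shapeInfoToString());
        }
    }
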
@@ -25870,6 +25870,61 @@ public static final double TAD_THRESHOLD = TAD_THRESHOLD();
 }
 // #endif

+    /**
+     * Implementation of CTC loss function
+     *
+     * Input arrays:
+     *    0: labels - labels NDArray {BATCH_LEN, MAX_TARGET_LEN}, type integer
+     *    1: logits - logits NDArray {BATCH_LEN, FRAME_LEN, CLASS_LEN }. log softmax of rnn output. It should include a blank label as well, type float
+     *    2: targetLabelLengths - Length of label sequence in labels NDArray {BATCH_LEN}, type integer
+     *    3: logitsLengths - Length of input sequence in logits NDArray {BATCH_LEN}, type integer
+     *
+     *
+     * Input integer arguments:
+     *    0: blank index - index of the blank label in logits
+     *
+     * Output array:
+     *    0: loss values, type float. NDArray {BATCH_LEN} negative log probabilities of loss
+     */
+    // #if NOT_EXCLUDED(OP_ctc_loss)
+    @Namespace("sd::ops") public static class ctc_loss extends DeclarableCustomOp {
+        static { Loader.load(); }
+        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+        public ctc_loss(Pointer p) { super(p); }
+        /** Native array allocator. Access with {@link Pointer#position(long)}. */
+        public ctc_loss(long size) { super((Pointer)null); allocateArray(size); }
+        private native void allocateArray(long size);
+        @Override public ctc_loss position(long position) {
+            return (ctc_loss)super.position(position);
+        }
+        @Override public ctc_loss getPointer(long i) {
+            return new ctc_loss((Pointer)this).position(position + i);
+        }
+
+        public ctc_loss() { super((Pointer)null); allocate(); }
+        private native void allocate();
+        public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
+    }
+    @Namespace("sd::ops") public static class ctc_loss_grad extends DeclarableCustomOp {
+        static { Loader.load(); }
+        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+        public ctc_loss_grad(Pointer p) { super(p); }
+        /** Native array allocator. Access with {@link Pointer#position(long)}. */
+        public ctc_loss_grad(long size) { super((Pointer)null); allocateArray(size); }
+        private native void allocateArray(long size);
+        @Override public ctc_loss_grad position(long position) {
+            return (ctc_loss_grad)super.position(position);
+        }
+        @Override public ctc_loss_grad getPointer(long i) {
+            return new ctc_loss_grad((Pointer)this).position(position + i);
+        }
+
+        public ctc_loss_grad() { super((Pointer)null); allocate(); }
+        private native void allocate();
+        public native ShapeList calculateOutputShape(ShapeList inputShape, @ByRef Context block);
+    }
+    // #endif

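
The comment above fully specifies the op's calling convention. As a hypothetical invocation sketch from Java via the generic DynamicCustomOp builder — the op name and the input/iArg layout are taken from the native comment, not from any published Java wrapper, and the random logits stand in for a real log-softmax output:

    import org.nd4j.linalg.api.buffer.DataType;
    import org.nd4j.linalg.api.ndarray.INDArray;
    import org.nd4j.linalg.api.ops.DynamicCustomOp;
    import org.nd4j.linalg.factory.Nd4j;

    public class CtcLossSketch {
        public static void main(String[] args) {
            int batch = 2, maxTarget = 5, frames = 10, classes = 6;
            INDArray labels = Nd4j.zeros(DataType.INT32, batch, maxTarget);          // {BATCH_LEN, MAX_TARGET_LEN}
            INDArray logits = Nd4j.rand(DataType.FLOAT, batch, frames, classes);     // stand-in for log softmax of rnn output
            INDArray targetLen = Nd4j.createFromArray(5, 5).castTo(DataType.INT32);  // {BATCH_LEN}
            INDArray logitLen = Nd4j.createFromArray(10, 10).castTo(DataType.INT32); // {BATCH_LEN}

            DynamicCustomOp op = DynamicCustomOp.builder("ctc_loss")
                    .addInputs(labels, logits, targetLen, logitLen)
                    .addIntegerArguments(0)   // iArg 0: index of the blank label
                    .build();
            INDArray[] loss = Nd4j.exec(op);  // {BATCH_LEN} negative log probabilities
            System.out.println(loss[0]);
        }
    }
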
@@ -2123,7 +2123,7 @@ public class TransformOpValidation extends BaseOpValidation {
         //TODO: Methods failed ResizeLanczos5, ResizeMitchelcubic, ResizeArea

         for (ImageResizeMethod method : ImageResizeMethod.values()) {
-            if (method==ImageResizeMethod.ResizeLanczos5 || method==ImageResizeMethod.ResizeArea || method==ImageResizeMethod.ResizeMitchellcubic)
+            if (method==ImageResizeMethod.ResizeLanczos5 || method==ImageResizeMethod.ResizeArea || method == ImageResizeMethod.ResizeMitchelcubic)
            {continue;}

            log.info("Trying {}", method);
@@ -69,7 +69,7 @@ class GroupConvPreProcessingRule: PreImportHook {
        val listOfFunctions = ArrayList<DifferentialFunction>()
        val weights = sd.getVariable(op.inputsToOp[1])
        //for onnx, this is the number of ops
-       val split = sd.split(op.name + "_split",weights,numSizeSplits.toInt(),1)
+       val split = sd.split(listOf(op.name + "_split").toTypedArray(),weights,numSizeSplits.toInt(),1)
        val resultMap = HashMap<String,List<SDVariable>>()
        /**
         * NOTE: Need to look in to how to wire up inputs and outputs properly.
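
The Kotlin change above simply adapts the import hook to the names-array overload generated in this commit. An equivalent call from Java, with illustrative names and shapes:

    import org.nd4j.autodiff.samediff.SDVariable;
    import org.nd4j.autodiff.samediff.SameDiff;
    import org.nd4j.linalg.api.buffer.DataType;

    public class NamedSplitSketch {
        public static void main(String[] args) {
            SameDiff sd = SameDiff.create();
            SDVariable weights = sd.var("weights", DataType.FLOAT, 8, 4);
            // The output variables pick up the supplied names (w0, w1)
            SDVariable[] parts = sd.split(new String[]{"w0", "w1"}, weights, 2, 0);
            System.out.println(parts.length); // 2
        }
    }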