
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
package org.deeplearning4j.nn.api;

import java.util.Map;

import org.nd4j.linalg.api.ndarray.INDArray;

public interface ITrainableLayer {

    /**
     * The param table: all parameters for this layer, keyed by parameter name.
     *
     * @return Map of parameter names to parameter arrays
     */
    Map<String, INDArray> getParamTable();

    /**
     * Table of parameters by key, for backprop. For many models (dense layers, etc.) all parameters
     * are backprop parameters.
     *
     * @param isBackprop If true, return backprop params only; if false, return all params
     *                   (equivalent to getParamTable())
     * @return Map of parameter names to parameter arrays
     */
    Map<String, INDArray> getParamTable(boolean isBackprop);

    /**
     * Setter for the param table.
     *
     * @param paramTable Map of parameter names to parameter arrays
     */
    void setParamTable(Map<String, INDArray> paramTable);
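
    // Usage sketch (illustrative, not part of this interface): for a typical dense
    // layer the param table maps parameter keys to their arrays, e.g. "W" (weights)
    // and "b" (biases):
    //
    //   Map<String, INDArray> table = layer.getParamTable();
    //   INDArray weights = table.get("W");   // weight matrix
    //   INDArray biases  = table.get("b");   // bias vector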

    /**
     * @return Training configuration
     */
    ITraininableLayerConfiguration getTrainingConfig();

    /**
     * @return Number of parameters
     */
    long numParams();

    /**
     * @return 1d parameter vector
     */
    INDArray getParams();
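
    // Note (illustrative): getParams() returns all parameters concatenated into a
    // single 1d vector, so its length is expected to equal numParams():
    //
    //   INDArray flat = layer.getParams();
    //   assert flat.length() == layer.numParams();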

    /**
     * DL4J layers typically produce the sum of the gradients during the backward pass, and if required
     * (if minibatch=true) then divide by the minibatch size.<br>
     * However, there are some exceptions, such as the batch norm mean/variance estimate parameters: these "gradients"
     * are not actually gradients, but updates to be applied directly to the parameter vector. Put another way,
     * most gradients should be divided by the minibatch size to get the average; some "gradients" are already final
     * updates and should not be divided by the minibatch size.
     *
     * @param paramName Name of the parameter
     * @return True if gradients should be divided by the minibatch size (most params); false otherwise
     *         (edge cases like the batch norm mean/variance estimates)
     */
    boolean updaterDivideByMinibatch(String paramName);
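
    // Implementation sketch (an assumption, not prescribed by this interface): a
    // batch normalization layer might exclude its running mean/variance estimates
    // from minibatch averaging like so ("mean"/"var" are hypothetical key names):
    //
    //   @Override
    //   public boolean updaterDivideByMinibatch(String paramName) {
    //       // running mean/variance "gradients" are direct updates, not true
    //       // gradients, so they must not be averaged over the minibatch
    //       return !("mean".equals(paramName) || "var".equals(paramName));
    //   }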

    /**
     * @return 1D gradients view array
     */
    INDArray getGradientsViewArray();
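
    // Note (an assumption about implementations): the gradients view is typically a
    // flattened array with the same layout and length as getParams(), so element i
    // of getGradientsViewArray() holds the gradient for element i of getParams():
    //
    //   assert layer.getGradientsViewArray().length() == layer.numParams();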
}