TensorRT  7.2.1.6
NVIDIA TensorRT
nvinfer1::INetworkDefinition Class Reference (abstract)

A network definition for input to the builder. More...


Public Member Functions

virtual ITensor * addInput (const char *name, DataType type, Dims dimensions)=0
 Add an input tensor to the network. More...
 
virtual void markOutput (ITensor &tensor)=0
 Mark a tensor as a network output. More...
 
 __attribute__ ((deprecated)) virtual IConvolutionLayer *addConvolution(ITensor &input
 Add a convolution layer to the network. More...
 
virtual IFullyConnectedLayer * addFullyConnected (ITensor &input, int32_t nbOutputs, Weights kernelWeights, Weights biasWeights)=0
 Add a fully connected layer to the network. More...
 
virtual IActivationLayer * addActivation (ITensor &input, ActivationType type)=0
 Add an activation layer to the network. More...
 
 __attribute__ ((deprecated)) virtual IPoolingLayer *addPooling(ITensor &input
 Add a pooling layer to the network. More...
 
virtual ILRNLayer * addLRN (ITensor &input, int32_t window, float alpha, float beta, float k)=0
 Add a LRN layer to the network. More...
 
virtual IScaleLayer * addScale (ITensor &input, ScaleMode mode, Weights shift, Weights scale, Weights power)=0
 Add a Scale layer to the network. More...
 
virtual ISoftMaxLayer * addSoftMax (ITensor &input)=0
 Add a SoftMax layer to the network. More...
 
virtual IConcatenationLayer * addConcatenation (ITensor *const *inputs, int32_t nbInputs)=0
 Add a concatenation layer to the network. More...
 
 __attribute__ ((deprecated)) virtual IDeconvolutionLayer *addDeconvolution(ITensor &input
 Add a deconvolution layer to the network. More...
 
virtual IElementWiseLayer * addElementWise (ITensor &input1, ITensor &input2, ElementWiseOperation op)=0
 Add an elementwise layer to the network. More...
 
 __attribute__ ((deprecated)) virtual IRNNLayer *addRNN(ITensor &inputs
 Add a layerCount-deep RNN layer to the network with a sequence length of maxSeqLen and hiddenSize internal state per layer. More...
 
 __attribute__ ((deprecated)) virtual IPluginLayer *addPlugin(ITensor *const *inputs
 Add a plugin layer to the network. More...
 
virtual IUnaryLayer * addUnary (ITensor &input, UnaryOperation operation)=0
 Add a unary layer to the network. More...
 
 __attribute__ ((deprecated)) virtual IPaddingLayer *addPadding(ITensor &input
 Add a padding layer to the network. More...
 
virtual IShuffleLayer * addShuffle (ITensor &input)=0
 Add a shuffle layer to the network. More...
 
 __attribute__ ((deprecated)) virtual void setPoolingOutputDimensionsFormula(IOutputDimensionsFormula *formula)=0
 Set the pooling output dimensions formula. More...
 
 __attribute__ ((deprecated)) virtual IOutputDimensionsFormula &getPoolingOutputDimensionsFormula() const =0
 Get the pooling output dimensions formula. More...
 
 __attribute__ ((deprecated)) virtual void setConvolutionOutputDimensionsFormula(IOutputDimensionsFormula *formula)=0
 Set the convolution output dimensions formula. More...
 
 __attribute__ ((deprecated)) virtual IOutputDimensionsFormula &getConvolutionOutputDimensionsFormula() const =0
 Get the convolution output dimensions formula. More...
 
 __attribute__ ((deprecated)) virtual void setDeconvolutionOutputDimensionsFormula(IOutputDimensionsFormula *formula)=0
 Set the deconvolution output dimensions formula. More...
 
 __attribute__ ((deprecated)) virtual IOutputDimensionsFormula &getDeconvolutionOutputDimensionsFormula() const =0
 Get the deconvolution output dimensions formula. More...
 
virtual int32_t getNbLayers () const =0
 Get the number of layers in the network. More...
 
virtual ILayer * getLayer (int32_t index) const =0
 Get the layer specified by the given index. More...
 
virtual int32_t getNbInputs () const =0
 Get the number of inputs in the network. More...
 
virtual ITensor * getInput (int32_t index) const =0
 Get the input tensor specified by the given index. More...
 
virtual int32_t getNbOutputs () const =0
 Get the number of outputs in the network. More...
 
virtual ITensor * getOutput (int32_t index) const =0
 Get the output tensor specified by the given index. More...
 
virtual void destroy ()=0
 Destroy this INetworkDefinition object. More...
 
virtual IReduceLayer * addReduce (ITensor &input, ReduceOperation operation, uint32_t reduceAxes, bool keepDimensions)=0
 Add a reduce layer to the network. More...
 
virtual ITopKLayer * addTopK (ITensor &input, TopKOperation op, int32_t k, uint32_t reduceAxes)=0
 Add a TopK layer to the network. More...
 
virtual IGatherLayer * addGather (ITensor &data, ITensor &indices, int32_t axis)=0
 Add a gather layer to the network. More...
 
virtual IRaggedSoftMaxLayer * addRaggedSoftMax (ITensor &input, ITensor &bounds)=0
 Add a RaggedSoftMax layer to the network. More...
 
virtual IMatrixMultiplyLayer * addMatrixMultiply (ITensor &input0, MatrixOperation op0, ITensor &input1, MatrixOperation op1)=0
 Add a MatrixMultiply layer to the network. More...
 
 __attribute__ ((deprecated)) virtual IMatrixMultiplyLayer *addMatrixMultiply(ITensor &input0
 Add a MatrixMultiply layer to the network. More...
 
virtual IConstantLayer * addConstant (Dims dimensions, Weights weights)=0
 Add a constant layer to the network. More...
 
 __attribute__ ((deprecated)) virtual IRNNv2Layer *addRNNv2(ITensor &input
 Add a layerCount-deep RNN layer to the network with hiddenSize internal states that can take a batch with fixed or variable sequence lengths. More...
 
 __attribute__ ((deprecated)) virtual IPluginLayer *addPluginExt(ITensor *const *inputs
 Add a plugin layer to the network using an IPluginExt interface. More...
 
virtual IIdentityLayer * addIdentity (ITensor &input)=0
 Add an identity layer. More...
 
virtual void removeTensor (ITensor &tensor)=0
 remove a tensor from the network definition. More...
 
virtual void unmarkOutput (ITensor &tensor)=0
 unmark a tensor as a network output. More...
 
virtual IPluginV2Layer * addPluginV2 (ITensor *const *inputs, int32_t nbInputs, IPluginV2 &plugin)=0
 Add a plugin layer to the network using the IPluginV2 interface. More...
 
virtual ISliceLayer * addSlice (ITensor &input, Dims start, Dims size, Dims stride)=0
 Add a slice layer to the network. More...
 
virtual void setName (const char *name)=0
 Sets the name of the network. More...
 
virtual const char * getName () const =0
 Returns the name associated with the network. More...
 
virtual IShapeLayer * addShape (ITensor &input)=0
 Add a shape layer to the network. More...
 
virtual bool hasImplicitBatchDimension () const =0
 Query whether the network was created with an implicit batch dimension. More...
 
virtual bool markOutputForShapes (ITensor &tensor)=0
 Enable tensor's value to be computed by IExecutionContext::getShapeBinding. More...
 
virtual bool unmarkOutputForShapes (ITensor &tensor)=0
 Undo markOutputForShapes. More...
 
virtual IParametricReLULayer * addParametricReLU (ITensor &input, ITensor &slope) noexcept=0
 Add a parametric ReLU layer to the network. More...
 
virtual IConvolutionLayer * addConvolutionNd (ITensor &input, int32_t nbOutputMaps, Dims kernelSize, Weights kernelWeights, Weights biasWeights)=0
 Add a multi-dimension convolution layer to the network. More...
 
virtual IPoolingLayer * addPoolingNd (ITensor &input, PoolingType type, Dims windowSize)=0
 Add a multi-dimension pooling layer to the network. More...
 
virtual IDeconvolutionLayer * addDeconvolutionNd (ITensor &input, int32_t nbOutputMaps, Dims kernelSize, Weights kernelWeights, Weights biasWeights)=0
 Add a multi-dimension deconvolution layer to the network. More...
 
virtual IScaleLayer * addScaleNd (ITensor &input, ScaleMode mode, Weights shift, Weights scale, Weights power, int32_t channelAxis)=0
 Add a multi-dimension scale layer to the network. More...
 
virtual IResizeLayer * addResize (ITensor &input)=0
 Add a resize layer to the network. More...
 
virtual bool hasExplicitPrecision () const =0
 True if network is an explicit precision network. More...
 
virtual ILoop * addLoop () noexcept=0
 Add a loop to the network. More...
 
virtual ISelectLayer * addSelect (ITensor &condition, ITensor &thenInput, ITensor &elseInput)=0
 Add a select layer to the network. More...
 
virtual IFillLayer * addFill (Dims dimensions, FillOperation op) noexcept=0
 Add a fill layer to the network. More...
 
virtual IPaddingLayer * addPaddingNd (ITensor &input, Dims prePadding, Dims postPadding)=0
 Add a padding layer to the network. More...
 

Public Attributes

int32_t nbOutputMaps
 
int32_t DimsHW kernelSize
 
int32_t DimsHW Weights kernelWeights
 
int32_t DimsHW Weights Weights biasWeights = 0
 
PoolingType type
 
PoolingType DimsHW windowSize = 0
 
int32_t layerCount
 
int32_t std::size_t hiddenSize
 
int32_t std::size_t int32_t maxSeqLen
 
int32_t std::size_t int32_t RNNOperation op
 
int32_t std::size_t int32_t RNNOperation RNNInputMode mode
 
int32_t std::size_t int32_t RNNOperation RNNInputMode RNNDirection dir
 
int32_t std::size_t int32_t RNNOperation RNNInputMode RNNDirection Weights weights
 
int32_t std::size_t int32_t RNNOperation RNNInputMode RNNDirection Weights Weights bias = 0
 
int32_t nbInputs
 
int32_t IPlugin &plugin = 0
 
DimsHW prePadding
 
DimsHW DimsHW postPadding = 0
 
bool transpose0
 
bool ITensor &input1
 
bool ITensor bool transpose1 = 0
 
int32_t int32_t hiddenSize
 
int32_t int32_t int32_t maxSeqLen
 
int32_t int32_t int32_t RNNOperation op = 0
 
int32_t IPluginExt &plugin = 0
 

Protected Member Functions

virtual ~INetworkDefinition ()
 

Detailed Description

A network definition for input to the builder.

A network definition defines the structure of the network, and combined with an IBuilderConfig, is built into an engine using an IBuilder. An INetworkDefinition can either have an implicit batch dimension, specified at runtime, or have all dimensions explicit (full-dims mode) in the network definition. When a network has been created using createNetwork(), only implicit batch size mode is supported. The function hasImplicitBatchDimension() is used to query the mode of the network.

A network with implicit batch dimensions returns the dimensions of a layer without the implicit dimension, and instead the batch is specified at execute/enqueue time. If the network has all dimensions specified, then the first dimension follows elementwise broadcast rules: if it is 1 for some inputs and is some value N for all other inputs, then the first dimension of each output is N, and the inputs with 1 for the first dimension are broadcast. Having divergent batch sizes across inputs to a layer is not supported.

Warning
Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
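
For orientation, the following is a minimal sketch (not part of the original reference) of how an INetworkDefinition is typically created, populated, and built together with an IBuilderConfig. It assumes TensorRT 7.x headers and an application-provided logger implementing ILogger; names and shapes are illustrative.

    #include "NvInfer.h"
    using namespace nvinfer1;

    // Sketch: build an engine from an explicit-batch network definition.
    ICudaEngine* buildSketch(ILogger& logger)
    {
        IBuilder* builder = createInferBuilder(logger);
        const auto flags = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
        INetworkDefinition* network = builder->createNetworkV2(flags);

        // Define the graph: inputs, layers, outputs.
        ITensor* input = network->addInput("input", DataType::kFLOAT, Dims4{1, 3, 224, 224});
        IActivationLayer* relu = network->addActivation(*input, ActivationType::kRELU);
        network->markOutput(*relu->getOutput(0));

        // Combine with a builder configuration and build the engine.
        IBuilderConfig* config = builder->createBuilderConfig();
        config->setMaxWorkspaceSize(1 << 28); // 256 MiB, arbitrary for this sketch
        ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);

        // TensorRT 7.x objects are released with destroy().
        config->destroy();
        network->destroy();
        builder->destroy();
        return engine;
    }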

Constructor & Destructor Documentation

◆ ~INetworkDefinition()

virtual nvinfer1::INetworkDefinition::~INetworkDefinition ( )
inline protected virtual

Member Function Documentation

◆ addInput()

virtual ITensor* nvinfer1::INetworkDefinition::addInput ( const char *  name,
DataType  type,
Dims  dimensions 
)
pure virtual

Add an input tensor to the network.

The name of the input tensor is used to find the index into the buffer array for an engine built from the network. The volume of the dimensions must be less than 2^30 elements. For networks with an implicit batch dimension, this volume includes the batch dimension with its length set to the maximum batch size. For networks with all explicit dimensions and with wildcard dimensions, the volume is based on the maxima specified by an IOptimizationProfile.

Dimensions are normally non-negative integers. The exception is that in networks with all explicit dimensions, -1 can be used as a wildcard for a dimension to be specified at runtime. Input tensors with such a wildcard must have a corresponding entry in the IOptimizationProfiles indicating the permitted extrema, and the input dimensions must be set by IExecutionContext::setBindingDimensions. Different IExecutionContext instances can have different dimensions. Wildcard dimensions are only supported for EngineCapability::kDEFAULT. They are not supported in safety contexts. DLA does not support wildcard dimensions.

Tensor dimensions are specified independent of format. For example, if a tensor is formatted in "NHWC" or a vectorized format, the dimensions are still specified in the order {N, C, H, W}. For 2D images with a channel dimension, the last three dimensions are always {C, H, W}. For 3D images with a channel dimension, the last four dimensions are always {C, D, H, W}.

Parameters
name: The name of the tensor.
type: The type of the data held in the tensor.
dimensions: The dimensions of the tensor.
Warning
It is an error to specify a wildcard value on a dimension that is determined by trained parameters.
If run on DLA with explicit dimensions, only the leading dimension can be a wildcard, and the provided profile must have the same minimum, optimum, and maximum dimensions.
See also
ITensor
Returns
The new tensor or nullptr if there is an error.
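
As an illustration only (assuming the builder, network, and config objects from the sketch in the Detailed Description), an input with a wildcard batch dimension is paired with an IOptimizationProfile; the tensor name and shapes here are made up:

    // Declare an input whose first (batch) dimension is resolved at runtime.
    ITensor* input = network->addInput("input", DataType::kFLOAT, Dims4{-1, 3, 224, 224});

    // Register the permitted extrema for the wildcard dimension.
    IOptimizationProfile* profile = builder->createOptimizationProfile();
    profile->setDimensions("input", OptProfileSelector::kMIN, Dims4{1, 3, 224, 224});
    profile->setDimensions("input", OptProfileSelector::kOPT, Dims4{8, 3, 224, 224});
    profile->setDimensions("input", OptProfileSelector::kMAX, Dims4{32, 3, 224, 224});
    config->addOptimizationProfile(profile);

    // At run time each execution context supplies the concrete shape, e.g.
    // context->setBindingDimensions(bindingIndex, Dims4{8, 3, 224, 224});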

◆ markOutput()

virtual void nvinfer1::INetworkDefinition::markOutput ( ITensor &tensor)
pure virtual

Mark a tensor as a network output.

Parameters
tensor: The tensor to mark as an output tensor.
Warning
It is an error to mark a network input as an output.

◆ __attribute__() [1/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a convolution layer to the network.

Parameters
input: The input tensor to the convolution.
nbOutputMaps: The number of output feature maps for the convolution.
kernelSize: The HW-dimensions of the convolution kernel.
kernelWeights: The kernel weights for the convolution.
biasWeights: The optional bias weights for the convolution.
See also
IConvolutionLayer
Warning
It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
Int32 tensors are not valid input tensors.
Returns
The new convolution layer, or nullptr if it could not be created.
Deprecated:
Superseded by addConvolutionNd and will be removed in TensorRT 9.0.

◆ addFullyConnected()

virtual IFullyConnectedLayer* nvinfer1::INetworkDefinition::addFullyConnected ( ITensor &input,
int32_t  nbOutputs,
Weights  kernelWeights,
Weights  biasWeights 
)
pure virtual

Add a fully connected layer to the network.

Parameters
input: The input tensor to the layer.
nbOutputs: The number of outputs of the layer.
kernelWeights: The kernel weights for the fully connected layer.
biasWeights: The optional bias weights for the fully connected layer.
See also
IFullyConnectedLayer
Warning
It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
Int32 tensors are not valid input tensors.
Returns
The new fully connected layer, or nullptr if it could not be created.

◆ addActivation()

virtual IActivationLayer* nvinfer1::INetworkDefinition::addActivation ( ITensor &input,
ActivationType  type 
)
pure virtual

Add an activation layer to the network.

Parameters
input: The input tensor to the layer.
type: The type of activation function to apply.

Note that the setAlpha() and setBeta() methods must be used on the returned layer for activations that require these parameters.

See also
IActivationLayer ActivationType
Warning
Int32 tensors are not valid input tensors.
Returns
The new activation layer, or nullptr if it could not be created.
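
For example, a minimal sketch (assuming a network and input tensor as in the earlier sketches): a leaky ReLU takes its negative-slope coefficient through setAlpha() on the returned layer, since addActivation() itself only receives the activation type.

    IActivationLayer* lrelu = network->addActivation(*input, ActivationType::kLEAKY_RELU);
    lrelu->setAlpha(0.1f);                     // illustrative slope; error checking omitted
    ITensor* activated = lrelu->getOutput(0);  // output tensor of the activation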

◆ __attribute__() [2/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a pooling layer to the network.

Parameters
input: The input tensor to the layer.
type: The type of pooling to apply.
windowSize: The size of the pooling window.
See also
IPoolingLayer PoolingType
Warning
Int32 tensors are not valid input tensors.
Returns
The new pooling layer, or nullptr if it could not be created.
Deprecated:
Superseded by addPoolingNd and will be removed in TensorRT 9.0.

◆ addLRN()

virtual ILRNLayer* nvinfer1::INetworkDefinition::addLRN ( ITensor &input,
int32_t  window,
float  alpha,
float  beta,
float  k 
)
pure virtual

Add a LRN layer to the network.

Parameters
input: The input tensor to the layer.
window: The size of the window.
alpha: The alpha value for the LRN computation.
beta: The beta value for the LRN computation.
k: The k value for the LRN computation.
See also
ILRNLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new LRN layer, or nullptr if it could not be created.

◆ addScale()

virtual IScaleLayer* nvinfer1::INetworkDefinition::addScale ( ITensor &input,
ScaleMode  mode,
Weights  shift,
Weights  scale,
Weights  power 
)
pure virtual

Add a Scale layer to the network.

Parameters
input: The input tensor to the layer. This tensor is required to have a minimum of 3 dimensions.
mode: The scaling mode.
shift: The shift value.
scale: The scale value.
power: The power value.

If the weights are available, then the size of the weights depends on the ScaleMode. For ::kUNIFORM, the number of weights equals 1. For ::kCHANNEL, the number of weights equals the channel dimension. For ::kELEMENTWISE, the number of weights equals the product of the last three dimensions of the input.

See also
addScaleNd
IScaleLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new Scale layer, or nullptr if it could not be created.
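
A sketch of a per-channel scale, assuming a network and a CHW input tensor with 3 channels (all values illustrative): for ScaleMode::kCHANNEL the weight count must equal the channel dimension, and the arrays must remain valid until the engine is built.

    static const float shiftVals[3] = {0.0f, 0.0f, 0.0f};
    static const float scaleVals[3] = {0.5f, 0.5f, 0.5f};
    static const float powerVals[3] = {1.0f, 1.0f, 1.0f};
    Weights shift{DataType::kFLOAT, shiftVals, 3};
    Weights scale{DataType::kFLOAT, scaleVals, 3};
    Weights power{DataType::kFLOAT, powerVals, 3};
    IScaleLayer* scaleLayer = network->addScale(*input, ScaleMode::kCHANNEL, shift, scale, power);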

◆ addSoftMax()

virtual ISoftMaxLayer* nvinfer1::INetworkDefinition::addSoftMax ( ITensor &input)
pure virtual

Add a SoftMax layer to the network.

See also
ISoftMaxLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new SoftMax layer, or nullptr if it could not be created.

◆ addConcatenation()

virtual IConcatenationLayer* nvinfer1::INetworkDefinition::addConcatenation ( ITensor *const *  inputs,
int32_t  nbInputs 
)
pure virtual

Add a concatenation layer to the network.

Parameters
inputs: The input tensors to the layer.
nbInputs: The number of input tensors.
See also
IConcatenationLayer
Returns
The new concatenation layer, or nullptr if it could not be created.
Warning
All input tensors must have the same dimensions in every dimension except the channel dimension.

◆ __attribute__() [3/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a deconvolution layer to the network.

Parameters
input: The input tensor to the layer.
nbOutputMaps: The number of output feature maps.
kernelSize: The HW-dimensions of the deconvolution kernel.
kernelWeights: The kernel weights for the deconvolution.
biasWeights: The optional bias weights for the deconvolution.
See also
IDeconvolutionLayer
Warning
It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
Int32 tensors are not valid input tensors.
Returns
The new deconvolution layer, or nullptr if it could not be created.
Deprecated:
Superseded by addDeconvolutionNd and will be removed in TensorRT 9.0.

◆ addElementWise()

virtual IElementWiseLayer* nvinfer1::INetworkDefinition::addElementWise ( ITensor &input1,
ITensor &input2,
ElementWiseOperation  op 
)
pure virtual

Add an elementwise layer to the network.

Parameters
input1: The first input tensor to the layer.
input2: The second input tensor to the layer.
op: The binary operation that the layer applies.

The input tensors must have the same number of dimensions. For each dimension, their lengths must match, or one of them must be one. In the latter case, the tensor is broadcast along that axis.

The output tensor has the same number of dimensions as the inputs. For each dimension, its length is the maximum of the lengths of the corresponding input dimension.

See also
IElementWiseLayer
Warning
For shape tensors, ElementWiseOperation::kPOW is not a valid op.
Returns
The new elementwise layer, or nullptr if it could not be created.
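
To illustrate the broadcast rule with a sketch (assuming a and b are existing network tensors of shape {3, 1, 5} and {3, 4, 1} respectively):

    // Dimensions of length 1 are broadcast, so the sum has shape {3, 4, 5}.
    IElementWiseLayer* sum = network->addElementWise(*a, *b, ElementWiseOperation::kSUM);
    ITensor* out = sum->getOutput(0);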

◆ __attribute__() [4/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a layerCount-deep RNN layer to the network with a sequence length of maxSeqLen and hiddenSize internal state per layer.

Parameters
inputs: The input tensor to the layer.
layerCount: The number of layers in the RNN.
hiddenSize: The size of the internal hidden state for each layer.
maxSeqLen: The maximum length of the time sequence.
op: The type of RNN to execute.
mode: The input mode for the RNN.
dir: The direction to run the RNN.
weights: The weights for the weight matrix parameters of the RNN.
bias: The weights for the bias vectors parameters of the RNN.

The inputs tensor must be of the type DataType::kFLOAT or DataType::kHALF, and have non-zero volume.

See IRNNLayer::setWeights() and IRNNLayer::setBias() for details on the required input format for weights and bias.

The layout for the input tensor should be {1, S_max, N, E}, where:

  • S_max is the maximum allowed sequence length (number of RNN iterations)
  • N is the batch size
  • E specifies the embedding length (unless ::kSKIP is set, in which case it should match getHiddenSize()).

The first output tensor is the output of the final RNN layer across all timesteps, with dimensions {S_max, N, H}:

  • S_max is the maximum allowed sequence length (number of RNN iterations)
  • N is the batch size
  • H is an output hidden state (equal to getHiddenSize() or 2x getHiddenSize())

The second tensor is the final hidden state of the RNN across all layers, and if the RNN is an LSTM (i.e. getOperation() is ::kLSTM), then the third tensor is the final cell state of the RNN across all layers. Both the second and third output tensors have dimensions {L, N, H}:

  • L is equal to getLayerCount() if getDirection is ::kUNIDIRECTION, and 2*getLayerCount() if getDirection is ::kBIDIRECTION. In the bi-directional case, layer l's final forward hidden state is stored in L = 2*l, and final backward hidden state is stored in L = 2*l + 1.
  • N is the batch size
  • H is getHiddenSize().

Note that in bidirectional RNNs, the full "hidden state" for a layer l is the concatenation of its forward hidden state and its backward hidden state, and its size is 2*H.

Deprecated:
Superseded by addRNNv2 and will be removed in TensorRT 8.0.
See also
IRNNLayer
Warning
This layer does not support wildcard dimensions or explicit batch size networks.
Int32 tensors are not valid input tensors.
Returns
The new RNN layer, or nullptr if it could not be created.

◆ __attribute__() [5/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) const

Add a plugin layer to the network.

Parameters
inputs: The input tensors to the layer.
nbInputs: The number of input tensors.
plugin: The layer plugin.
See also
IPluginLayer
Deprecated:
Superseded by addPluginV2 and will be removed in TensorRT 8.0.
Warning
Plugin inputs do not support wildcard dimensions or explicit batch size networks.
Int32 tensors are not valid input tensors.
Returns
the new plugin layer, or nullptr if it could not be created.

◆ addUnary()

virtual IUnaryLayer* nvinfer1::INetworkDefinition::addUnary ( ITensor &input,
UnaryOperation  operation 
)
pure virtual

Add a unary layer to the network.

Parameters
input: The input tensor to the layer.
operation: The operation to apply.
See also
IUnaryLayer
Warning
Int32 tensors are not valid input tensors.
Shape tensors are not supported as outputs.
Returns
The new unary layer, or nullptr if it could not be created

◆ __attribute__() [6/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a padding layer to the network.

Parameters
input: The input tensor to the layer.
prePadding: The padding to apply to the start of the tensor.
postPadding: The padding to apply to the end of the tensor.
See also
IPaddingLayer
Returns
The new padding layer, or nullptr if it could not be created.
Deprecated:
Superseded by addPaddingNd and will be removed in TensorRT 9.0.

◆ addShuffle()

virtual IShuffleLayer* nvinfer1::INetworkDefinition::addShuffle ( ITensor &input)
pure virtual

Add a shuffle layer to the network.

Parameters
input: The input tensor to the layer.
See also
IShuffleLayer
Returns
The new shuffle layer, or nullptr if it could not be created.

◆ __attribute__() [7/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  )
pure virtual

Set the pooling output dimensions formula.

Deprecated:
This method does not currently work reliably and will be removed in TensorRT 8.0.
Parameters
formula: The formula for computing the pooling output dimensions. If null is passed, the default formula is used.

The default formula in each dimension is (inputDim + padding * 2 - kernelSize) / stride + 1.

Warning
Custom output dimensions formulas are not supported with wildcard dimensions.
See also
IOutputDimensionsFormula getPoolingOutputDimensionsFormula()

◆ __attribute__() [8/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) const &
pure virtual

Get the pooling output dimensions formula.

Deprecated:
This method does not currently work reliably and will be removed in TensorRT 8.0.
Returns
The formula for computing the pooling output dimensions.
Warning
Custom output dimensions formulas are not supported with wildcard dimensions.
See also
IOutputDimensionsFormula setPoolingOutputDimensionsFormula()

◆ __attribute__() [9/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  )
pure virtual

Set the convolution output dimensions formula.

Deprecated:
This method does not currently work reliably and will be removed in TensorRT 8.0.
Parameters
formula: The formula for computing the convolution output dimensions. If null is passed, the default formula is used.

The default formula in each dimension is (inputDim + padding * 2 - kernelSize) / stride + 1.

Warning
Custom output dimensions formulas are not supported with wildcard dimensions.
See also
IOutputDimensionsFormula getConvolutionOutputDimensionsFormula()

◆ __attribute__() [10/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) const &
pure virtual

Get the convolution output dimensions formula.

Deprecated:
This method does not currently work reliably and will be removed in TensorRT 8.0.
Returns
The formula for computing the convolution output dimensions.
Warning
Custom output dimensions formulas are not supported with wildcard dimensions.
See also
IOutputDimensionsFormula setConvolutionOutputDimensionsFormula()

◆ __attribute__() [11/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  )
pure virtual

Set the deconvolution output dimensions formula.

Deprecated:
This method does not currently work reliably and will be removed in TensorRT 8.0.
Parameters
formula: The formula for computing the deconvolution output dimensions. If null is passed, the default formula is used.

The default formula in each dimension is (inputDim - 1) * stride + kernelSize - 2 * padding.

Warning
Custom output dimensions formulas are not supported with wildcard dimensions.
See also
IOutputDimensionsFormula getDeconvolutionOutputDimensionsFormula()

◆ __attribute__() [12/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) const &
pure virtual

Get the deconvolution output dimensions formula.

Returns
The formula for computing the deconvolution output dimensions.
Deprecated:
This method does not currently work reliably and will be removed in TensorRT 8.0.
Warning
Custom output dimensions formulas are not supported with wildcard dimensions.
See also
IOutputDimensionsFormula setDeconvolutionOutputDimensionsFormula()

◆ getNbLayers()

virtual int32_t nvinfer1::INetworkDefinition::getNbLayers ( ) const
pure virtual

Get the number of layers in the network.

Returns
The number of layers in the network.
See also
getLayer()

◆ getLayer()

virtual ILayer* nvinfer1::INetworkDefinition::getLayer ( int32_t  index) const
pure virtual

Get the layer specified by the given index.

Parameters
index: The index of the layer.
Returns
The layer, or nullptr if the index is out of range.
See also
getNbLayers()

◆ getNbInputs()

virtual int32_t nvinfer1::INetworkDefinition::getNbInputs ( ) const
pure virtual

Get the number of inputs in the network.

Returns
The number of inputs in the network.
See also
getInput()

◆ getInput()

virtual ITensor* nvinfer1::INetworkDefinition::getInput ( int32_t  index) const
pure virtual

Get the input tensor specified by the given index.

Parameters
index: The index of the input tensor.
Returns
The input tensor, or nullptr if the index is out of range.
See also
getNbInputs()

◆ getNbOutputs()

virtual int32_t nvinfer1::INetworkDefinition::getNbOutputs ( ) const
pure virtual

Get the number of outputs in the network.

The outputs include those marked by markOutput or markOutputForShapes.

Returns
The number of outputs in the network.
See also
getOutput()

◆ getOutput()

virtual ITensor* nvinfer1::INetworkDefinition::getOutput ( int32_t  index) const
pure virtual

Get the output tensor specified by the given index.

Parameters
index: The index of the output tensor.
Returns
The output tensor, or nullptr if the index is out of range.
See also
getNbOutputs()

◆ destroy()

virtual void nvinfer1::INetworkDefinition::destroy ( )
pure virtual

Destroy this INetworkDefinition object.

◆ addReduce()

virtual IReduceLayer* nvinfer1::INetworkDefinition::addReduce ( ITensor &input,
ReduceOperation  operation,
uint32_t  reduceAxes,
bool  keepDimensions 
)
pure virtual

Add a reduce layer to the network.

Parameters
input: The input tensor to the layer.
operation: The reduction operation to perform.
reduceAxes: The reduction dimensions. The bit in position i of bitmask reduceAxes corresponds to explicit dimension i of the result. E.g., the least significant bit corresponds to the first explicit dimension and the next to least significant bit corresponds to the second explicit dimension.
keepDimensions: The boolean that specifies whether or not to keep the reduced dimensions in the output of the layer.

The reduce layer works by performing an operation specified by operation to reduce the tensor input across the axes specified by reduceAxes.

See also
IReduceLayer
Warning
If output is a shape tensor, ReduceOperation::kAVG is unsupported.
Returns
The new reduce layer, or nullptr if it could not be created.
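
A sketch of the bitmask convention (assuming a network and an NCHW input tensor): to sum over H and W (explicit dimensions 2 and 3) while keeping them as size-1 dimensions:

    uint32_t axes = (1U << 2) | (1U << 3);
    IReduceLayer* reduce = network->addReduce(*input, ReduceOperation::kSUM, axes,
                                              /*keepDimensions=*/true);
    ITensor* pooled = reduce->getOutput(0); // shape {N, C, 1, 1}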

◆ addTopK()

virtual ITopKLayer* nvinfer1::INetworkDefinition::addTopK ( ITensor &input,
TopKOperation  op,
int32_t  k,
uint32_t  reduceAxes 
)
pure virtual

Add a TopK layer to the network.

The TopK layer has two outputs of the same dimensions. The first contains data values, the second contains index positions for the values. Output values are sorted, largest first for operation kMAX and smallest first for operation kMIN.

Currently only values of K up to 1024 are supported.

Parameters
input: The input tensor to the layer.
op: Operation to perform.
k: Number of elements to keep.
reduceAxes: The reduction dimensions. The bit in position i of bitmask reduceAxes corresponds to explicit dimension i of the result. E.g., the least significant bit corresponds to the first explicit dimension and the next to least significant bit corresponds to the second explicit dimension.

Currently reduceAxes must specify exactly one dimension, and it must be one of the last four dimensions.

See also
ITopKLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new TopK layer, or nullptr if it could not be created.
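
For example, a sketch assuming scores is a {N, C} network tensor: keep the 5 largest values along dimension 1 (the bitmask must select exactly one dimension).

    ITopKLayer* topk = network->addTopK(*scores, TopKOperation::kMAX, 5, 1U << 1);
    ITensor* values  = topk->getOutput(0); // top-5 values, shape {N, 5}
    ITensor* indices = topk->getOutput(1); // their positions, DataType::kINT32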

◆ addGather()

virtual IGatherLayer* nvinfer1::INetworkDefinition::addGather ( ITensor &data,
ITensor &indices,
int32_t  axis 
)
pure virtual

Add a gather layer to the network.

Parameters
data: The tensor to gather values from.
indices: The tensor to get indices from to populate the output tensor.
axis: The axis in the data tensor to gather on.
See also
IGatherLayer
Returns
The new gather layer, or nullptr if it could not be created.

◆ addRaggedSoftMax()

virtual IRaggedSoftMaxLayer* nvinfer1::INetworkDefinition::addRaggedSoftMax ( ITensor &input,
ITensor &bounds
)
pure virtual

Add a RaggedSoftMax layer to the network.

Parameters
input: The ZxS input tensor.
bounds: The Zx1 bounds tensor.
See also
IRaggedSoftMaxLayer
Warning
The bounds tensor cannot have the last dimension be the wildcard character.
Int32 tensors are not valid input tensors.
Returns
The new RaggedSoftMax layer, or nullptr if it could not be created.

◆ addMatrixMultiply()

virtual IMatrixMultiplyLayer* nvinfer1::INetworkDefinition::addMatrixMultiply ( ITensor &input0,
MatrixOperation  op0,
ITensor &input1,
MatrixOperation  op1 
)
pure virtual

Add a MatrixMultiply layer to the network.

Parameters
input0: The first input tensor (commonly A).
op0: The operation to apply to input0.
input1: The second input tensor (commonly B).
op1: The operation to apply to input1.
See also
IMatrixMultiplyLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new matrix multiply layer, or nullptr if it could not be created.

◆ __attribute__() [13/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a MatrixMultiply layer to the network.

Parameters
input0: The first input tensor (commonly A).
transpose0: If true, op(input0)=transpose(input0), else op(input0)=input0.
input1: The second input tensor (commonly B).
transpose1: If true, op(input1)=transpose(input1), else op(input1)=input1.
See also
IMatrixMultiplyLayer
Returns
The new matrix multiply layer, or nullptr if it could not be created.
Warning
Int32 tensors are not valid input tensors.
Deprecated:
This interface is superseded by the overload that replaces bool with MatrixOperation and will be removed in TensorRT 8.0.

◆ addConstant()

virtual IConstantLayer* nvinfer1::INetworkDefinition::addConstant ( Dims  dimensions,
Weights  weights 
)
pure virtual

Add a constant layer to the network.

Parameters
dimensions: The dimensions of the constant.
weights: The constant value, represented as weights.
See also
IConstantLayer
Returns
The new constant layer, or nullptr if it could not be created.

If weights.type is DataType::kINT32, the output is a tensor of 32-bit indices. Otherwise the output is a tensor of real values and the output type follows TensorRT's normal precision rules.

If tensors in the network have an implicit batch dimension, the constant is broadcast over that dimension.

If a wildcard dimension is used, the volume of the runtime dimensions must equal the number of weights specified.
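
A minimal sketch (assuming a network object; values illustrative) of a constant of real values; the weight memory must stay valid until the network is built:

    static const float constVals[3] = {0.1f, 0.2f, 0.3f};
    Weights constWeights{DataType::kFLOAT, constVals, 3};
    IConstantLayer* constant = network->addConstant(Dims2{1, 3}, constWeights);
    ITensor* constTensor = constant->getOutput(0); // shape {1, 3}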


◆ __attribute__() [14/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) &

Add a layerCount-deep RNN layer to the network with hiddenSize internal states that can take a batch with fixed or variable sequence lengths.

Parameters
input: The input tensor to the layer (see below).
layerCount: The number of layers in the RNN.
hiddenSize: Size of the internal hidden state for each layer.
maxSeqLen: Maximum sequence length for the input.
op: The type of RNN to execute.

By default, the layer is configured with RNNDirection::kUNIDIRECTION and RNNInputMode::kLINEAR. To change these settings, use IRNNv2Layer::setDirection() and IRNNv2Layer::setInputMode().

Weights and biases for the added layer should be set using IRNNv2Layer::setWeightsForGate() and IRNNv2Layer::setBiasForGate() prior to building an engine using this network.

The input tensors must be of the type DataType::kFLOAT or DataType::kHALF. The layout of the weights is row major and must be the same datatype as the input tensor. weights contain 8 matrices and bias contains 8 vectors.

See IRNNv2Layer::setWeightsForGate() and IRNNv2Layer::setBiasForGate() for details on the required input format for weights and bias.

The input ITensor should contain zero or more index dimensions {N1, ..., Np}, followed by two dimensions, defined as follows:

  • S_max is the maximum allowed sequence length (number of RNN iterations)
  • E specifies the embedding length (unless ::kSKIP is set, in which case it should match getHiddenSize()).

By default, all sequences in the input are assumed to be size maxSeqLen. To provide explicit sequence lengths for each input sequence in the batch, use IRNNv2Layer::setSequenceLengths().

The RNN layer outputs up to three tensors.

The first output tensor is the output of the final RNN layer across all timesteps, with dimensions {N1, ..., Np, S_max, H}:

  • N1..Np are the index dimensions specified by the input tensor
  • S_max is the maximum allowed sequence length (number of RNN iterations)
  • H is an output hidden state (equal to getHiddenSize() or 2x getHiddenSize())

The second tensor is the final hidden state of the RNN across all layers, and if the RNN is an LSTM (i.e. getOperation() is ::kLSTM), then the third tensor is the final cell state of the RNN across all layers. Both the second and third output tensors have dimensions {N1, ..., Np, L, H}:

  • N1..Np are the index dimensions specified by the input tensor
  • L is the number of layers in the RNN, equal to getLayerCount() if getDirection is ::kUNIDIRECTION, and 2x getLayerCount() if getDirection is ::kBIDIRECTION. In the bi-directional case, layer l's final forward hidden state is stored in L = 2*l, and final backward hidden state is stored in L= 2*l + 1.
  • H is the hidden state for each layer, equal to getHiddenSize().
See also
IRNNv2Layer
Deprecated:
Superseded by INetworkDefinition::addLoop and will be removed in TensorRT 9.0.
Warning
RNN inputs do not support wildcard dimensions or explicit batch size networks.
Int32 tensors are not valid input tensors, except for sequence lengths.
Returns
The new RNN layer, or nullptr if it could not be created.

◆ __attribute__() [15/15]

nvinfer1::INetworkDefinition::__attribute__ ( (deprecated)  ) const

Add a plugin layer to the network using an IPluginExt interface.

Parameters
inputs: The input tensors to the layer.
nbInputs: The number of input tensors.
plugin: The layer plugin.
See also
IPluginLayer
Deprecated:
Superseded by addPluginV2 and will be removed in TensorRT 8.0.
Warning
Plugin inputs do not support wildcard dimensions or explicit batch size networks.
Int32 tensors are not valid input tensors.
Returns
The new plugin layer, or nullptr if it could not be created.

◆ addIdentity()

virtual IIdentityLayer* nvinfer1::INetworkDefinition::addIdentity ( ITensor &input)
pure virtual

Add an identity layer.

Parameters
input: The input tensor to the layer.
See also
IIdentityLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new identity layer, or nullptr if it could not be created.

◆ removeTensor()

virtual void nvinfer1::INetworkDefinition::removeTensor ( ITensor &tensor)
pure virtual

remove a tensor from the network definition.

Parameters
tensor: The tensor to remove.

It is illegal to remove a tensor that is the input or output of a layer. If this method is called with such a tensor, a warning is emitted in the log and the call is ignored. Its intended use is to remove detached tensors after, e.g., concatenating two networks with Layer::setInput().

◆ unmarkOutput()

virtual void nvinfer1::INetworkDefinition::unmarkOutput ( ITensor &tensor)
pure virtual

unmark a tensor as a network output.

Parameters
tensor: The tensor to unmark as an output tensor.

see markOutput()

◆ addPluginV2()

virtual IPluginV2Layer* nvinfer1::INetworkDefinition::addPluginV2 ( ITensor *const *  inputs,
int32_t  nbInputs,
IPluginV2 &plugin
)
pure virtual

Add a plugin layer to the network using the IPluginV2 interface.

Parameters
inputs: The input tensors to the layer.
nbInputs: The number of input tensors.
plugin: The layer plugin.
See also
IPluginV2Layer
Warning
Dimension wildcards are only supported with IPluginV2DynamicExt or IPluginV2IOExt plugins.
Int32 tensors are not valid input tensors.
Returns
The new plugin layer, or nullptr if it could not be created.

◆ addSlice()

virtual ISliceLayer* nvinfer1::INetworkDefinition::addSlice ( ITensor &input,
Dims  start,
Dims  size,
Dims  stride 
)
pure virtual

Add a slice layer to the network.

Parameters
input: The input tensor to the layer.
start: The start offset.
size: The output dimension.
stride: The slicing stride.

Positive, negative, zero stride values, and combinations of them in different dimensions are allowed.

See also
ISliceLayer
Returns
The new slice layer, or nullptr if it could not be created.
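
For example, a sketch assuming a network and a {1, 3, 224, 224} input tensor: take every second pixel from the top-left 112x112 region.

    ISliceLayer* slice = network->addSlice(*input,
                                           Dims4{0, 0, 0, 0},    // start
                                           Dims4{1, 3, 56, 56},  // output size
                                           Dims4{1, 1, 2, 2});   // stride
    ITensor* cropped = slice->getOutput(0);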

◆ setName()

virtual void nvinfer1::INetworkDefinition::setName ( const char *  name)
pure virtual

Sets the name of the network.

Parameters
name: The name to assign to this network.

Set the name of the network so that it can be associated with a built engine. The name must be a zero delimited C-style string of length no greater than 128 characters. TensorRT makes no use of this string except storing it as part of the engine so that it may be retrieved at runtime. A name unique to the builder will be generated by default.

This method copies the name string.

See also
INetworkDefinition::getName(), ISafeCudaEngine::getName()
Returns
none

◆ getName()

virtual const char* nvinfer1::INetworkDefinition::getName ( ) const
pure virtual

Returns the name associated with the network.

The memory pointed to by getName() is owned by the INetworkDefinition object.

See also
INetworkDefinition::setName()
Returns
A zero delimited C-style string representing the name of the network.

◆ addShape()

virtual IShapeLayer* nvinfer1::INetworkDefinition::addShape ( ITensor &input)
pure virtual

Add a shape layer to the network.

Parameters
input: The input tensor to the layer.
See also
IShapeLayer
Warning
addShape is only supported when hasImplicitBatchDimension() is false.
input to addShape cannot contain wildcard dimension values.
Returns
The new shape layer, or nullptr if it could not be created.

◆ hasImplicitBatchDimension()

virtual bool nvinfer1::INetworkDefinition::hasImplicitBatchDimension ( ) const
pure virtual

Query whether the network was created with an implicit batch dimension.

Returns
True if tensors have implicit batch dimension, false otherwise.

This is a network-wide property. Either all tensors in the network have an implicit batch dimension or none of them do.

hasImplicitBatchDimension() is true if and only if this INetworkDefinition was created with createNetwork() or createNetworkV2() without NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag.

See also
createNetworkV2

◆ markOutputForShapes()

virtual bool nvinfer1::INetworkDefinition::markOutputForShapes ( ITensor &tensor)
pure virtual

Enable tensor's value to be computed by IExecutionContext::getShapeBinding.

Returns
True if successful, false if tensor is already marked as an output.

The tensor must be of type DataType::kINT32 and have no more than one dimension.

Warning
The tensor must have dimensions that can be determined to be constants at build time.
It is an error to mark a network input as a shape output.
See also
isShapeBinding(), getShapeBinding()
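
A sketch (assuming a network and an existing tensor t) of exposing a tensor's shape as a shape binding so it can be read back with IExecutionContext::getShapeBinding:

    IShapeLayer* shapeLayer = network->addShape(*t);   // output is a 1-D Int32 tensor
    bool marked = network->markOutputForShapes(*shapeLayer->getOutput(0));
    // marked is false if the tensor was already marked as an output.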

◆ unmarkOutputForShapes()

virtual bool nvinfer1::INetworkDefinition::unmarkOutputForShapes ( ITensor &tensor)
pure virtual

Undo markOutputForShapes.

Warning
inputs to addShape cannot contain wildcard dimension values.
Returns
True if successful, false if tensor is not marked as an output.

◆ addParametricReLU()

virtual IParametricReLULayer* nvinfer1::INetworkDefinition::addParametricReLU ( ITensor &input,
ITensor &slope
)
pure virtual noexcept

Add a parametric ReLU layer to the network.

Parameters
input: The input tensor to the layer.
slope: The slope tensor to the layer. This tensor should be unidirectionally broadcastable to the input tensor.
See also
IParametricReLULayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new parametric ReLU layer, or nullptr if it could not be created.

◆ addConvolutionNd()

virtual IConvolutionLayer* nvinfer1::INetworkDefinition::addConvolutionNd ( ITensor &input,
int32_t  nbOutputMaps,
Dims  kernelSize,
Weights  kernelWeights,
Weights  biasWeights 
)
pure virtual

Add a multi-dimension convolution layer to the network.

Parameters
input: The input tensor to the convolution.
nbOutputMaps: The number of output feature maps for the convolution.
kernelSize: The multi-dimensions of the convolution kernel.
kernelWeights: The kernel weights for the convolution.
biasWeights: The optional bias weights for the convolution.
See also
IConvolutionLayer
Warning
It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
Int32 tensors are not valid input tensors.
Only 2D or 3D convolution is supported.
Returns
The new convolution layer, or nullptr if it could not be created.
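
A sketch of a 2D convolution, assuming a network, an NCHW input tensor with 3 channels, and <vector> included; the weight buffers (zero-filled here as placeholders for trained weights) must stay alive until the engine is built. The kernel count is nbOutputMaps * C * kH * kW and the bias count is nbOutputMaps.

    std::vector<float> kernel(32 * 3 * 3 * 3, 0.0f);
    std::vector<float> bias(32, 0.0f);
    Weights kernelWeights{DataType::kFLOAT, kernel.data(), static_cast<int64_t>(kernel.size())};
    Weights biasWeights{DataType::kFLOAT, bias.data(), static_cast<int64_t>(bias.size())};
    IConvolutionLayer* conv = network->addConvolutionNd(*input, 32, DimsHW{3, 3},
                                                        kernelWeights, biasWeights);
    conv->setStrideNd(DimsHW{1, 1});
    conv->setPaddingNd(DimsHW{1, 1});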

◆ addPoolingNd()

virtual IPoolingLayer* nvinfer1::INetworkDefinition::addPoolingNd ( ITensor &input,
PoolingType  type,
Dims  windowSize 
)
pure virtual

Add a multi-dimension pooling layer to the network.

Parameters
input: The input tensor to the layer.
type: The type of pooling to apply.
windowSize: The size of the pooling window.
See also
IPoolingLayer PoolingType
Warning
Int32 tensors are not valid input tensors.
Only 2D or 3D pooling is supported.
Returns
The new pooling layer, or nullptr if it could not be created.

◆ addDeconvolutionNd()

virtual IDeconvolutionLayer* nvinfer1::INetworkDefinition::addDeconvolutionNd ( ITensor &input,
int32_t  nbOutputMaps,
Dims  kernelSize,
Weights  kernelWeights,
Weights  biasWeights 
)
pure virtual

Add a multi-dimension deconvolution layer to the network.

Parameters
input: The input tensor to the layer.
nbOutputMaps: The number of output feature maps.
kernelSize: The multi-dimensions of the deconvolution kernel.
kernelWeights: The kernel weights for the deconvolution.
biasWeights: The optional bias weights for the deconvolution.
See also
IDeconvolutionLayer
Warning
It is an error to specify a wildcard value for the 'C' dimension of the input tensor.
Int32 tensors are not valid input tensors.
Only 2D or 3D deconvolution is supported.
Returns
The new deconvolution layer, or nullptr if it could not be created.

◆ addScaleNd()

virtual IScaleLayer* nvinfer1::INetworkDefinition::addScaleNd ( ITensor &input,
ScaleMode  mode,
Weights  shift,
Weights  scale,
Weights  power,
int32_t  channelAxis 
)
pure virtual

Add a multi-dimension scale layer to the network.

Parameters
input: The input tensor to the layer.
mode: The scaling mode.
shift: The shift value.
scale: The scale value.
power: The power value.
channelAxis: The channel axis.

If the weights are available, then the size of the weights depends on the ScaleMode. For ::kUNIFORM, the number of weights equals 1. For ::kCHANNEL, the number of weights equals the channel dimension. For ::kELEMENTWISE, the number of weights equals the product of all input dimensions at channelAxis and beyond.

For example, if the input dimensions are [A,B,C,D,E,F] and channelAxis=2: for ::kUNIFORM, the number of weights is 1. For ::kCHANNEL, the number of weights is C. For ::kELEMENTWISE, the number of weights is C*D*E*F.

See also
IScaleLayer
Warning
Int32 tensors are not valid input tensors.
Only 2D or 3D scale is supported.
Returns
The new Scale layer, or nullptr if it could not be created.

◆ addResize()

virtual IResizeLayer* nvinfer1::INetworkDefinition::addResize ( ITensor &input)
pure virtual

Add a resize layer to the network.

Parameters
input: The input tensor to the layer.
See also
IResizeLayer
Warning
Int32 tensors are not valid input tensors.
Returns
The new resize layer, or nullptr if it could not be created.

◆ hasExplicitPrecision()

virtual bool nvinfer1::INetworkDefinition::hasExplicitPrecision ( ) const
pure virtual

True if network is an explicit precision network.

hasExplicitPrecision() is true if and only if this INetworkDefinition was created with createNetworkV2() with NetworkDefinitionCreationFlag::kEXPLICIT_PRECISION set.

See also
createNetworkV2
Returns
True if network has explicit precision, false otherwise.

◆ addLoop()

virtual ILoop* nvinfer1::INetworkDefinition::addLoop ( )
pure virtual noexcept

Add a loop to the network.

An ILoop provides a way to specify a recurrent subgraph.

Returns
Pointer to ILoop that can be used to add loop boundary layers for the loop, or nullptr if network has an implicit batch dimension or this version of TensorRT does not support loops.
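
As a rough sketch only (API details should be checked against ILoop; start and delta are assumed, same-shape network tensors): a counted loop that adds delta to an accumulator a fixed number of times.

    // Trip count: a 0-D Int32 constant (the value must outlive the build).
    static int32_t tripCount = 10;
    Dims scalarDims{};
    scalarDims.nbDims = 0;
    ITensor* count = network->addConstant(scalarDims, Weights{DataType::kINT32, &tripCount, 1})->getOutput(0);

    ILoop* loop = network->addLoop();
    loop->addTripLimit(*count, TripLimit::kCOUNT);
    IRecurrenceLayer* acc = loop->addRecurrence(*start);            // initial accumulator value
    IElementWiseLayer* step = network->addElementWise(*acc->getOutput(0), *delta,
                                                      ElementWiseOperation::kSUM);
    acc->setInput(1, *step->getOutput(0));                          // value fed back each iteration
    ITensor* result = loop->addLoopOutput(*acc->getOutput(0), LoopOutput::kLAST_VALUE)->getOutput(0);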

◆ addSelect()

virtual ISelectLayer* nvinfer1::INetworkDefinition::addSelect ( ITensor &condition,
ITensor &thenInput,
ITensor &elseInput
)
pure virtual

Add a select layer to the network.

Parameters
condition: The condition tensor to the layer.
thenInput: The "then" input tensor to the layer.
elseInput: The "else" input tensor to the layer.
See also
ISelectLayer
Returns
The new select layer, or nullptr if it could not be created.

◆ addFill()

virtual IFillLayer* nvinfer1::INetworkDefinition::addFill ( Dims  dimensions,
FillOperation  op 
)
pure virtual noexcept

Add a fill layer to the network.

Parameters
dimensions: The output tensor dimensions.
op: The fill operation that the layer applies.
Warning
The dimensions parameter's nbDims must be 1.
See also
IFillLayer
Returns
The new fill layer, or nullptr if it could not be created.

◆ addPaddingNd()

virtual IPaddingLayer* nvinfer1::INetworkDefinition::addPaddingNd ( ITensor &input,
Dims  prePadding,
Dims  postPadding 
)
pure virtual

Add a padding layer to the network.

Only 2D padding is currently supported.

Parameters
input: The input tensor to the layer.
prePadding: The padding to apply to the start of the tensor.
postPadding: The padding to apply to the end of the tensor.
See also
IPaddingLayer
Returns
The new padding layer, or nullptr if it could not be created.

Member Data Documentation

◆ nbOutputMaps

int32_t nvinfer1::INetworkDefinition::nbOutputMaps

◆ kernelSize

int32_t DimsHW nvinfer1::INetworkDefinition::kernelSize

◆ kernelWeights

int32_t DimsHW Weights nvinfer1::INetworkDefinition::kernelWeights

◆ biasWeights

int32_t DimsHW Weights Weights nvinfer1::INetworkDefinition::biasWeights = 0

◆ type

PoolingType nvinfer1::INetworkDefinition::type

◆ windowSize

PoolingType DimsHW nvinfer1::INetworkDefinition::windowSize = 0

◆ layerCount

int32_t nvinfer1::INetworkDefinition::layerCount

◆ hiddenSize [1/2]

int32_t std::size_t nvinfer1::INetworkDefinition::hiddenSize

◆ maxSeqLen [1/2]

int32_t std::size_t int32_t nvinfer1::INetworkDefinition::maxSeqLen

◆ op [1/2]

int32_t std::size_t int32_t RNNOperation nvinfer1::INetworkDefinition::op

◆ mode

int32_t std::size_t int32_t RNNOperation RNNInputMode nvinfer1::INetworkDefinition::mode

◆ dir

int32_t std::size_t int32_t RNNOperation RNNInputMode RNNDirection nvinfer1::INetworkDefinition::dir

◆ weights

int32_t std::size_t int32_t RNNOperation RNNInputMode RNNDirection Weights nvinfer1::INetworkDefinition::weights

◆ bias

int32_t std::size_t int32_t RNNOperation RNNInputMode RNNDirection Weights Weights nvinfer1::INetworkDefinition::bias = 0

◆ nbInputs

int32_t nvinfer1::INetworkDefinition::nbInputs

◆ plugin [1/2]

int32_t IPlugin& nvinfer1::INetworkDefinition::plugin = 0

◆ prePadding

DimsHW nvinfer1::INetworkDefinition::prePadding

◆ postPadding

DimsHW DimsHW nvinfer1::INetworkDefinition::postPadding = 0

◆ transpose0

bool nvinfer1::INetworkDefinition::transpose0

◆ input1

bool ITensor& nvinfer1::INetworkDefinition::input1

◆ transpose1

bool ITensor bool nvinfer1::INetworkDefinition::transpose1 = 0

◆ hiddenSize [2/2]

int32_t int32_t nvinfer1::INetworkDefinition::hiddenSize

◆ maxSeqLen [2/2]

int32_t int32_t int32_t nvinfer1::INetworkDefinition::maxSeqLen

◆ op [2/2]

int32_t int32_t int32_t RNNOperation nvinfer1::INetworkDefinition::op = 0

◆ plugin [2/2]

int32_t IPluginExt& nvinfer1::INetworkDefinition::plugin = 0

The documentation for this class was generated from the following file: