|
| void | backward_network (Darknet::Network &net, Darknet::NetworkState state) |
| |
| void | calculate_binary_weights (DarknetNetworkPtr ptr) |
| | This is part of the original C API.
|
| |
| void | copy_cudnn_descriptors (const Darknet::Layer &src, Darknet::Layer *dst) |
| |
| void | copy_weights_net (const Darknet::Network &net_train, Darknet::Network *net_map) |
| |
| void | custom_get_region_detections (const Darknet::Layer &l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, Darknet::Detection *dets, int letter) |
| |
| void | ema_apply (Darknet::Network &net) |
| |
| void | ema_update (Darknet::Network &net, float ema_alpha) |
| |
| void | fill_network_boxes (Darknet::Network *net, int w, int h, float thresh, float hier, int *map, int relative, Darknet::Detection *dets, int letter) |
| |
| void | fill_network_boxes_batch (Darknet::Network *net, int w, int h, float thresh, float hier, int *map, int relative, Darknet::Detection *dets, int letter, int batch) |
| |
| static void | fill_network_boxes_v3 (Darknet::Network *net, int w, int h, float thresh, float hier, int *map, int relative, Darknet::Detection *dets, int letter, Darknet::Output_Object_Cache &cache) |
| |
| void | forward_blank_layer (Darknet::Layer &l, Darknet::NetworkState state) |
| |
| void | forward_network (Darknet::Network &net, Darknet::NetworkState state) |
| |
| void | free_batch_detections (det_num_pair *det_num_pairs, int n) |
| |
| void | free_detections (detection *dets, int n) |
| | This is part of the original C API. Do not use in new code.
|
| |
| void | free_network (Darknet::Network &net) |
| | Free all memory allocations for the given neural network.
|
| |
| void | free_network_ptr (DarknetNetworkPtr ptr) |
| | This is part of the original C API.
|
| |
| void | free_network_recurrent_state (Darknet::Network &net) |
| |
| void | fuse_conv_batchnorm (Darknet::Network &net) |
| | Fuses a convolutional layer and its following batch normalization layer into a single operation. (Description originally drafted with ChatGPT.)
|
| |
| int | get_current_batch (const Darknet::Network &net) |
| |
| int64_t | get_current_iteration (const Darknet::Network &net) |
| |
| float | get_current_rate (const Darknet::Network &net) |
| |
| float | get_current_seq_subdivisions (const Darknet::Network &net) |
| |
| detection * | get_network_boxes (DarknetNetworkPtr ptr, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter) |
| | This is part of the original C API.
|
| |
| float | get_network_cost (const Darknet::Network &net) |
| |
| Darknet::Image | get_network_image (Darknet::Network &net) |
| |
| Darknet::Image | get_network_image_layer (Darknet::Network &net, int i) |
| |
| int | get_network_input_size (Darknet::Network &net) |
| |
| float * | get_network_output (Darknet::Network &net) |
| |
| int | get_network_output_size (Darknet::Network &net) |
| |
| int | get_sequence_value (const Darknet::Network &net) |
| |
| int | is_ema_initialized (const Darknet::Network &net) |
| |
| static float | lrelu (float src) |
| |
| Darknet::Network | make_network (int n) |
| | Think of this as the constructor for the Darknet::Network object.
|
| |
| detection * | make_network_boxes (DarknetNetworkPtr ptr, float thresh, int *num) |
| | This is part of the original C API.
|
| |
| Darknet::Detection * | make_network_boxes_batch (Darknet::Network *net, float thresh, int *num, int batch) |
| |
| Darknet::Detection * | make_network_boxes_v3 (Darknet::Network *net, const float thresh, int *num, Darknet::Output_Object_Cache &cache) |
| |
| int | network_height (Darknet::Network *net) |
| |
| float * | network_predict (Darknet::Network &net, float *input) |
| |
| det_num_pair * | network_predict_batch (Darknet::Network *net, Darknet::Image im, int batch_size, int w, int h, float thresh, float hier, int *map, int relative, int letter) |
| |
| matrix | network_predict_data (Darknet::Network &net, data test) |
| |
| matrix | network_predict_data_multi (Darknet::Network &net, data test, int n) |
| |
| float * | network_predict_image (DarknetNetworkPtr ptr, const DarknetImage im) |
| | This is part of the original C API.
|
| |
| float * | network_predict_image_letterbox (DarknetNetworkPtr ptr, DarknetImage im) |
| | This is part of the original C API.
|
| |
| float * | network_predict_ptr (DarknetNetworkPtr ptr, float *input) |
| | This is part of the original C API.
|
| |
| int | network_width (Darknet::Network *net) |
| |
| int | num_detections (Darknet::Network *net, float thresh) |
| |
| int | num_detections_batch (Darknet::Network *net, float thresh, int batch) |
| |
| int | num_detections_v3 (Darknet::Network *net, float thresh, Darknet::Output_Object_Cache &cache) |
| | Basically a wrapper for yolo_num_detections_v3().
|
| |
| int | recalculate_workspace_size (Darknet::Network *net) |
| |
| void | reject_similar_weights (Darknet::Network &net, float sim_threshold) |
| |
| void | reset_network_state (Darknet::Network *net, int b) |
| |
| void | reset_rnn (DarknetNetworkPtr ptr) |
| | This is part of the original C API.
|
| |
| int | resize_network (Darknet::Network *net, int w, int h) |
| |
| void | restore_network_recurrent_state (Darknet::Network &net) |
| |
| void | set_batch_network (Darknet::Network *net, int b) |
| |
| float | train_network (Darknet::Network &net, data d) |
| |
| float | train_network_batch (Darknet::Network net, data d, int n) |
| |
| float | train_network_datum (Darknet::Network &net, float *x, float *y) |
| |
| float | train_network_waitkey (Darknet::Network &net, data d, int wait_key) |
| |
| void | update_network (Darknet::Network &net) |
| |
| void | visualize_network (Darknet::Network &net) |
| |
ChatGPT: The function fuse_conv_batchnorm() in the Darknet/YOLO codebase is responsible for fusing the convolutional layer and batch normalization layer into a single operation.
This process is essential for both model optimization and inference efficiency, as it reduces the number of operations needed during model execution.
In Darknet, when you have a convolutional layer followed by a batch normalization layer, the two layers can be fused into a single convolution-like operation that combines both the convolution and normalization. This reduces the need for the separate batch normalization step during inference, thus improving speed.
The fused operation allows the model to perform a single convolution pass instead of three separate steps:
- Convolution operation.
- Batch normalization (normalization, scaling, shifting).
- Activation function (like ReLU).