Copyright: this is my original work. Reposting is allowed, but please do not copy it without attribution.
Environment: the darknet framework.
I have recently been working on a fire-and-smoke detection project whose performance budget rules out large networks. I chose EfficientNet-B0 as the backbone because its MBConv blocks use an expand-then-compress (inverted bottleneck) structure that handles targets with weak features well, and in my fire-and-smoke experiments EfficientNet-B0 did outperform the usual backbones (ResNet, darknet53, etc.). I therefore combined the EfficientNet-B0 backbone, which represents features well, with the YOLOv3-tiny head and built the efficientnet_b0_yolov3_tiny cfg below (a short usage sketch comes first, then the full cfg). The trained model is only about 23 MB, so feel free to use it.
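As a minimal usage sketch (assumptions: the data/fire.data file, its names list, and the weight-file names below are placeholders for your own dataset, not files provided with this post), training and single-image testing with the darknet framework look roughly like this:

# train with mAP evaluation on the validation set listed in fire.data
./darknet detector train data/fire.data efficientnet_b0_yolov3_tiny.cfg -map
# test one image with the trained weights (darknet saves <cfg-name>_final.weights in the backup directory named in the .data file)
./darknet detector test data/fire.data efficientnet_b0_yolov3_tiny.cfg backup/efficientnet_b0_yolov3_tiny_final.weights test.jpg

The full cfg file follows.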
[net]
# Testing
#batch=1
#subdivisions=1
# Training
batch=24
subdivisions=4
width=512
height=512
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1
learning_rate=0.00161
burn_in=1000
max_batches = 150200
policy=steps
steps=80000,120000
scales=.1,.1
#weights_reject_freq=1001
#ema_alpha=0.9998
#equidistant_point=1000
#num_sigmas_reject_badlabels=3
#badlabels_rejection_percentage=0.2
### CONV1 - 1 (1)
# conv1
[convolutional]
filters=32
size=3
pad=1
stride=2
batch_normalize=1
activation=swish
### CONV2 - MBConv1 - 1 (1)
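# Block pattern used throughout: 1x1 expand conv -> depthwise conv -> squeeze-and-excitation
# (avgpool, 1x1 squeeze, 1x1 excitation with logistic, scale_channels) -> 1x1 linear projection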
# conv2_1_expand
[convolutional]
filters=32
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv2_1_dwise
[convolutional]
groups=32
filters=32
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=4 (recommended r=16)
[convolutional]
filters=8
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=32
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv2_1_linear
[convolutional]
filters=16
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV3 - MBConv6 - 1 (2)
# conv2_2_expand
[convolutional]
filters=96
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv2_2_dwise
[convolutional]
groups=96
filters=96
size=3
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=6 (recommended r=16)
[convolutional]
filters=16
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=96
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv2_2_linear
[convolutional]
filters=24
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV3 - MBConv6 - 2 (2)
# conv3_1_expand
[convolutional]
filters=144
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv3_1_dwise
[convolutional]
groups=144
filters=144
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=18 (recommended r=16)
[convolutional]
filters=8
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=144
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv3_1_linear
[convolutional]
filters=24
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV4 - MBConv6 - 1 (2)
# dropout only before residual connection
[dropout]
probability=.2
# block_3_1
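# from=-9 reaches back past the dropout and the 7 layers of the MBConv block above to the
# block's input (the previous 1x1 linear projection), forming the residual connection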
[shortcut]
from=-9
activation=linear
# conv_3_2_expand
[convolutional]
filters=144
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_3_2_dwise
[convolutional]
groups=144
filters=144
size=5
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=18 (recommended r=16)
[convolutional]
filters=8
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=144
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_3_2_linear
[convolutional]
filters=40
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV4 - MBConv6 - 2 (2)
# conv_4_1_expand
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_1_dwise
[convolutional]
groups=192
filters=192
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=12 (recommended r=16)
[convolutional]
filters=16
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=192
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_1_linear
[convolutional]
filters=40
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV5 - MBConv6 - 1 (3)
# dropout only before residual connection
[dropout]
probability=.2
# block_4_2
[shortcut]
from=-9
activation=linear
# conv_4_3_expand
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_3_dwise
[convolutional]
groups=192
filters=192
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=12 (recommended r=16)
[convolutional]
filters=16
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=192
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_3_linear
[convolutional]
filters=80
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV5 - MBConv6 - 2 (3)
# conv_4_4_expand
[convolutional]
filters=384
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_4_dwise
[convolutional]
groups=384
filters=384
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=24
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=384
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_4_linear
[convolutional]
filters=80
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV5 - MBConv6 - 3 (3)
# dropout only before residual connection
[dropout]
probability=.2
# block_4_4
[shortcut]
from=-9
activation=linear
# conv_4_5_expand
[convolutional]
filters=384
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_5_dwise
[convolutional]
groups=384
filters=384
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=24
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=384
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_5_linear
[convolutional]
filters=80
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV6 - MBConv6 - 1 (3)
# dropout only before residual connection
[dropout]
probability=.2
# block_4_6
[shortcut]
from=-9
activation=linear
# conv_4_7_expand
[convolutional]
filters=384
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_4_7_dwise
[convolutional]
groups=384
filters=384
size=5
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=16 (recommended r=16)
[convolutional]
filters=24
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=384
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_4_7_linear
[convolutional]
filters=112
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV6 - MBConv6 - 2 (3)
# conv_5_1_expand
[convolutional]
filters=576
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_5_1_dwise
[convolutional]
groups=576
filters=576
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=18 (recommended r=16)
[convolutional]
filters=32
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=576
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_5_1_linear
[convolutional]
filters=112
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV6 - MBConv6 - 3 (3)
# dropout only before residual connection
[dropout]
probability=.2
# block_5_1
[shortcut]
from=-9
activation=linear
# conv_5_2_expand
[convolutional]
filters=576
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_5_2_dwise
[convolutional]
groups=576
filters=576
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=18 (recommended r=16)
[convolutional]
filters=32
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=576
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_5_2_linear
[convolutional]
filters=112
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 1 (4)
# dropout only before residual connection
[dropout]
probability=.2
# block_5_2
[shortcut]
from=-9
activation=linear
# conv_5_3_expand
[convolutional]
filters=576
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_5_3_dwise
[convolutional]
groups=576
filters=576
size=5
pad=1
stride=2
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=18 (recommended r=16)
[convolutional]
filters=32
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=576
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_5_3_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 2 (4)
# conv_6_1_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_1_dwise
[convolutional]
groups=960
filters=960
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=15 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_1_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 3 (4)
# dropout only before residual connection
[dropout]
probability=.2
# block_6_1
[shortcut]
from=-9
activation=linear
# conv_6_2_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_2_dwise
[convolutional]
groups=960
filters=960
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=15 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_2_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV7 - MBConv6 - 4 (4)
# dropout only before residual connection
[dropout]
probability=.2
# block_6_1
[shortcut]
from=-9
activation=linear
# conv_6_2_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_2_dwise
[convolutional]
groups=960
filters=960
size=5
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=15 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_2_linear
[convolutional]
filters=192
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
### CONV8 - MBConv6 - 1 (1)
# dropout only before residual connection
[dropout]
probability=.2
# block_6_2
[shortcut]
from=-9
activation=linear
# conv_6_3_expand
[convolutional]
filters=960
size=1
stride=1
pad=0
batch_normalize=1
activation=swish
# conv_6_3_dwise
[convolutional]
groups=960
filters=960
size=3
stride=1
pad=1
batch_normalize=1
activation=swish
#squeeze-n-excitation
[avgpool]
# squeeze ratio r=15 (recommended r=16)
[convolutional]
filters=64
size=1
stride=1
activation=swish
# excitation
[convolutional]
filters=960
size=1
stride=1
activation=logistic
# multiply channels
[scale_channels]
from=-4
# conv_6_3_linear
[convolutional]
filters=320
size=1
stride=1
pad=0
batch_normalize=1
activation=linear
[convolutional]
batch_normalize=1
filters=640
size=3
stride=1
pad=1
activation=swish
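# detection conv: filters = (classes + 5) * masks per head = (1 + 5) * 3 = 18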
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
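# this head runs on the 16x16 (stride-32) grid for a 512x512 input and uses the three largest anchors (mask 3,4,5)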
[yolo]
mask = 3,4,5
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=1
num=6
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=1
resize=1.5
nms_kind=greedynms
beta_nms=0.6
[route]
layers = -4
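# go back 4 layers to the 320-filter backbone output (stride 32) to build the second, finer-scale head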
[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky
[upsample]
stride=2
[route]
layers = -1, 90
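# layer 90 = conv_5_3_expand (576 filters), the last stride-16 feature map before downsampling;
# it is concatenated with the upsampled 128-filter map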
[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky
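# detection conv: filters = (classes + 5) * masks per head = (1 + 5) * 3 = 18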
[convolutional]
size=1
stride=1
pad=1
filters=18
activation=linear
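# this head runs on the 32x32 (stride-16) grid for a 512x512 input and uses the three smallest anchors (mask 0,1,2)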
[yolo]
mask = 0,1,2
anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319
classes=1
num=6
jitter=.3
scale_x_y = 1.05
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
ignore_thresh = .7
truth_thresh = 1
random=1
resize=1.5
nms_kind=greedynms
beta_nms=0.6
Copyright notice: this is an original article by qq_39523365, licensed under CC 4.0 BY-SA; when reposting, please include a link to the original source and this notice.