The code in this article is based on the following blog posts:
基于Pytorch的特征图提取 (Feature map extraction based on PyTorch)
pytorch 提取卷积神经网络的特征图可视化 (Visualizing feature maps extracted from a CNN with PyTorch)
The places that need to be modified or paid attention to are marked with comments in the code; when using it, you only need to look at the commented parts.
The code is as follows:
import os
import argparse

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision as tv
import torchvision.transforms as transforms
import torchvision.models as models
import skimage.data
import skimage.io
import skimage.transform
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2

from net import model  # import the network model you built yourself


class FeatureExtractor(nn.Module):
    def __init__(self, submodule, extracted_layers):
        super(FeatureExtractor, self).__init__()
        self.submodule = submodule
        self.extracted_layers = extracted_layers

    def forward(self, x):
        outputs = {}
        for name, module in self.submodule._modules.items():
            if "fc" in name:
                # flatten before the fully connected layers
                x = x.view(x.size(0), -1)
            x = module(x)
            print(name)  # print each layer name as it is traversed
            # keep the output if the layer was requested (parentheses added so that
            # "extract everything" also skips the fc layers)
            if (self.extracted_layers is None or name in self.extracted_layers) and 'fc' not in name:
                outputs[name] = x
        return outputs


def get_picture(pic_name, transform):
    img = skimage.io.imread(pic_name)
    img = skimage.transform.resize(img, (256, 256))
    img = np.asarray(img, dtype=np.float32)
    return transform(img)


def make_dirs(path):
    if not os.path.exists(path):
        os.makedirs(path)


def get_feature():
    pic_dir = '0001.jpg'  # path of the image to visualize
    transform = transforms.ToTensor()
    img = get_picture(pic_dir, transform)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    img = img.unsqueeze(0)  # add a batch dimension
    img = img.to(device)

    net = model().to(device)  # just replace model with your own model
    # net.load_state_dict(torch.load('model/model_e9.pth'))

    exact_list = ['conv1', 'avg_pool', 'max_pool']  # names of the layers whose feature maps you want to visualize
    dst = 'tuxiang'   # directory where the visualized feature maps are saved
    therd_size = 256  # small feature maps are additionally saved upscaled to this size

    myexactor = FeatureExtractor(net, exact_list)
    outs = myexactor(img)
    for k, v in outs.items():
        if 'fc' in k:
            continue
        features = v[0]                              # drop the batch dimension
        feature = features.detach().cpu().numpy()    # (channels, H, W)
        iter_range = features.shape[0]               # number of channels in this layer
        for i in range(iter_range):
            # plt.imshow(x[0].data.numpy()[0, i, :, :], cmap='jet')
            feature_img = feature[i, :, :]
            feature_img = np.asarray(feature_img * 255, dtype=np.uint8)  # scale to 0-255

            dst_path = os.path.join(dst, k)
            make_dirs(dst_path)
            feature_img = cv2.applyColorMap(feature_img, cv2.COLORMAP_JET)
            if feature_img.shape[0] < therd_size:
                tmp_file = os.path.join(dst_path, str(i) + '_' + str(therd_size) + '.png')
                tmp_img = feature_img.copy()
                tmp_img = cv2.resize(tmp_img, (therd_size, therd_size), interpolation=cv2.INTER_NEAREST)
                cv2.imwrite(tmp_file, tmp_img)

            dst_file = os.path.join(dst_path, str(i) + '.png')
            cv2.imwrite(dst_file, feature_img)


if __name__ == '__main__':
    get_feature()
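The comment on net = model() says to plug in your own network. As a concrete illustration of what that swap looks like, here is a minimal sketch that uses torchvision's resnet18 as a stand-in (this is not the model from the referenced posts); the names in exact_list must match the model's top-level child module names, and the FeatureExtractor class defined above is assumed to be in scope:

import torch
import torchvision.models as models

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# hypothetical drop-in: a torchvision ResNet instead of the custom `model`
net = models.resnet18().to(device)  # or load your own weights afterwards

# resnet18 exposes children named conv1, bn1, relu, maxpool,
# layer1..layer4, avgpool, fc -- exact_list must use these names
myexactor = FeatureExtractor(net, ['conv1', 'layer1', 'layer4'])

dummy = torch.randn(1, 3, 256, 256, device=device)  # same shape the script feeds in
outs = myexactor(dummy)
for name, feat in outs.items():
    print(name, feat.shape)  # e.g. conv1: torch.Size([1, 64, 128, 128])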
The original image is shown below:
The results are shown below:
The figure below visualizes a single channel of the first convolutional layer.
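If you only want a quick look at one channel without writing PNGs to disk, the commented-out plt.imshow line in the listing can be expanded into a small snippet like this (a sketch, assuming outs = myexactor(img) has already been computed as above and 'conv1' is one of the extracted layers):

import matplotlib.pyplot as plt

# show channel 0 of the 'conv1' feature map with the same jet colormap
conv1_feat = outs['conv1'][0].detach().cpu().numpy()  # shape: (channels, H, W)
plt.imshow(conv1_feat[0], cmap='jet')
plt.title('conv1, channel 0')
plt.axis('off')
plt.show()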
A quick note: directly visualizing feature maps is not as informative as a heat map, and the feature maps of the later convolutional layers are already beyond what a human can interpret. The figure is shown below:
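One simple way to get a heat-map-style view is to blend the JET-colored feature map over the resized input image. This is a generic overlay trick, not the method used in the referenced posts; a minimal sketch, reusing feature_img from the loop above and assuming the input was resized to 256x256:

import cv2

# hypothetical overlay: blend the colored feature map over the original image
orig = cv2.imread('0001.jpg')                       # same image fed to the network
orig = cv2.resize(orig, (256, 256))
heat = cv2.resize(feature_img, (256, 256))          # JET-colored map from the loop above
overlay = cv2.addWeighted(orig, 0.6, heat, 0.4, 0)  # 60% image, 40% heat map
cv2.imwrite('overlay.png', overlay)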