Summary
This is a binary classification problem with a lot of distracting features. The solution steps are as follows:
- First denoise the original image with a Gaussian blur (kernel (9, 9)), then binarize it with threshold 220, and clean the result with morphological processing (a border-seeded reconstruction by dilation followed by an opening, kernel (5, 5)) so that holes inside the card region are filled, giving the mask image
- Take the rotated bounding box (center, width/height, rotation angle) and corner points of the card contour in the mask (the second-largest contour overall, since the forced white border is the largest), build a perspective-transform matrix from the corners, and warp the corresponding region of the original image to obtain the card image; the result is portrait but its up/down orientation is not normalized
- Analysis shows that the edge features of YGO cards are fairly consistent, so mask out the useless central features by setting the region [50:355, 22:280] of the card image to white, keeping only the edges; from the masked images, pick one card facing up and one facing down and save them to the current directory as the templates temp1.png and temp2.png (a sketch of this template-making step follows this list)
- For each image, compute the ORB similarity against each of the two templates; if the sum of the two similarities is > 0.04, classify it as YGO, otherwise as PTCG
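The two templates are prepared once, ahead of time, rather than by the solver script itself. Below is a minimal sketch of how they could be produced, assuming the helper functions from the Code section below and two hypothetical source photos, sample_up.png and sample_down.png, containing one card facing up and one facing down:

# Minimal template-generation sketch (assumes the helpers defined in the Code
# section below; sample_up.png / sample_down.png are hypothetical input files).
for src, dst in [('sample_up.png', 'temp1.png'), ('sample_down.png', 'temp2.png')]:
    img = cv2.imread(src)
    original_img, gray_img, marker = Img_Outline(img)    # mask of the bright background
    box, _ = findContours_img(original_img, marker)      # rotated box around the card
    card = Perspective_transform(box, original_img)      # warped, upright card image
    card[50:355, 22:280] = 255                           # keep only the border features
    cv2.imwrite(dst, card)                               # saved as temp1.png / temp2.png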
Code
import base64
import io
import math
import os
import zipfile

import cv2
import numpy as np
from tqdm import tqdm
from pwn import *
def Img_Outline(original_img):
    # Gaussian blur to suppress noise, then binarize: the bright background
    # (>= 220) becomes white, the card body becomes black.
    gray_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray_img, (9, 9), 0)
    _, RedThresh = cv2.threshold(blurred, 220, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

    # Morphological reconstruction: seed the marker with the image border and
    # repeatedly dilate it, constrained by the thresholded image, until it
    # stops changing. The marker ends up as the white background connected to
    # the border; bright spots inside the card stay black (holes are filled).
    marker = np.zeros_like(gray_img)
    marker[0, :] = 255
    marker[-1, :] = 255
    marker[:, 0] = 255
    marker[:, -1] = 255
    while True:
        marker_pre = marker
        dilation = cv2.dilate(marker, kernel=kernel)
        marker = np.min((dilation, RedThresh), axis=0)
        if (marker_pre == marker).all():
            break

    # Opening removes small specks; forcing the border to white guarantees the
    # card appears as a closed dark region inside a white frame.
    marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
    marker[0:5, :] = 255
    marker[-5:, :] = 255
    marker[:, 0:5] = 255
    marker[:, -5:] = 255
    return original_img, gray_img, marker
def findContours_img(original_img, marker):
    # The largest contour is the outer boundary of the white background/frame,
    # so the card is the second-largest contour.
    contours, hierarchy = cv2.findContours(marker, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(contours, key=cv2.contourArea, reverse=True)[1]
    rect = cv2.minAreaRect(c)
    box = np.intp(cv2.boxPoints(rect))  # four corners of the rotated bounding box
    draw_img = cv2.drawContours(original_img.copy(), [box], -1, (0, 0, 255), 3)
    return box, draw_img
def Perspective_transform(box, original_img):
    # Side lengths of the rotated box, used as the output width and height.
    original_W = math.ceil(np.sqrt((box[3][1] - box[2][1]) ** 2 + (box[3][0] - box[2][0]) ** 2))
    original_H = math.ceil(np.sqrt((box[3][1] - box[0][1]) ** 2 + (box[3][0] - box[0][0]) ** 2))
    # Map the four corners of the box onto an axis-aligned rectangle.
    pts1 = np.float32([box[0], box[1], box[2], box[3]])
    pts2 = np.float32([[original_W + 1, original_H + 1], [0, original_H + 1], [0, 0], [original_W + 1, 0]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    result_img = cv2.warpPerspective(original_img, M, (original_W + 3, original_H + 1))
    # If the card came out landscape, rotate it 90 degrees to portrait.
    if original_H < original_W:
        result_img = cv2.flip(cv2.transpose(result_img), 1)
    return result_img
def img_similarity(img1, img2):
    # ORB keypoints + brute-force Hamming matching with Lowe's ratio test;
    # similarity = fraction of matches that survive the ratio test.
    try:
        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
        good = [m for (m, n) in matches if m.distance < 0.75 * n.distance]
        similarity = len(good) / len(matches)
        return similarity
    except Exception:
        print('Failed to compute the similarity between the two images')
        exit()
if __name__ == "__main__":
    if not os.path.exists('./result'):
        os.makedirs('result')
    final = ''
    # The two edge-only templates: one card facing up, one facing down.
    temp1 = cv2.imread('./temp1.png', cv2.IMREAD_GRAYSCALE)
    temp2 = cv2.imread('./temp2.png', cv2.IMREAD_GRAYSCALE)

    # The server sends the card set as one base64-encoded zip archive.
    r = remote('week-3.hgame.lwsec.cn', 30802)
    r.recvuntil(b'...')
    r.send(b'\n')
    b64 = r.recvline()[:-1]
    with open('row.zip', 'wb') as out:
        base64.decode(io.BytesIO(b64), out)

    zipfile_path = './row.zip'
    with zipfile.ZipFile(zipfile_path, mode='r') as zfile:
        for name in tqdm(zfile.namelist()):
            if '.png' not in name:
                continue
            with zfile.open(name, mode='r') as image_file:
                content = image_file.read()
            # Decode the PNG bytes, segment the card and warp it upright.
            image = np.asarray(bytearray(content), dtype='uint8')
            image = cv2.imdecode(image, cv2.IMREAD_ANYCOLOR)
            original_img, gray_img, RedThresh = Img_Outline(image)
            box, draw_img = findContours_img(original_img, RedThresh)
            result_img = Perspective_transform(box, original_img)
            # Blank out the card art so only the border features remain.
            result_img[50:355, 22:280] = 255
            cv2.imwrite(f"./result/{name}", result_img)
            result_img = cv2.cvtColor(result_img, cv2.COLOR_BGR2GRAY)
            # Compare against both templates; a combined ORB similarity
            # above 0.04 means YGO ('1'), otherwise PTCG ('0').
            similarity1 = img_similarity(temp1, result_img)
            similarity2 = img_similarity(temp2, result_img)
            if (similarity1 + similarity2) > 0.04:
                final += '1'
            else:
                final += '0'
    print(final)
    r.sendline(final.encode())
    r.interactive()