hytos / DTI_PID / DTI_PID / DTI_PID.py @ ee9d5bf3
이력 | 보기 | 이력해설 | 다운로드 (52.7 KB)
1 |
#region import libs
|
---|---|
2 |
import http.client |
3 |
import urllib, base64, json |
4 |
import cv2 |
5 |
import numpy as np |
6 |
import SymbolBase |
7 |
import symbol |
8 |
import TextInfo as ti |
9 |
import azure_ocr_module as OCR |
10 |
from PIL import Image |
11 |
from io import BytesIO |
12 |
import gc |
13 |
import os |
14 |
import glob |
15 |
import math, operator |
16 |
import threading |
17 |
import concurrent.futures as futures |
18 |
import XmlGenerator as xg |
19 |
import pytesseract |
20 |
import tesseract_ocr_module as TOCR |
21 |
import potrace |
22 |
import sys |
23 |
from PyQt5.QtCore import * |
24 |
from PyQt5.QtGui import * |
25 |
from PyQt5.QtWidgets import * |
26 |
from PyQt5.QtSvg import * |
27 |
import DTI_PID_UI |
28 |
import QtImageViewer |
29 |
|
30 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\Commands') |
31 |
import CreateCommand |
32 |
import CropCommand |
33 |
|
34 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\Shapes') |
35 |
import QGraphicsPolylineItem |
36 |
from QEngineeringLineItem import QEngineeringLineItem |
37 |
from SymbolSvgItem import SymbolSvgItem |
38 |
from QGraphicsBoundingBoxItem import QGraphicsBoundingBoxItem |
39 |
from AppDocData import AppDocData |
40 |
#endregion
|
41 |
|
42 |
## Tesseract path
|
43 |
pytesseract.pytesseract.tesseract_cmd = os.environ['TESSERACT_HOME'] + '\\tesseract.exe' |
44 |
tesseract_cmd = os.environ['TESSERACT_HOME'] + '\\tesseract.exe' |
45 |
|
46 |
#region Symbol Image path List for test
|
47 |
targetSymbolList = [] |
48 |
#endregion
|
49 |
|
50 |
#region Global variables
|
51 |
searchedSymbolList = [] |
52 |
src = [] |
53 |
srcGray = [] |
54 |
ocrCompletedSrc = [] |
55 |
afterDenoising = [] |
56 |
canvas = [] |
57 |
textInfoList = [] |
58 |
noteTextInfoList = [] |
59 |
|
60 |
WHITE_LIST_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-"
|
61 |
|
62 |
MIN_TEXT_SIZE = 10
|
63 |
|
64 |
THREAD_MAX_WORKER = os.cpu_count() |
65 |
threadLock = threading.Lock() |
66 |
|
67 |
ACCEPT_OVERLAY_AREA = 10
|
68 |
#endregion
|
69 |
|
70 |
'''
|
71 |
@history 2018.06.28 Jeongwoo Remove useless condition
|
72 |
'''
|
73 |
def checkTextInSymbol(pt):
    """Return True if point *pt* (x, y) lies inside any searched symbol
    whose OCR option allows text extraction.

    @history 2018.06.28 Jeongwoo Remove useless condition
    """
    global searchedSymbolList

    for sym in searchedSymbolList:
        # symbols flagged OCR_OPTION_NOT_EXEC never contribute text
        if sym.getOcrOption() == SymbolBase.OCR_OPTION_NOT_EXEC:
            continue
        sp = sym.getSp()
        if (sp[0] <= pt[0] <= sp[0] + sym.getWidth()) and (sp[1] <= pt[1] <= sp[1] + sym.getHeight()):
            return True

    return False
|
92 |
|
93 |
'''
|
94 |
@brief remove text from image by using ocr image
|
95 |
@author
|
96 |
'''
|
97 |
def removeText(img, pt, imgOCR):
    """Erase a recognized text region from *img* using its OCR patch.

    img    : full grayscale drawing (modified in place and returned)
    pt     : (x, y) top-left corner of the text region within *img*
    imgOCR : binary image of the text region
    """
    x, y = round(pt[0]), round(pt[1])
    w, h = imgOCR.shape[::-1]
    dilated = cv2.dilate(imgOCR, np.ones((1, 1), np.uint8))
    patch = cv2.bitwise_xor(img[y:y + h, x:x + w], cv2.bitwise_not(dilated))
    img[y:y + h, x:x + w] = cv2.dilate(patch, np.ones((2, 2), np.uint8))
    return img
|
106 |
|
107 |
#Convert into Grayscale image
|
108 |
def cvtGrayImage(img):
    """Return a grayscale copy of the given BGR image."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray
|
110 |
|
111 |
'''
|
112 |
@brief rotate (x,y) by given angle
|
113 |
@author Jeongwoo
|
114 |
@date 2018.??.??
|
115 |
@history humkyung 2018.04.13 fixed code when angle is 90 or 270
|
116 |
Jeongwoo 2018.04.27 Change calculation method with QTransform
|
117 |
'''
|
118 |
def getCoordOnRotatedImage(rAngle, x, y, originImageWidth, originImageHeight):
    """Map (x, y) on the original image to its coordinate on the image
    rotated by *rAngle* degrees (0/90/180/270), using a QTransform.

    @history humkyung 2018.04.13 fixed code when angle is 90 or 270
             Jeongwoo 2018.04.27 Change calculation method with QTransform
    """
    transform = QTransform()
    # rotation about the center of the rotated image: 90/270 swap width/height
    if rAngle in (90, 270):
        transform.translate(originImageHeight * 0.5, originImageWidth * 0.5)
    elif rAngle in (0, 180):
        transform.translate(originImageWidth * 0.5, originImageHeight * 0.5)
    transform.rotate(-abs(rAngle))
    transform.translate(-originImageWidth * 0.5, -originImageHeight * 0.5)
    mapped = transform.map(QPoint(x, y))
    return (mapped.x(), mapped.y())
|
133 |
|
134 |
def convertDirectionCodeToValue(directionCode):
    """Map a direction code string to its numeric value; -1 if unknown."""
    codes = {"UP": 0, "RIGHT": 1, "DOWN": 2, "LEFT": 3}
    return codes.get(directionCode, -1)
145 |
|
146 |
def convertValueToDirectionCode(value):
    """Map a numeric direction value to its code string; "NONE" if unknown."""
    names = {0: "UP", 1: "RIGHT", 2: "DOWN", 3: "LEFT"}
    return names.get(value, "NONE")
157 |
|
158 |
'''
|
159 |
@brief Remake rotated child symbol info
|
160 |
'''
|
161 |
def getRotatedChildInfo(additionalSymbol):
    """Remake rotated child symbol info: shift each child's direction one
    step (RIGHT->UP, DOWN->RIGHT, ..., UP->LEFT) and rebuild the
    "DIR,name/DIR,name" string. Returns "" for empty input."""
    if not additionalSymbol:
        return ""

    rotated = []
    for child in additionalSymbol.split("/"):
        code = child.split(",")[0]
        childName = child.split(",")[1]
        direction = convertDirectionCodeToValue(code)
        direction = (direction - 1) if direction > 0 else 3
        rotated.append(convertValueToDirectionCode(direction) + "," + childName)
    return "/".join(rotated)
|
174 |
|
175 |
|
176 |
#Check object contains pt
|
177 |
#obj is item in searchedSymbolList
|
178 |
#Check object contains pt
#obj is item in searchedSymbolList
def contains(obj, pt, tw, th):
    """Return the overlap of *obj*'s box with the tw x th box at *pt*,
    expressed as a percentage of the tw x th box (0 if disjoint)."""
    sp = obj.getSp()
    width = obj.getWidth()
    height = obj.getHeight()

    # reject when the boxes cannot intersect
    if sp[0] > pt[0] + tw or sp[0] + width < pt[0]:
        return 0
    if sp[1] > pt[1] + th or sp[1] + height < pt[1]:
        return 0

    # shared area
    x = max(sp[0], pt[0])
    y = max(sp[1], pt[1])
    w = min(sp[0] + width, pt[0] + tw) - x
    h = min(sp[1] + height, pt[1] + th) - y

    return float(w * h) / float(tw * th) * 100
199 |
|
200 |
def getSplitSrcList(srcPid, splitCount, splitWidth, splitHeight):
    """Split *srcPid* into a splitCount x splitCount grid of tiles.

    Returns a list of (start point, end point, sub-image) tuples in
    row-major order.
    """
    splitRoiList = []
    for row in range(splitCount):
        for col in range(splitCount):
            sp = (splitWidth * col, splitHeight * row)
            ep = (splitWidth * (col + 1), splitHeight * (row + 1))
            splitRoiList.append((sp, ep, srcPid[sp[1]:ep[1], sp[0]:ep[0]]))
    return splitRoiList
|
208 |
|
209 |
'''
|
210 |
@history 2018.06.12 Jeongwoo Type changed (int → float)
|
211 |
'''
|
212 |
def getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolRotatedAngle, rotateSymbolWidth, rotateSymbolHeight, originalSymbolWidth, originalSymbolHeight):
    """Return the symbol's original point as an 'x,y' string, rotated onto
    the rotated-symbol image.

    @history 2018.06.12 Jeongwoo Type changed (int -> float)
    """
    if additionalSymbol is None and symbolOriginalPoint is None:
        # no explicit original point recorded: fall back to the center
        return str(rotateSymbolWidth // 2) + ',' + str(rotateSymbolHeight // 2)

    opx = float(symbolOriginalPoint.split(',')[0])
    opy = float(symbolOriginalPoint.split(',')[1])
    rotated = getCoordOnRotatedImage(symbolRotatedAngle, opx, opy, originalSymbolWidth, originalSymbolHeight)
    return str(float(rotated[0])) + ',' + str(float(rotated[1]))
|
222 |
|
223 |
'''
|
224 |
@history 2018.06.12 Jeongwoo Type changed (int → float)
|
225 |
'''
|
226 |
def getCalculatedConnectionPoint(symbolConnectionPointStr, symbolRotatedAngle, rotateSymbolWidth, rotateSymbolHeight, originalSymbolWidth, originalSymbolHeight):
    """Rotate every connection point in the '/'-separated 'x,y' list onto
    the rotated-symbol image and return the rebuilt string.

    @history 2018.06.12 Jeongwoo Type changed (int -> float)
    """
    if symbolConnectionPointStr is None:
        return ""

    parts = []
    for item in symbolConnectionPointStr.split("/"):
        cpx = float(item.split(',')[0])
        cpy = float(item.split(',')[1])
        rPt = getCoordOnRotatedImage(symbolRotatedAngle, cpx, cpy, originalSymbolWidth, originalSymbolHeight)
        parts.append(str(float(rPt[0])) + ',' + str(float(rPt[1])))
    return "/".join(parts)
|
240 |
|
241 |
'''
|
242 |
@brief Add symbols
|
243 |
@author jwkim
|
244 |
@date
|
245 |
@history Change parameter (mpCount → hitRate)
|
246 |
'''
|
247 |
def addSearchedSymbol(id, sName, sType
        , sp, w, h, threshold, minMatchCount, hitRate, rotatedAngle
        , isDetectOnOrigin, rotateCount, ocrOption, isContainChild
        , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect):
    """Create a Symbol from the given detection data and append it to the
    global searched-symbol list.

    @history Change parameter (mpCount -> hitRate)
    """
    global searchedSymbolList

    searchedSymbolList.append(symbol.Symbol(id, sName, sType
        , sp, w, h, threshold, minMatchCount, hitRate, rotatedAngle
        , isDetectOnOrigin, rotateCount, ocrOption, isContainChild
        , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect))
258 |
|
259 |
|
260 |
#Calculate count of keypoint match result
|
261 |
#Calculate count of keypoint match result
def getMatchPointCount(src, cmp):
    """Count ORB keypoint matches between *src* and *cmp* that pass
    Lowe's ratio test, using a FLANN LSH matcher."""
    orb = cv2.ORB_create(1000, 2.0, 2, 1)

    _, des1 = orb.detectAndCompute(src, None)
    _, des2 = orb.detectAndCompute(cmp, None)

    # FLANN with Locality-Sensitive Hashing (suited to ORB's binary descriptors)
    # table_number      : number of hash tables used
    # key_size          : length of the key in the hash tables
    # multi_probe_level : levels for multi-probe LSH (0 = standard LSH)
    # checks            : maximum leafs to visit when searching for neighbours
    # ref : https://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=20, key_size=10, multi_probe_level=4)
    search_params = dict(checks=100)
    matcher = cv2.FlannBasedMatcher(index_params, search_params)

    matches = matcher.knnMatch(des1, des2, k=2)

    # Lowe's ratio test: keep a match only when clearly better than runner-up
    matchCount = sum(1 for pair in matches
                     if len(pair) == 2 and pair[0].distance < 0.85 * pair[1].distance)
    return matchCount
|
297 |
|
298 |
'''
|
299 |
@brief detect symbols on PID
|
300 |
@history humkyung 2018.06.08 add parameteres for signal
|
301 |
'''
|
302 |
def detectSymbolsOnPid(mainRes, targetSymbols, listWidget, updateProgressSignal):
    """Detect every symbol in *targetSymbols* on the P&ID drawing.

    @history humkyung 2018.06.08 add parameteres for signal
    """
    for target in targetSymbols:
        detectSymbolOnPid(mainRes, target, listWidget, updateProgressSignal)
305 |
|
306 |
'''
|
307 |
@brief detect symbol on PID
|
308 |
@author jwkim
|
309 |
@date
|
310 |
@history humkyung 2018.04.06 check if symbol file exists
|
311 |
Jeongwoo 2018.05.29 Change method to adjust detail symbol location with hit-rate. Not feature point count
|
312 |
Change parameter on add symbol part (mpCount → hitRate)
|
313 |
Remove unusing calculation (avg)
|
314 |
Jeongwoo 2018.06.27 Remove part to split P&ID image and for loop
|
315 |
'''
|
316 |
def detectSymbolOnPid(mainRes, targetSymbol, listWidget, updateProgressSignal):
    """Template-match one target symbol against the drawing at 0/90/180/270
    degree rotations and record each accepted hit in searchedSymbolList.

    mainRes              : path of the main drawing
    targetSymbol         : symbol descriptor loaded from the symbol DB
    listWidget           : Qt list widget used for status messages
    updateProgressSignal : emitted once when this symbol has been processed

    @history humkyung 2018.04.06 check if symbol file exists
             Jeongwoo 2018.05.29 Change method to adjust detail symbol location with hit-rate
             Jeongwoo 2018.06.27 Remove part to split P&ID image and for loop
    """
    global src
    global srcGray
    global ocrCompletedSrc
    global afterDenoising
    global threadLock
    global searchedSymbolList
    global maxProgressValue

    try:
        symId = targetSymbol.getId()
        symbolName = targetSymbol.getName()
        symbolType = targetSymbol.getType()
        symbolPath = targetSymbol.getPath()
        symbolThreshold = targetSymbol.getThreshold()
        symbolMinMatchCount = targetSymbol.getMinMatchCount()
        isDetectOnOrigin = targetSymbol.getIsDetectOnOrigin()
        symbolRotateCount = targetSymbol.getRotationCount()
        symbolOcrOption = targetSymbol.getOcrOption()
        isContainChild = targetSymbol.getIsContainChild()
        symbolOriginalPoint = targetSymbol.getOriginalPoint()
        symbolConnectionPoint = targetSymbol.getConnectionPoint()
        baseSymbol = targetSymbol.getBaseSymbol()
        additionalSymbol = targetSymbol.getAdditionalSymbol()
        isExceptDetect = targetSymbol.getIsExceptDetect()

        # check if symbol file is target or not
        if isExceptDetect == 1:
            item = QListWidgetItem('{} file is not target'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('green'))
            listWidget.addItem(item)
            return

        foundSymbolCount = 0

        # check if symbol file exists
        if not os.path.isfile(symbolPath):
            item = QListWidgetItem('{} file not found'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('red'))
            listWidget.addItem(item)
            return
        # up to here

        sym = cv2.imread(symbolPath, 1)
        symGray = cvtGrayImage(sym)
        ## TODO: symbol is not detected when the template is binarized
        ## symGray = cv2.threshold(cvtGrayImage(sym), 127, 255, cv2.THRESH_BINARY)[1]
        ## cv2.imshow('symbol', symGray)
        ## cv2.waitKey(0)
        sow, soh = symGray.shape[::-1] # symbol original w, h

        offsetDrawingArea=[]
        area = AppDocData.instance().getArea('Drawing')
        if area is not None:
            copiedBasePid = area.img.copy()
            offsetDrawingArea.append(area.x)
            offsetDrawingArea.append(area.y)
        else:
            offsetDrawingArea.append(0)
            offsetDrawingArea.append(0)
            # no Drawing area configured: search the full image
            if isDetectOnOrigin == 1:
                copiedBasePid = srcGray.copy()
            else:
                copiedBasePid = ocrCompletedSrc.copy()
        srcWidth, srcHeight = copiedBasePid.shape[::-1]

        roiItemSp = (0,0)
        roiItemEp = (srcWidth, srcHeight)
        roiItem = copiedBasePid

        symbolRotatedAngle = 0
        for rc in range(symbolRotateCount + 1): ## rotation count is user-facing, so add 1 before use
            sw, sh = symGray.shape[::-1]
            roiw = (roiItemEp[0] - roiItemSp[0])
            roih = (roiItemEp[1] - roiItemSp[1])

            ## Case : Bigger Symbol than Split ROI — skip this angle, rotate and retry
            if roiw < sw or roih < sh:
                symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
                symbolRotatedAngle = symbolRotatedAngle + 90

                if baseSymbol is not None and additionalSymbol is not None:
                    additionalSymbol = getRotatedChildInfo(additionalSymbol)
                continue

            ## get Rotated Original Point
            originalPoint = getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolRotatedAngle, sw, sh, sow, soh)
            connectionPoint = getCalculatedConnectionPoint(symbolConnectionPoint, symbolRotatedAngle, sw, sh, sow, soh)

            ## Template Matching
            tmRes = cv2.matchTemplate(roiItem, symGray, cv2.TM_CCOEFF_NORMED)
            loc = np.where(tmRes >= symbolThreshold)

            for pt in zip(*loc[::-1]):
                overlapArea = 0
                mpCount = 0 # Match Point Count
                symbolIndex = -1

                # confirm the candidate with ORB keypoint matching
                roi = roiItem[pt[1]:pt[1]+sh, pt[0]:pt[0]+sw]
                mpCount = getMatchPointCount(roi, symGray)
                if not (mpCount >= symbolMinMatchCount):
                    continue

                searchedItemSp = (roiItemSp[0]+pt[0] + round(offsetDrawingArea[0]), roiItemSp[1]+pt[1] + round(offsetDrawingArea[1]))

                # does this hit overlap an already-registered symbol?
                for i in range(len(searchedSymbolList)):
                    overlapArea = contains(searchedSymbolList[i], searchedItemSp, sw, sh)
                    if overlapArea > ACCEPT_OVERLAY_AREA:
                        symbolIndex = i
                        break

                hitRate = tmRes[pt[1], pt[0]]
                ## DEBUG
                print('{}:{}-{}'.format(symbolName, searchedItemSp, hitRate))
                ## up to here

                ## overlap is below the accept threshold: register as new
                if overlapArea <= ACCEPT_OVERLAY_AREA:
                    threadLock.acquire()
                    foundSymbolCount = foundSymbolCount + 1
                    addSearchedSymbol(symId, symbolName, symbolType
                        , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                        , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                        , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                    threadLock.release()
                ## overlap exceeds the accept threshold
                else:
                    if symbolIndex != -1 and symbolIndex < len(searchedSymbolList):
                        searchedSymbol = searchedSymbolList[symbolIndex]
                        ## same symbol: keep whichever hit has the higher rate
                        if symbolName == searchedSymbol.getName():
                            symbolHitRate = searchedSymbol.getHitRate()
                            if symbolHitRate < hitRate:
                                threadLock.acquire()
                                searchedSymbolList[symbolIndex] = symbol.Symbol(symId, symbolName, symbolType
                                    , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                                    , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                    , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                                threadLock.release()
                        ## overlapping symbol has a different name (containment case)
                        else:
                            ## check whether a same-named symbol already overlaps this area
                            matches = [sym for sym in searchedSymbolList if sym.getName() == symbolName and contains(sym, searchedItemSp, sw, sh) > ACCEPT_OVERLAY_AREA]

                            ## same-named overlap found: keep the higher hit rate
                            if len(matches) != 0:
                                for s in matches:
                                    symbolHitRate = s.getHitRate()
                                    if symbolHitRate < hitRate:
                                        threadLock.acquire()
                                        searchedSymbolList[searchedSymbolList.index(s)] = symbol.Symbol(symId, symbolName, symbolType
                                            , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                                            , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                            , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                                        threadLock.release()
                            else:
                                if searchedSymbol.getIsContainChild() == 1:
                                    ##TODO: filter out symbols of specific categories (ex - 9900-range Drum)
                                    if (searchedSymbol.getId() // 100) == (symId // 100):
                                        continue
                                    else:
                                        threadLock.acquire()
                                        foundSymbolCount = foundSymbolCount + 1
                                        # NOTE(review): `hitRate` is passed in the minMatchCount slot here,
                                        # unlike the other addSearchedSymbol calls which pass
                                        # symbolMinMatchCount — looks like a copy/paste slip; confirm
                                        addSearchedSymbol(symId, symbolName, symbolType
                                            , searchedItemSp, sw, sh, symbolThreshold, hitRate, hitRate, symbolRotatedAngle
                                            , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                            , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                                        threadLock.release()

            ## Rotate Symbol for the next pass
            symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
            symbolRotatedAngle = symbolRotatedAngle + 90

            if additionalSymbol is not None:
                additionalSymbol = getRotatedChildInfo(additionalSymbol)

        threadLock.acquire()
        listWidget.addItem('Found Symbol : ' + os.path.splitext(os.path.basename(symbolPath))[0] + ' - (' + str(foundSymbolCount) + ')')
        threadLock.release()

        updateProgressSignal.emit(maxProgressValue)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
499 |
|
500 |
'''
|
501 |
@history 2018.05.17 Jeongwoo Bitwise_not target changed (Original Image → Symbol Image)
|
502 |
'''
|
503 |
def removeDetectedSymbol(sym):
    """Erase a detected symbol from the working images by XOR-ing its
    rotated, binarized template over the matched region.

    @history 2018.05.17 Jeongwoo Bitwise_not target changed (Original Image -> Symbol Image)
    """
    global srcGray
    global ocrCompletedSrc
    global threadLock

    sp = sym.getSp()
    sw = sym.getWidth()
    sh = sym.getHeight()

    symImg = cv2.imread(sym.getPath())
    symImg = cv2.threshold(cvtGrayImage(symImg), 127, 255, cv2.THRESH_BINARY)[1]
    # rotate the template to the angle the symbol was detected at
    for _ in range(sym.getRotatedAngle() // 90):
        symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

    threadLock.acquire()
    region = srcGray[sp[1]:sp[1] + sh, sp[0]:sp[0] + sw]
    cleared = cv2.bitwise_xor(cv2.bitwise_not(symImg), region)
    cleared = cv2.dilate(cleared, np.ones((5, 5), np.uint8))
    srcGray[sp[1]:sp[1] + sh, sp[0]:sp[0] + sw] = cleared
    ocrCompletedSrc[sp[1]:sp[1] + sh, sp[0]:sp[0] + sw] = cleared
    threadLock.release()
528 |
|
529 |
def drawRectOnSrc(sym):
    """Draw a red bounding rectangle for the detected symbol on the global
    source image.

    The original also re-read and grayscaled the symbol's image file into
    unused locals; that wasted disk I/O has been removed.
    """
    global src

    sp = sym.getSp()
    sw = sym.getWidth()
    sh = sym.getHeight()

    # red in BGR, 2 px thick
    cv2.rectangle(src, sp, (sp[0] + sw, sp[1] + sh), (0, 0, 255), 2)
543 |
|
544 |
'''
|
545 |
@history 2018.04.27 Jeongwoo Remove Tesseract Log on listWidget
|
546 |
2018.05.04 Jeongwoo Change method to OCR with tesseract_ocr_module.py
|
547 |
2018.05.09 Jeongwoo Add global variable textInfoList, Remove text in symbol and Add tesseract result text
|
548 |
2018.05.10 Jeongwoo Remove not used if-statement
|
549 |
2018.06.19 Jeongwoo When detect text in symbol, use getTextAreaInfo() and Tesseract
|
550 |
2018.06.21 Jeongwoo Add if-statement for way to detect text by Type A
|
551 |
'''
|
552 |
def drawFoundSymbols(symbol, listWidget):
    """Stamp the found symbol's (rotated) template image onto the global
    canvas at its detected position.

    NOTE: the parameter name `symbol` shadows the imported `symbol` module
    within this function; kept for interface compatibility.

    The original read several unused attributes (id, width, height, OCR
    option) into dead locals; those calls have been removed.
    """
    global canvas

    symbolSp = symbol.getSp()
    symImg = cv2.imread(symbol.getPath(), 1)
    # rotate the template to the detected angle
    for _ in range(symbol.getRotatedAngle() // 90):
        symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

    chan, w, h = symImg.shape[::-1]
    region = canvas[symbolSp[1]:symbolSp[1]+h, symbolSp[0]:symbolSp[0]+w]
    canvas[symbolSp[1]:symbolSp[1]+h, symbolSp[0]:symbolSp[0]+w] = cv2.bitwise_and(region, symImg)
573 |
|
574 |
'''
|
575 |
@brief draw found symbols and texts
|
576 |
@author Jeongwoo
|
577 |
'''
|
578 |
def drawFoundSymbolsOnCanvas(drawingPath , textInfos , listWidget):
    """Draw all found symbols and recognized texts onto a white canvas and
    save it as FOUND_<drawing name> in the project temp folder.

    @author Jeongwoo
    """
    global src
    global searchedSymbolList
    global canvas

    canvas = np.zeros(src.shape, np.uint8)
    canvas[::] = (255, 255, 255)

    try:
        project = AppDocData.instance().getCurrentProject()

        for sym in searchedSymbolList:
            drawFoundSymbols(sym, listWidget)

        for text in textInfos:
            # copy each recognized text region from the source image
            left = text.getX()
            top = text.getY()
            right = left + text.getW()
            bottom = top + text.getH()
            canvas[top:bottom, left:right] = src[top:bottom, left:right]

        cv2.imwrite(os.path.join(project.getTempPath(), "FOUND_" + os.path.basename(drawingPath)), canvas)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
607 |
|
608 |
#Generate target symbol data list
|
609 |
'''
|
610 |
@history 2018.04.24 Jeongwoo Add isExceptDetect Field
|
611 |
2018.05.09 Jeongwoo Add targetSymbolList clear
|
612 |
'''
|
613 |
def initTargetSymbolDataList():
    """Build the global target-symbol list from SQLite data.

    Symbols are grouped by category code (id // 100), each group is sorted
    by symbol id, and the groups are appended to targetSymbolList, which
    is also returned.

    @history 2018.04.24 Jeongwoo Add isExceptDetect Field
             2018.05.09 Jeongwoo Add targetSymbolList clear
    """
    ############ region SQLite
    global targetSymbolList

    targetSymbolList.clear()

    # group symbols by category code
    # (original used a local named `dict`, shadowing the builtin)
    groups = {}
    for target in AppDocData.instance().getTargetSymbolList():
        groups.setdefault(target.getId() // 100, []).append(target)

    # sort each group by symbol id and collect into the global list
    for group in groups.values():
        targetSymbolList.append(sorted(group, key=lambda sym: sym.id))
    ############ endregion SQLite

    return targetSymbolList
|
640 |
|
641 |
'''
|
642 |
@brief detect text areas
|
643 |
@author humkyung
|
644 |
@date 2018.06.16
|
645 |
'''
|
646 |
def detectTextAreas(filePath):
    """Detect text areas on the drawing.

    Depending on the 'Text Area' configuration, uses either the OCR
    module's removeTextFromNpArray (type 0) or getTextAreaInfo.
    Returns the detected text info list (empty on failure).

    @author humkyung
    @date 2018.06.16
    """
    global srcGray

    tInfoList = []
    try:
        if os.path.isfile(filePath):
            # fall back to the whole grayscale image when no Drawing area is set
            area = AppDocData.instance().getArea('Drawing')
            img = area.img if area is not None else srcGray
            offsetX = area.x if area is not None else 0
            offsetY = area.y if area is not None else 0

            configs = AppDocData.instance().getConfigs('Text Area', 'Text Area')
            # renamed from `type`, which shadowed the builtin
            detectType = int(configs[0].value) if 1 == len(configs) else 0

            if detectType == 0:
                (_tempOcrSrc, tInfoList) = OCR.removeTextFromNpArray(img, offsetX, offsetY)
            else:
                tInfoList = getTextAreaInfo(filePath, img, offsetX, offsetY)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return tInfoList
|
664 |
|
665 |
'''
|
666 |
@brief read image drawing and then remove text
|
667 |
@author jwkim
|
668 |
@date
|
669 |
@history humkyung 2018.04.06 check if file exists
|
670 |
Jeongwoo 2018.05.09 Use Tesseract OCR after Azure OCR (Azure OCR : Getting text area)
|
671 |
Jeongwoo 2018.05.25 Add condition on if-statement
|
672 |
Jeongwoo 2018.06.05 Get text area data list by config.type
|
673 |
Jeongwoo 2018.06.08 Add angle Parameter on TOCR.getTextInfo
|
674 |
humkyung 2018.06.16 update proessbar while recognizing text
|
675 |
'''
|
676 |
def initMainSrc(mainRes, tInfoList, updateProgressSignal):
    """Run Tesseract OCR over the detected text areas, accumulate results
    in the global textInfoList, and erase recognized text from the
    working image (ocrCompletedSrc). Also OCRs the 'Note' area if set.

    @history Jeongwoo 2018.05.09 Use Tesseract OCR after Azure OCR
             Jeongwoo 2018.06.08 Add angle Parameter on TOCR.getTextInfo
             humkyung 2018.06.16 update progressbar while recognizing text
    """
    global srcGray
    global ocrCompletedSrc
    global textInfoList
    global noteTextInfoList
    global maxProgressValue

    try:
        docData = AppDocData.instance()
        project = docData.getCurrentProject()

        # OCR_<drawing> is the pre-rendered OCR source produced earlier
        path = os.path.join(project.getTempPath(), 'OCR_' + os.path.basename(mainRes))
        if os.path.isfile(path):
            ocrCompletedSrc = srcGray.copy()
            ocrCompletedSrc = cv2.threshold(ocrCompletedSrc, 127, 255, cv2.THRESH_BINARY)[1]

            imgOCR = cv2.imread(path, 1)
            imgOCR = cv2.threshold(cvtGrayImage(imgOCR), 127, 255, cv2.THRESH_BINARY)[1]

            global MIN_TEXT_SIZE
            # NOTE(review): assumes a 'Drawing' area is configured; area.x/area.y
            # below would fail if getArea returned None — confirm
            area = AppDocData.instance().getArea('Drawing')
            for tInfo in tInfoList:
                # skip regions smaller than MIN_TEXT_SIZE in both dimensions
                if tInfo.getW() >= MIN_TEXT_SIZE or tInfo.getH() >= MIN_TEXT_SIZE:
                    # position relative to the drawing area
                    x = tInfo.getX() - round(area.x)
                    y = tInfo.getY() - round(area.y)
                    img = imgOCR[y:y+tInfo.getH(), x:x+tInfo.getW()]
                    resultTextInfo = TOCR.getTextInfo(img, (x, y), tInfo.getAngle())
                    if resultTextInfo is not None and len(resultTextInfo) > 0:
                        # shift results back to absolute coordinates
                        for result in resultTextInfo:
                            result.setX(result.getX() + round(area.x))
                            result.setY(result.getY() + round(area.y))
                        textInfoList.extend(resultTextInfo)
                        ocrCompletedSrc = removeText(ocrCompletedSrc, (tInfo.getX(), tInfo.getY()), img)
                    else:
                        print(tInfo.getText())
                updateProgressSignal.emit(maxProgressValue)

            cv2.imwrite(os.path.join(project.getTempPath(), "XOCR_" + os.path.basename(mainRes)), ocrCompletedSrc)

            # parse Note
            noteArea = AppDocData.instance().getArea('Note')
            if noteArea is not None:
                noteArea.img = srcGray[round(noteArea.y-1):round(noteArea.y+noteArea.height-1), round(noteArea.x-1):round(noteArea.x+noteArea.width-1)]
                noteTextInfoList = TOCR.getTextInfo(noteArea.img, (noteArea.x, noteArea.y))
            updateProgressSignal.emit(maxProgressValue)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
723 |
|
724 |
'''
|
725 |
@brief remove small objects from given image
|
726 |
@author humkyung
|
727 |
@date 2018.04.26
|
728 |
@history 2018.05.25 Jeongwoo Moved from MainWindow
|
729 |
'''
|
730 |
def removeSmallObjects(image):
    """Remove small objects (contour area within the configured Min/Max
    bounds) from the given binary image by painting them white.

    Returns the modified image; if an error occurs, the input image is
    returned unchanged (previously `contourImage` could be unbound here,
    raising NameError from the return statement).

    @history 2018.05.25 Jeongwoo Moved from MainWindow
    """
    contourImage = image  # safe fallback for the except path
    try:
        docData = AppDocData.instance()
        configs = docData.getConfigs('Small Object Size', 'Min Area')
        minArea = int(configs[0].value) if 1 == len(configs) else 20
        configs = docData.getConfigs('Small Object Size', 'Max Area')
        maxArea = int(configs[0].value) if 1 == len(configs) else 50

        # NOTE: 3-value return is the OpenCV 3.x findContours signature
        _, contours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        selectedContours = [contour for contour in contours
                            if minArea < cv2.contourArea(contour) < maxArea]
        # draw selected contours filled with white
        contourImage = cv2.drawContours(image, selectedContours, -1, (255, 255, 255), -1)
        #path = os.path.join(AppDocData.instance().getCurrentProject().getTempPath(), 'contours.png')
        #cv2.imwrite(path, contourImage)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return contourImage
|
752 |
|
753 |
'''
    @history    2018.05.25  Jeongwoo    Moved from MainWindow
                2018.05.28  Jeongwoo    Add xmlPath Parameter and append LineInfo into xml
                2018.05.29  Jeongwoo    Change method to add item
                2018.05.30  Jeongwoo    Remove parameter (xmlPath)
                humkyung 2018.06.11 add drawing path to parameter and write recognized lines to image
'''
def recognizeLine(path, listWidget, graphicsView):
    """Detect lines in the 'Drawing' area, connect them to symbols and to each other,
    and add the resulting QEngineeringLineItem objects (plus flow arrows for long
    lines) to the graphics scene.

    @param path         drawing file path (not referenced inside this body)
    @param listWidget   QListWidget that receives status messages
    @param graphicsView viewer whose scene holds symbol/line/text items
    """
    from shapely.geometry import Point, LineString
    from SymbolSvgItem import SymbolSvgItem
    from QEngineeringFlowArrowItem import QEngineeringFlowArrowItem
    from QEngineeringLineNoTextItem import QEngineeringLineNoTextItem
    from QEngineeringTextItem import QEngineeringTextItem
    from QEngineeringLineItem import QEngineeringLineItem
    from LineDetector import LineDetector

    try:
        # remove already existing line and flow arrow item
        items = [item for item in graphicsView.scene.items() if (type(item) is QEngineeringLineItem) or (type(item) is QEngineeringFlowArrowItem)]
        for item in items:
            graphicsView.scene.removeItem(item)
        # up to here

        # detect line
        connectedLines = []

        area = AppDocData.instance().getArea('Drawing')
        area.img = removeSmallObjects(area.img)
        detector = LineDetector(area.img)

        # collect symbol items and the line segments attached to each symbol
        symbols = []
        for item in graphicsView.scene.items():
            if issubclass(type(item), SymbolSvgItem):
                symbols.append(item)
                res = detector.detectConnectedLine(item, round(area.x), round(area.y))
                if res is not None:
                    connectedLines.extend(res)

        lineNos = [item for item in graphicsView.scene.items() if type(item) is QEngineeringLineNoTextItem]

        if len(connectedLines) > 1:
            detector.mergeLines(connectedLines, toler=5)
        # connect line to symbol
        try:
            for line in connectedLines:
                matches = [symbol for symbol in symbols if symbol.isConnectable(line, (round(area.x), round(area.y)), toler=20)]
                for symbol in matches:
                    detector.connectLineToSymbol(line, (round(area.x), round(area.y)), symbol)
        except Exception as ex:
            print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
        # up to here

        # connect line to line
        toler = 10
        try:
            for line in connectedLines:
                # candidates: any other detected line that is not parallel to this one
                matches = [it for it in connectedLines if (it is not line) and (not detector.isParallel(line, it))]

                # get closest line - keep candidates whose either endpoint is within toler
                selected = []
                shapelyLine = LineString(line)
                for match in matches:
                    dist = [shapelyLine.distance(Point(match[0][0], match[0][1])),shapelyLine.distance(Point(match[1][0], match[1][1]))]
                    if dist[0] < toler or dist[1] < toler:
                        selected.append(match)
                # up to here

                for match in selected:
                    detector.connectLineToLine(match, line, toler)
        except Exception as ex:
            print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
        # up to here

        width, height = area.img.shape[::-1]
        size = max(width, height)
        # create line items in scene coordinates (area offset added back to each vertex)
        lines = []
        for pts in connectedLines:
            processLine = QEngineeringLineItem(vertices=[(area.x + param[0], area.y + param[1]) for param in pts])
            processLine.buildItem()
            processLine.addLineItemToScene(graphicsView.scene)
            lines.append(processLine)

            if processLine.length() > 100: # TODO: check critical length
                processLine.addFlowArrow()
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
    finally:
        listWidget.addItem('Finish Line searching')
'''
    @brief      Main function
    @author     Jeongwoo
    @date
    @history    humkyung 2018.04.06 change error display from message box to print
                Jeongwoo 2018.04.25 Remove 'Current Symbol : ' QListItem
                Jeongwoo 2018.05.09 Make Comments OCR.removeTextFromNpArray block
                Jeongwoo 2018.05.25 Remove imgLineList variable and parameter on writeXml()
                humkyung 2018.05.26 add parameters(graphicsView, isSymbolTextChecked, isLineChecked)
                Jeongwoo 2018.05.28 Add/Remove Parameters(Add : signal / Remove : graphicsView, isLineChecked)
                Jeongwoo 2018.05.30 Remove return value
                humkyung 2018.06.08 add signal for progressbar to parameter
                humkyung 2018.06.11 get difference between original and recognized image
                Jeongwoo 2018.06.21 If noteTextInfoList is None, change from None to empty list
'''
def executeRecognition(signal, updateProgressSignal, path, listWidget, isSymbolTextChecked):
    """Run the full symbol/text recognition pipeline on the drawing at *path*.

    Loads and binarizes the image, detects text areas and symbols on a thread
    pool, removes detected items from the working image, writes debug/result
    images to the project temp folder and finally emits *signal* with the
    recognition results.

    @param signal               emitted with (symbols, textInfos, noteTextInfos) at the end
    @param updateProgressSignal progress-bar signal passed to the workers
    @param path                 drawing image file path
    @param listWidget           QListWidget receiving status messages
    @param isSymbolTextChecked  when False the detection phase is skipped entirely
    """
    # results are shared with worker threads through module-level globals
    global src
    global srcGray
    global ocrCompletedSrc
    global searchedSymbolList
    global threadLock
    global textInfoList
    global noteTextInfoList
    global maxProgressValue

    try:
        docData = AppDocData.instance()
        project = docData.getCurrentProject()

        srcList = []
        srcList.append(path)

        initTargetSymbolDataList()

        for mainRes in srcList:
            # Init src
            src = []
            srcGray = []
            ocrCompletedSrc = []
            searchedSymbolList = []
            textInfoList = []

            if not os.path.isfile(mainRes):
                item = QListWidgetItem('{} file is not found'.format(os.path.basename(mainRes)))
                item.setBackground(Qt.red)
                listWidget.addItem(item)
                continue

            # load original src & symbol
            src = cv2.imread(mainRes, 1)

            # gray scale
            if len(src.shape) == 3:
                srcGray = cvtGrayImage(src)
            else:
                srcGray = src.copy()
            srcGray = cv2.threshold(srcGray, 127, 255, cv2.THRESH_BINARY)[1]

            area = docData.getArea('Drawing')
            if area is not None:
                # TODO: compensate for the offset between the configured area values and the image coordinate system
                area.img = srcGray[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]

            listWidget.addItem("Start recognition : " + mainRes)

            if isSymbolTextChecked:
                # lock while computing the shared progress-bar maximum
                threadLock.acquire()
                textAreas = detectTextAreas(mainRes)
                ### calculate total count of symbol
                maxProgressValue = len(textAreas) + 1
                for targetItem in targetSymbolList:
                    if type(targetItem) is list:
                        maxProgressValue += len(targetItem)
                    else:
                        maxProgressValue += 1
                ### up to here
                threadLock.release()

                initMainSrc(mainRes, textAreas, updateProgressSignal)

                # run symbol detection on a thread pool; list entries carry symbol groups
                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for targetItem in targetSymbolList:
                    if type(targetItem) is list:
                        pool.submit(detectSymbolsOnPid, mainRes, targetItem, listWidget, updateProgressSignal)
                    else:
                        pool.submit(detectSymbolOnPid, mainRes, targetItem, listWidget, updateProgressSignal)
                pool.shutdown(wait = True)

                ## DEBUG
                print('----------')
                for item in searchedSymbolList:
                    print('{}:{}-{}'.format(item.getName(), item.getSp(), item.hitRate))
                    _img = srcGray[round(item.getSp()[1]):round(item.getSp()[1]+item.getHeight()), round(item.getSp()[0]):round(item.getSp()[0]+item.getWidth())]
                    cv2.imwrite(os.path.join(project.getTempPath(), 'Tile', item.getName()+'.png'), _img)
                ## up to here

                chan, docData.imgWidth, docData.imgHeight = src.shape[::-1]
                drawFoundSymbolsOnCanvas(mainRes, textInfoList, listWidget)

                docData.imgName = os.path.splitext(os.path.basename(mainRes))[0]

                # erase detected symbols from the working image and mark them on src
                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for sym in searchedSymbolList:
                    pool.submit(removeDetectedSymbol, sym)
                    pool.submit(drawRectOnSrc, sym)
                pool.shutdown(wait = True)

                global MIN_TEXT_SIZE
                for textInfo in textInfoList:
                    if textInfo.getW() >= MIN_TEXT_SIZE or textInfo.getH() >= MIN_TEXT_SIZE:
                        removeText(srcGray, (textInfo.getX(), textInfo.getY()), srcGray[textInfo.getY():textInfo.getY()+textInfo.getH(), textInfo.getX():textInfo.getX()+textInfo.getW()])

                ## Remove Noise (morphological close: dilate then erode with the same kernel)
                kernel1 = np.ones((2, 2), np.uint8)
                srcGray = cv2.dilate(srcGray, kernel1)
                #kernel2 = np.ones((4, 4), np.uint8)
                srcGray = cv2.erode(srcGray, kernel1)

                removedSymbolImgPath = os.path.join(project.getTempPath(), os.path.basename(path))
                cv2.imwrite(removedSymbolImgPath, srcGray)
                area = AppDocData.instance().getArea('Drawing')
                if area is not None:
                    area.img = srcGray[round(area.y+1):round(area.y+area.height), round(area.x+1):round(area.x+area.width)]
                cv2.imwrite(os.path.join(project.getTempPath(), "RECT_" + os.path.basename(path)), src)

                listWidget.addItem("Recognized symbol count : " + str(len(searchedSymbolList)))

                # get difference between original and recognized image
                foundFilePath = os.path.join(project.getTempPath(), "FOUND_" + os.path.basename(path))
                getDifference(path, foundFilePath)
                # up to here

                signal.emit(searchedSymbolList, textInfoList, noteTextInfoList if noteTextInfoList is not None else [])
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
'''
    @brief      draw contour to image
    @author     humkyung
    @date       2018.06.18
'''
def drawContour(img, contour):
    """Paint a single contour onto *img*.

    A contour with non-negative oriented area is filled black and outlined with
    a 1-px white border; one with negative oriented area is filled solid white.
    (The sign of cv2.contourArea(..., oriented=True) depends on the contour's
    winding direction — presumably distinguishing outer shapes from holes here.)
    """
    signedArea = cv2.contourArea(contour, True)
    if signedArea < 0:
        # hole / reversed winding: erase with white fill
        cv2.drawContours(img, [contour], -1, (255, 255, 255), -1)
    else:
        # outer shape: black fill, then a thin white outline
        cv2.drawContours(img, [contour], -1, (0, 0, 0), -1)
        cv2.drawContours(img, [contour], -1, (255, 255, 255), 1)
'''
    @brief      Get Text Area info by contour
    @author     Jeongwoo
    @date       2018.06.05
    @history    2018.06.08  Jeongwoo    Add angle
                humkyung 2018.06.18 fixed logic to detect text area
'''
def getTextAreaInfo(filePath, imgGray, offsetX, offsetY):
    """Detect candidate text areas on a grayscale drawing image.

    Contours are redrawn onto a clean white image, eroded so that neighbouring
    glyphs merge into blobs, and each blob's orientation (horizontal/vertical)
    is estimated by voting over its inner contours.  Overlapping boxes with the
    same orientation are then merged.  Debug images (OCR_*, ERODED_OCR_*, Tile/)
    are written to the project temp folder along the way.

    @param filePath  path of the original drawing (used only to name debug images)
    @param imgGray   grayscale image of the drawing area
    @param offsetX   X offset added to each detected box (drawing-area origin)
    @param offsetY   Y offset added to each detected box
    @return          list of ti.TextInfo('', x, y, w, h, angle) candidates,
                     angle 0 for horizontal and 90 for vertical text
    """
    from AppDocData import AppDocData

    docData = AppDocData.instance()
    project = docData.getCurrentProject()

    configs = docData.getConfigs('Text Size', 'Max Text Size')
    maxTextSize = int(configs[0].value) if 1 == len(configs) else 100

    contourImg = np.ones(imgGray.shape, np.uint8) * 255
    binaryImg, mask = cv2.threshold(imgGray, 127, 255, cv2.THRESH_BINARY)

    image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        # remove too big one
        [x, y, w, h] = cv2.boundingRect(contour)
        if (w > maxTextSize and h > maxTextSize): continue

        # oriented area: the sign separates outer contours from holes
        area = cv2.contourArea(contour, True)
        if area >= 0:
            cv2.drawContours(contourImg, [contour], -1, (0, 0, 0), -1)
            cv2.drawContours(contourImg, [contour], -1, (255, 255, 255), 1)
        else:
            cv2.drawContours(contourImg, [contour], -1, (255, 255, 255), -1)

    path = os.path.join(project.getTempPath(), 'OCR_' + os.path.basename(filePath))
    cv2.imwrite(path, contourImg)

    rects = []
    configs = docData.getConfigs('Text Recognition', 'Expand Size')
    expandSize = int(configs[0].value) if 1 == len(configs) else 10
    configs = docData.getConfigs('Text Recognition', 'Shrink Size')
    shrinkSize = int(configs[0].value) if 1 == len(configs) else 0

    # erosion grows the black glyphs so characters of one string merge into a blob
    eroded = cv2.erode(contourImg, np.ones((expandSize, expandSize), np.uint8))

    path = os.path.join(project.getTempPath(), 'ERODED_OCR_' + os.path.basename(filePath))
    cv2.imwrite(path, eroded)

    eroded = cv2.bitwise_not(eroded)

    image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area < 0:
            [x, y, w, h] = cv2.boundingRect(contour)
            if (w < 10 and h < 10) or (w > maxTextSize and h > maxTextSize): continue  # skip too small or big one

            img = contourImg[y:y + h, x:x + w]
            img = cv2.bitwise_not(img)

            # estimate the blob's text orientation by voting over its inner contours,
            # each vote weighted by the inner contour's area share of the blob
            horizontal = 0
            vertical = 0
            if w > maxTextSize:
                horizontal = 1
            elif h > maxTextSize:
                vertical = 1
            else:
                if shrinkSize > 0:
                    img = cv2.erode(img, np.ones((shrinkSize, shrinkSize), np.uint8))

                _, _contours, _ = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                for xx in _contours:
                    [_x, _y, _w, _h] = cv2.boundingRect(xx)
                    cv2.rectangle(img, (_x, _y), (_x + _w, _y + _h), 255, 1)

                    if (_w < _h) or (_w > maxTextSize and _h < maxTextSize):  # taller than wide (or overly wide but short)
                        horizontal += 1 + (_w * _h) / (w * h)
                    else:
                        vertical += 1 + (_w * _h) / (w * h)

            # write a debug tile named by the estimated orientation and box
            if horizontal > vertical:
                filePath = os.path.join(project.getTempPath(), "Tile", "H-{}-{}-{}-{}.png".format(x, y, w, h))
            else:
                filePath = os.path.join(project.getTempPath(), "Tile", "V-{}-{}-{}-{}.png".format(x, y, w, h))

            cv2.imwrite(filePath, img)

            rects.append([0 if horizontal > vertical else 90, QRect(x, y, w, h)])

    configs = docData.getConfigs('Text Recognition', 'Merge Size')
    mergeSize = int(configs[0].value) if 1 == len(configs) else 10
    # merge rectangles of the same orientation that intersect after expansion
    # along the reading direction; restart the scan after every merge
    intersected = True
    while intersected:
        intersected = False
        for rect in rects[:]:
            if 0 == rect[0]:
                rectExpand = rect[1].adjusted(-mergeSize, 0, mergeSize, 0)
            else:
                rectExpand = rect[1].adjusted(0, -mergeSize, 0, mergeSize)

            matches = [x for x in rects if (x[0] == rect[0]) and rectExpand.intersects(x[1])]
            if len(matches) > 1:
                united = matches[0]
                for _rect in matches:
                    united[1] = united[1].united(_rect[1])
                    if _rect in rects: rects.remove(_rect)
                rects.append(united)
                intersected = True
                break

    # build the result (renamed from 'list' to avoid shadowing the builtin)
    textInfos = []
    for rect in rects:
        angle = rect[0]
        textInfos.append(ti.TextInfo('', round(offsetX) + rect[1].x(), round(offsetY) + rect[1].y(), rect[1].width(), rect[1].height(), angle))

    return textInfos
'''
    @brief      get difference between given original and recognized image
    @author     humkyung
    @date       2018.06.11
'''
def getDifference(orgImagePath, recImagePath):
    """Write a DIFF_* image highlighting original pixels not covered by recognition.

    Both images are binarized; inside the 'Drawing' area the recognized image is
    inverted and XOR-ed with the original so that matching pixels cancel out.
    The result is dilated slightly to remove single-pixel noise and saved to the
    project temp folder.  Errors are logged, never raised.

    @param orgImagePath path of the original drawing image
    @param recImagePath path of the image containing the recognized items
    """
    # NOTE: removed unused 'global srcGray/ocrCompletedSrc/textInfoList/noteTextInfoList'
    # declarations - this function neither reads nor writes any of them
    try:
        if os.path.isfile(orgImagePath) and os.path.isfile(recImagePath):
            imgOriginal = cv2.threshold(cvtGrayImage(cv2.imread(orgImagePath, 1)), 127, 255, cv2.THRESH_BINARY)[1]
            imgRecognized = cv2.threshold(cvtGrayImage(cv2.imread(recImagePath, 1)), 127, 255, cv2.THRESH_BINARY)[1]

            # start from an all-white canvas; only the drawing area receives the diff
            imgDiff = np.ones(imgOriginal.shape, np.uint8) * 255

            docData = AppDocData.instance()
            area = docData.getArea('Drawing')
            if area is not None:
                x = round(area.x)
                y = round(area.y)
                width = round(area.width)
                height = round(area.height)
                imgNotOper = cv2.bitwise_not(imgRecognized[y:y + height, x:x + width])
                imgDiff[y:y + height, x:x + width] = cv2.bitwise_xor(imgOriginal[y:y + height, x:x + width], imgNotOper)

            # remove noise
            imgDiff = cv2.dilate(imgDiff, np.ones((2, 2), np.uint8))

            project = docData.getCurrentProject()
            cv2.imwrite(os.path.join(project.getTempPath(), "DIFF_" + os.path.basename(orgImagePath)), imgDiff)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
if __name__ == '__main__':
    # Standalone debug script: runs contour-based text-area detection and OCR on a
    # hard-coded sample drawing and writes intermediate images next to it.
    import DTI_PID_UI
    from ProjectDialog import Ui_Dialog
    import timeit
    from PyQt5.QtCore import QRect
    from operator import itemgetter, attrgetter

    start = timeit.default_timer()
    img = cv2.imread('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/PC-K-2203_P1_800DPI.png', 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    contourImg = np.ones(imgGray.shape, np.uint8)*255
    contourOcrImg = contourImg.copy()
    # Otsu binarization of the drawing
    binaryImg,mask = cv2.threshold(imgGray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgFinal = cv2.bitwise_and(imgGray, imgGray, mask = mask)
    ret, newImg = cv2.threshold(imgFinal, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # draw character-sized contours: outlines onto contourImg, filled glyphs onto contourOcrImg
    image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #holes = [contours[i] for i in range(len(contours)) if hierarchy[i][3] >= 0]
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area >= 0:
            [x, y, w, h] = cv2.boundingRect(contour)

            # remove too big or small one
            if (w > 100 or h > 100) or (w < 5 or h < 5): continue

            cv2.drawContours(contourImg, [contour], -1, (0,0,0), 1)
            cv2.drawContours(contourOcrImg, [contour], -1, (0,0,0), -1)
        else:
            cv2.drawContours(contourOcrImg, [contour], -1, (255,255,255), -1)

    '''
    contourImg = cv2.bitwise_not(contourImg)
    circles = cv2.HoughCircles(contourImg, cv2.HOUGH_GRADIENT, 1, 100)
    circles = np.uint16(np.around(circles))
    for i in circles[0,:]:
        cv2.circle(contourImg, (i[0], i[1]), i[2], (255,255,0), 1)
    '''

    # erode so nearby glyphs merge, then collect word-sized bounding boxes
    rects = []
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (8, 8))
    #kernel1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (2,2))
    #eroded = cv2.dilate(contourImg, kernel1)
    #cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Dilate_PC-K-2203_P1_800DPI___partial.png', eroded)
    eroded = cv2.erode(contourImg, kernel)
    image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area >= 0:
            [x, y, w, h] = cv2.boundingRect(contour)

            # remove small one less than character size
            if (w < 20 or h < 20): continue
            #if w > h:
            #    rects.append(QRect(x, y, w, h)) # expand rect
            #elif w < h:
            #    rects.append(QRect(x, y, w, h)) # expand rect
            rects.append(QRect(x, y, w, h)) # expand rect

    # merge intersecting boxes until no pair overlaps; restart scan after each merge
    intersected = True
    while intersected:
        intersected = False
        for rect in rects[:]:
            matches = [x for x in rects if rect.intersects(x)]
            if len(matches) > 1:
                united = matches[0]
                for _rect in matches:
                    united = united.united(_rect)
                    if _rect in rects: rects.remove(_rect)
                rects.append(united)
                intersected = True
                break

    for rect in rects:
        cv2.rectangle(img, (rect.x(), rect.y()), (rect.x() + rect.width(), rect.y() + rect.height()), (255, 0, 255), 1)

    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/PC-K-2203_P1_800DPI___partial.png', img)
    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Contour_PC-K-2203_P1_800DPI___partial.png', contourOcrImg)
    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Erode_PC-K-2203_P1_800DPI___partial.png', eroded)

    # OCR each merged box; boxes taller than wide (and >= 50px tall) are treated
    # as vertical text and read from a 90-degree-rotated copy of the image
    chan, imgW, imgH = img.shape[::-1]
    index = 0
    for rect in rects:
        index = index + 1
        isVertical = False
        textInfoList = None
        if rect.width() >= rect.height() or rect.height() < 50:
            isVertical = False
            textInfoList = TOCR.getTextInfo(contourOcrImg[rect.y():rect.y()+rect.height(), rect.x():rect.x()+rect.width()], (rect.x(), rect.y()))
        else:
            isVertical = True
            # map the box into the rotated image's coordinate system
            transform = QTransform()
            transform.translate(imgH*0.5, imgW*0.5)
            transform.rotate(90)
            transform.translate(-imgW*0.5, -imgH*0.5)
            transRect = transform.mapRect(rect)
            rotatedContourOcrImg = cv2.rotate(contourOcrImg, cv2.ROTATE_90_CLOCKWISE)
            textInfoList = TOCR.getTextInfo(rotatedContourOcrImg[transRect.y():transRect.y()+transRect.height(), transRect.x():transRect.x()+transRect.width()], (transRect.x(), transRect.y()))

        # rotate img so the recognized text can be drawn upright, then rotate back
        if isVertical:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        if textInfoList is not None:
            for textInfo in textInfoList:
                cv2.putText(img, textInfo.getText(), (textInfo.getX(), textInfo.getY()+textInfo.getH()), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        if isVertical:
            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)

        print(str(index) + " / " + str(len(rects)) + " Finished")
        #cv2.imshow('test', img[rect.y():rect.y()+rect.height(), rect.x():rect.x()+rect.width()])
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()

    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/OCR_PC-K-2203_P1_800DPI___partial.png', img)
    stop = timeit.default_timer()
    print('FINISHED : ' + str((stop-start)/60) + ' min')

    #app = QApplication(sys.argv)

    #try:
    #    dlg = Ui_Dialog()
    #    selectedProject = dlg.showDialog()
    #    if selectedProject is not None:
    #        form = ExampleApp()
    #        form.show()
    #except Exception as ex:
    #    print('에러가 발생했습니다.\n', ex)

    #sys.exit(app.exec_())