hytos / DTI_PID / DTI_PID / DTI_PID.py @ 0e9b3635
History | View | Blame | Download (52.5 KB)
1 |
#region import libs
|
---|---|
2 |
import http.client |
3 |
import urllib, base64, json |
4 |
import cv2 |
5 |
import numpy as np |
6 |
import SymbolBase |
7 |
import symbol |
8 |
import TextInfo as ti |
9 |
import azure_ocr_module as OCR |
10 |
from PIL import Image |
11 |
from io import BytesIO |
12 |
import gc |
13 |
import os |
14 |
import glob |
15 |
import math, operator |
16 |
import threading |
17 |
import concurrent.futures as futures |
18 |
import XmlGenerator as xg |
19 |
import pytesseract |
20 |
import tesseract_ocr_module as TOCR |
21 |
import potrace |
22 |
import sys |
23 |
from PyQt5.QtCore import * |
24 |
from PyQt5.QtGui import * |
25 |
from PyQt5.QtWidgets import * |
26 |
from PyQt5.QtSvg import * |
27 |
import DTI_PID_UI |
28 |
import QtImageViewer |
29 | |
30 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\Commands') |
31 |
import CreateCommand |
32 |
import CropCommand |
33 | |
34 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\Shapes') |
35 |
import QGraphicsPolylineItem |
36 |
from QEngineeringLineItem import QEngineeringLineItem |
37 |
from SymbolSvgItem import SymbolSvgItem |
38 |
from QGraphicsBoundingBoxItem import QGraphicsBoundingBoxItem |
39 |
from AppDocData import AppDocData |
40 |
#endregion
|
41 | |
42 |
## Tesseract path
pytesseract.pytesseract.tesseract_cmd = os.environ['TESSERACT_HOME'] + '\\tesseract.exe'
tesseract_cmd = os.environ['TESSERACT_HOME'] + '\\tesseract.exe'

#region Symbol Image path List for test
targetSymbolList = []  # symbols to search for, loaded from the project DB
#endregion

#region Global variables
searchedSymbolList = []  # all symbol detections accumulated by worker threads
src = []                 # original drawing image
srcGray = []             # grayscale copy of the drawing
ocrCompletedSrc = []     # grayscale drawing with recognized text erased
afterDenoising = []
canvas = []              # white canvas onto which found symbols/texts are re-drawn
textInfoList = []        # recognized text results
noteTextInfoList = []    # OCR results of the 'Note' area

# character whitelist for text recognition (consumed outside this chunk - TODO confirm)
WHITE_LIST_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-"

# text areas smaller than this (pixels) in BOTH dimensions are skipped
MIN_TEXT_SIZE = 10

THREAD_MAX_WORKER = os.cpu_count()  # worker pool size for detection threads
threadLock = threading.Lock()       # guards shared global images/lists

# overlap percentage above which two detections are treated as the same symbol
ACCEPT_OVERLAY_AREA = 10
#endregion
69 | |
70 |
'''
@history 2018.06.28 Jeongwoo Remove useless condition
'''
def checkTextInSymbol(pt):
    """Return True when point *pt* (x, y) lies inside the bounding box of any
    searched symbol whose OCR option does not forbid text recognition.

    @param pt: (x, y) position of a text item
    @return: True if pt falls within a matching symbol's box, else False
    """
    global searchedSymbolList

    result = False
    for sym in searchedSymbolList:
        # removed unused locals symId/categoryCode (dead code)
        symSp = sym.getSp()
        symWidth = sym.getWidth()
        symHeight = sym.getHeight()
        symOcrOption = sym.getOcrOption()

        # skip symbols whose OCR option says text inside must not be recognized
        if symOcrOption != SymbolBase.OCR_OPTION_NOT_EXEC:
            if (pt[0] >= symSp[0] and pt[0] <= symSp[0] + symWidth) and (pt[1] >= symSp[1] and pt[1] <= symSp[1] + symHeight):
                result = True
                break

    return result
|
92 | |
93 |
'''
@brief remove text from image by using ocr image
@author
'''
def removeText(img, pt, imgOCR):
    """Erase a recognized text region from *img* by XOR-ing the binarized OCR
    snippet back over it.

    @param img: grayscale source image (modified in place and returned)
    @param pt: (x, y) top-left corner of the text region within img
    @param imgOCR: binary image of the recognized text region
    @return: img with the text region blanked out
    """
    left = round(pt[0])
    top = round(pt[1])
    width, height = imgOCR.shape[::-1]

    # slightly thicken the OCR strokes before combining
    dilated = cv2.dilate(imgOCR, np.ones((1, 1), np.uint8))
    window = img[top:top + height, left:left + width]
    cleared = cv2.bitwise_xor(window, cv2.bitwise_not(dilated))
    cleared = cv2.dilate(cleared, np.ones((2, 2), np.uint8))
    img[top:top + height, left:left + width] = cleared
    return img
|
106 | |
107 |
#Convert into Grayscale image
def cvtGrayImage(img):
    """Return a grayscale copy of the given BGR image."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray
|
110 | |
111 |
'''
@brief rotate (x,y) by given angle
@author Jeongwoo
@date 2018.??.??
@history humkyung 2018.04.13 fixed code when angle is 90 or 270
         Jeongwoo 2018.04.27 Change calculation method with QTransform
'''
def getCoordOnRotatedImage(rAngle, x, y, originImageWidth, originImageHeight):
    """Map point (x, y) on the original image to its position on the image
    rotated by rAngle degrees.

    @return: (rx, ry) tuple of the mapped coordinates
    """
    transform = QTransform()
    # a 90/270 degree rotation swaps the canvas width and height
    if rAngle in (90, 270):
        transform.translate(originImageHeight * 0.5, originImageWidth * 0.5)
    elif rAngle in (0, 180):
        transform.translate(originImageWidth * 0.5, originImageHeight * 0.5)
    transform.rotate(-abs(rAngle))
    transform.translate(-originImageWidth * 0.5, -originImageHeight * 0.5)
    mapped = transform.map(QPoint(x, y))
    return (mapped.x(), mapped.y())
|
133 | |
134 |
def convertDirectionCodeToValue(directionCode):
    """Map a direction code string to its numeric value.

    @param directionCode: one of "UP", "RIGHT", "DOWN", "LEFT"
    @return: 0..3 respectively, or -1 for any unknown code
    """
    codeToValue = {"UP": 0, "RIGHT": 1, "DOWN": 2, "LEFT": 3}
    return codeToValue.get(directionCode, -1)
145 | |
146 |
def convertValueToDirectionCode(value):
    """Map a numeric direction value back to its code string.

    @param value: 0..3
    @return: "UP"/"RIGHT"/"DOWN"/"LEFT", or "NONE" for any other value
    """
    valueToCode = {0: "UP", 1: "RIGHT", 2: "DOWN", 3: "LEFT"}
    return valueToCode.get(value, "NONE")
157 | |
158 |
'''
@brief Remake rotated child symbol info
'''
def getRotatedChildInfo(additionalSymbol):
    """Rotate every child entry of a "DIR,name/DIR,name/..." string one step
    counter-clockwise and rebuild the string.

    @param additionalSymbol: "/"-separated "direction,childName" entries (may be None/empty)
    @return: the rotated child-info string ("" when input is falsy)
    """
    if not additionalSymbol:
        return ""

    rotated = []
    for child in additionalSymbol.split("/"):
        fields = child.split(",")
        direction = convertDirectionCodeToValue(fields[0])
        childName = fields[1]
        # step one position counter-clockwise; UP (0) wraps around to LEFT (3)
        direction = (direction - 1) if direction > 0 else 3
        rotated.append(convertValueToDirectionCode(direction) + "," + childName)
    return "/".join(rotated)
174 | |
175 | |
176 |
#Check object contains pt
#obj is item in searchedSymbolList
def contains(obj, pt, tw, th):
    """Return the overlap between obj's bounding box and the box (pt, tw, th)
    as a percentage of the (tw x th) box area.

    @param obj: detected symbol exposing getSp()/getWidth()/getHeight()
    @param pt: (x, y) top-left corner of the candidate box
    @param tw: candidate box width
    @param th: candidate box height
    @return: overlap percentage (float), or 0 when the boxes are disjoint
    """
    sp = obj.getSp()
    width = obj.getWidth()
    height = obj.getHeight()

    # reject as soon as the boxes are disjoint on either axis
    if sp[0] > pt[0] + tw or sp[0] + width < pt[0]:
        return 0
    if sp[1] > pt[1] + th or sp[1] + height < pt[1]:
        return 0

    #shared area
    left = max(sp[0], pt[0])
    top = max(sp[1], pt[1])
    sharedW = min(sp[0] + width, pt[0] + tw) - left
    sharedH = min(sp[1] + height, pt[1] + th) - top

    return float(sharedW * sharedH) / float(tw * th) * 100
199 | |
200 |
def getSplitSrcList(srcPid, splitCount, splitWidth, splitHeight):
    """Split the drawing into a splitCount x splitCount grid of tiles.

    @param srcPid: source image (2-D array, indexed [y, x])
    @param splitCount: number of tiles per axis
    @param splitWidth: tile width in pixels
    @param splitHeight: tile height in pixels
    @return: list of (startPoint, endPoint, subImage) tuples in row-major order
    """
    tiles = []
    for row in range(splitCount):
        for col in range(splitCount):
            sp = (splitWidth * col, splitHeight * row)
            ep = (splitWidth * (col + 1), splitHeight * (row + 1))
            tiles.append((sp, ep, srcPid[sp[1]:ep[1], sp[0]:ep[0]]))
    return tiles
208 | |
209 |
'''
@history 2018.06.12 Jeongwoo Type changed (int → float)
'''
def getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolRotatedAngle, rotateSymbolWidth, rotateSymbolHeight, originalSymbolWidth, originalSymbolHeight):
    """Return the symbol's origin point as an "x,y" string on the rotated image.

    Falls back to the rotated image center when neither an additional symbol
    nor an explicit original point is given.
    """
    if additionalSymbol is None and symbolOriginalPoint is None:
        # no explicit origin: use the center of the rotated symbol image
        return str(rotateSymbolWidth // 2) + ',' + str(rotateSymbolHeight // 2)

    # NOTE(review): assumes symbolOriginalPoint is an "x,y" string whenever
    # either argument is present - confirm with callers
    fields = symbolOriginalPoint.split(',')
    rotated = getCoordOnRotatedImage(symbolRotatedAngle, float(fields[0]), float(fields[1]), originalSymbolWidth, originalSymbolHeight)
    return str(float(rotated[0])) + ',' + str(float(rotated[1]))
222 | |
223 |
'''
@history 2018.06.12 Jeongwoo Type changed (int → float)
'''
def getCalculatedConnectionPoint(symbolConnectionPointStr, symbolRotatedAngle, rotateSymbolWidth, rotateSymbolHeight, originalSymbolWidth, originalSymbolHeight):
    """Rotate every "x,y" entry of a "/"-separated connection-point string.

    @param symbolConnectionPointStr: "x,y/x,y/..." string or None
    @return: "/"-joined rotated connection points ("" when input is None)
    """
    if symbolConnectionPointStr is None:
        return ""

    rotatedPoints = []
    for item in symbolConnectionPointStr.split("/"):
        fields = item.split(',')
        rPt = getCoordOnRotatedImage(symbolRotatedAngle, float(fields[0]), float(fields[1]), originalSymbolWidth, originalSymbolHeight)
        rotatedPoints.append(str(float(rPt[0])) + ',' + str(float(rPt[1])))
    return "/".join(rotatedPoints)
240 |
|
241 |
'''
@brief Add symbols
@author jwkim
@date
@history Change parameter (mpCount → hitRate)
'''
def addSearchedSymbol(id, sName, sType
                      , sp, w, h, threshold, minMatchCount, hitRate, rotatedAngle
                      , isDetectOnOrigin, rotateCount, ocrOption, isContainChild
                      , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect):
    """Build a Symbol from the detection data and append it to the global result list."""
    global searchedSymbolList

    searchedSymbolList.append(symbol.Symbol(id, sName, sType
                                            , sp, w, h, threshold, minMatchCount, hitRate, rotatedAngle
                                            , isDetectOnOrigin, rotateCount, ocrOption, isContainChild
                                            , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect))
258 | |
259 | |
260 |
#Calculate count of keypoint match result
def getMatchPointCount(src, cmp):
    """Count ORB keypoint matches between two images that pass Lowe's ratio test.

    @param src: first (grayscale) image
    @param cmp: second (grayscale) image
    @return: number of matches whose best distance < 0.85 * second-best distance
    """
    orb = cv2.ORB_create(1000, 2.0, 2, 1)

    # keypoints themselves are not needed, only the descriptors
    _, des1 = orb.detectAndCompute(src, None)
    _, des2 = orb.detectAndCompute(cmp, None)

    FLANN_INDEX_LSH = 6
    # table_number : The number of hash tables use
    # key_size : The length of the key in the hash tables
    # multi_probe_level : Number of levels to use in multi-probe (0 for standard LSH)
    #                     It controls how neighboring buckets are searched
    #                     Recommended value is 2
    # checks : specifies the maximum leafs to visit when searching for neighbours.
    # LSH : Locality-Sensitive Hashing
    # ref : https://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
    index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=20, key_size=10, multi_probe_level=4)
    search_params = dict(checks=100)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # ratio test as per Lowe's paper; knnMatch may return fewer than two
    # neighbours for a descriptor, so guard on the pair length first.
    # (removed unused matchesMask local - dead code)
    matchCount = sum(1 for pair in matches
                     if len(pair) == 2 and pair[0].distance < 0.85 * pair[1].distance)

    return matchCount
|
297 | |
298 |
'''
@brief detect symbols on PID
@history humkyung 2018.06.08 add parameteres for signal
'''
def detectSymbolsOnPid(mainRes, targetSymbols, listWidget, updateProgressSignal):
    """Run symbol detection for every target symbol on the given drawing."""
    for targetSymbol in targetSymbols:
        detectSymbolOnPid(mainRes, targetSymbol, listWidget, updateProgressSignal)
305 | |
306 |
'''
@brief detect symbol on PID
@author jwkim
@date
@history humkyung 2018.04.06 check if symbol file exists
         Jeongwoo 2018.05.29 Change method to adjust detail symbol location with hit-rate. Not feature point count
                  Change parameter on add symbol part (mpCount → hitRate)
                  Remove unusing calculation (avg)
         Jeongwoo 2018.06.27 Remove part to split P&ID image and for loop
'''
def detectSymbolOnPid(mainRes, targetSymbol, listWidget, updateProgressSignal):
    # Template-match one target symbol against the drawing at every 90-degree
    # rotation and append each accepted hit to searchedSymbolList.
    # mainRes is not referenced in this body; kept for signature compatibility.
    global src
    global srcGray
    global ocrCompletedSrc
    global afterDenoising
    global threadLock
    global searchedSymbolList
    global maxProgressValue  # NOTE(review): defined elsewhere in this module - not visible in this chunk

    try:
        # unpack the target symbol's detection parameters
        symId = targetSymbol.getId()
        symbolName = targetSymbol.getName()
        symbolType = targetSymbol.getType()
        symbolPath = targetSymbol.getPath()
        symbolThreshold = targetSymbol.getThreshold()          # template-match acceptance threshold
        symbolMinMatchCount = targetSymbol.getMinMatchCount()  # min keypoint matches (<=0 disables the check)
        isDetectOnOrigin = targetSymbol.getIsDetectOnOrigin()
        symbolRotateCount = targetSymbol.getRotationCount()
        symbolOcrOption = targetSymbol.getOcrOption()
        isContainChild = targetSymbol.getIsContainChild()
        symbolOriginalPoint = targetSymbol.getOriginalPoint()
        symbolConnectionPoint = targetSymbol.getConnectionPoint()
        baseSymbol = targetSymbol.getBaseSymbol()
        additionalSymbol = targetSymbol.getAdditionalSymbol()
        isExceptDetect = targetSymbol.getIsExceptDetect()

        # check if symbol file is target or not
        if isExceptDetect == 1:
            item = QListWidgetItem('{} file is not target'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('green'))
            listWidget.addItem(item)
            return

        foundSymbolCount = 0

        # check if symbol file exists
        if not os.path.isfile(symbolPath):
            item = QListWidgetItem('{} file not found'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('red'))
            listWidget.addItem(item)
            return
        # up to here

        sym = cv2.imread(symbolPath, 1)
        symGray = cvtGrayImage(sym)
        ## TODO: the symbol is not detected when the image is binarized
        ## symGray = cv2.threshold(cvtGrayImage(sym), 127, 255, cv2.THRESH_BINARY)[1]
        ## cv2.imshow('symbol', symGray)
        ## cv2.waitKey(0)
        sow, soh = symGray.shape[::-1] # symbol original w, h

        offsetDrawingArea=[]
        docData = AppDocData.instance()
        area = docData.getArea('Drawing')
        if area is not None:
            # restrict matching to the configured drawing area
            copiedBasePid = area.img.copy()
            offsetDrawingArea.append(area.x)
            offsetDrawingArea.append(area.y)
        else:
            offsetDrawingArea.append(0)
            offsetDrawingArea.append(0)
            # no drawing area configured: fall back to the full-size images
            if isDetectOnOrigin == 1:
                copiedBasePid = srcGray.copy()
            else:
                copiedBasePid = ocrCompletedSrc.copy()
        srcWidth, srcHeight = copiedBasePid.shape[::-1]

        roiItemSp = (0,0)
        roiItemEp = (srcWidth, srcHeight)
        roiItem = copiedBasePid

        symbolRotatedAngle = 0
        ## rotation count comes from the user, so add 1 to include the unrotated pass
        for rc in range(symbolRotateCount + 1):
            sw, sh = symGray.shape[::-1]
            roiw = (roiItemEp[0] - roiItemSp[0])
            roih = (roiItemEp[1] - roiItemSp[1])

            ## Case : Bigger Symbol than Split ROI - rotate and try the next pass
            if roiw < sw or roih < sh:
                symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
                symbolRotatedAngle = symbolRotatedAngle + 90

                if baseSymbol is not None and additionalSymbol is not None:
                    additionalSymbol = getRotatedChildInfo(additionalSymbol)
                continue

            ## get Rotated Original Point
            originalPoint = getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolRotatedAngle, sw, sh, sow, soh)
            connectionPoint = getCalculatedConnectionPoint(symbolConnectionPoint, symbolRotatedAngle, sw, sh, sow, soh)

            ## Template Matching
            tmRes = cv2.matchTemplate(roiItem, symGray, cv2.TM_CCOEFF_NORMED)
            loc = np.where(tmRes >= symbolThreshold)

            for pt in zip(*loc[::-1]):
                overlapArea = 0
                mpCount = 0 # Match Point Count
                symbolIndex = -1

                roi = roiItem[pt[1]:pt[1]+sh, pt[0]:pt[0]+sw]

                # optional keypoint verification on top of template matching
                if symbolMinMatchCount > 0:
                    mpCount = getMatchPointCount(roi, symGray)
                    if not (mpCount >= symbolMinMatchCount):
                        continue

                searchedItemSp = (roiItemSp[0]+pt[0] + round(offsetDrawingArea[0]), roiItemSp[1]+pt[1] + round(offsetDrawingArea[1]))

                # look for a previously recorded symbol overlapping this hit
                for i in range(len(searchedSymbolList)):
                    overlapArea = contains(searchedSymbolList[i], searchedItemSp, sw, sh)
                    if overlapArea > ACCEPT_OVERLAY_AREA:
                        symbolIndex = i
                        break

                hitRate = tmRes[pt[1], pt[0]]
                ## DEBUG
                #print('{}:{}-{}'.format(symbolName, searchedItemSp, hitRate))
                ## up to here

                ## overlapping area is below the threshold: record as a new symbol
                if overlapArea <= ACCEPT_OVERLAY_AREA:
                    threadLock.acquire()
                    foundSymbolCount = foundSymbolCount + 1
                    addSearchedSymbol(symId, symbolName, symbolType
                                      , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                                      , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                      , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                    threadLock.release()
                ## overlapping area exceeds the threshold: resolve against the existing hit
                else:
                    if symbolIndex != -1 and symbolIndex < len(searchedSymbolList):
                        searchedSymbol = searchedSymbolList[symbolIndex]
                        ## same symbol as the existing hit: keep whichever has the higher hit rate
                        if symbolName == searchedSymbol.getName():
                            symbolHitRate = searchedSymbol.getHitRate()
                            if symbolHitRate < hitRate:
                                threadLock.acquire()
                                searchedSymbolList[symbolIndex] = symbol.Symbol(symId, symbolName, symbolType
                                                                                , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                                                                                , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                                                                , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                                ## DEBUG
                                #print('//// {}:{}-{} ////'.format(symbolName, searchedItemSp, hitRate))
                                ## up to here

                                threadLock.release()
                        ## different symbol overlapping the existing one (e.g. contained in an equipment symbol)
                        elif docData.isEquipmentType(searchedSymbol.getType()):
                            ## DEBUG
                            print('{}->{}:{}-{}'.format(searchedSymbol.getName(), symbolName, searchedItemSp, hitRate))
                            ## up to here

                            threadLock.acquire()
                            foundSymbolCount = foundSymbolCount + 1
                            # NOTE(review): hitRate is passed where the minMatchCount argument is
                            # expected (unlike the call above) - confirm this is intentional
                            addSearchedSymbol(symId, symbolName, symbolType
                                              , searchedItemSp, sw, sh, symbolThreshold, hitRate, hitRate, symbolRotatedAngle
                                              , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                              , originalPoint, connectionPoint, baseSymbol, additionalSymbol,isExceptDetect)
                            threadLock.release()

            ## Rotate Symbol for the next pass
            symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
            symbolRotatedAngle = symbolRotatedAngle + 90

            if additionalSymbol is not None:
                additionalSymbol = getRotatedChildInfo(additionalSymbol)

        threadLock.acquire()
        listWidget.addItem('Found Symbol : ' + os.path.splitext(os.path.basename(symbolPath))[0] + ' - (' + str(foundSymbolCount) + ')')
        threadLock.release()

        updateProgressSignal.emit(maxProgressValue)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
490 | |
491 |
'''
@history 2018.05.17 Jeongwoo Bitwise_not target changed (Original Image → Symbol Image)
'''
def removeDetectedSymbol(sym):
    """Erase a detected symbol from the global grayscale and OCR images by
    XOR-ing its (rotated, binarized) template over the detected location."""
    global srcGray
    global ocrCompletedSrc
    global threadLock

    sp = sym.getSp()
    sw = sym.getWidth()
    sh = sym.getHeight()

    # load the symbol template and binarize it
    symImg = cv2.imread(sym.getPath())
    symImg = cv2.threshold(cvtGrayImage(symImg), 127, 255, cv2.THRESH_BINARY)[1]

    # rotate the template to match the detected orientation
    for _ in range(sym.getRotatedAngle() // 90):
        symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

    threadLock.acquire()
    window = srcGray[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw]
    erased = cv2.bitwise_xor(cv2.bitwise_not(symImg), window)
    erased = cv2.dilate(erased, np.ones((5, 5), np.uint8))
    srcGray[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw] = erased
    ocrCompletedSrc[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw] = erased
    threadLock.release()
519 | |
520 |
def drawRectOnSrc(sym):
    """Draw a red bounding rectangle for the detected symbol on the global
    source image.

    @param sym: detected symbol exposing getSp()/getWidth()/getHeight()
    """
    global src

    # removed dead code: the original read and gray-converted the symbol's
    # image file (and its rotation angle) without ever using the result
    sp = sym.getSp()
    sw = sym.getWidth()
    sh = sym.getHeight()

    cv2.rectangle(src, sp, (sp[0]+sw, sp[1]+sh), (0, 0, 255), 2)
534 | |
535 |
'''
@history 2018.04.27 Jeongwoo Remove Tesseract Log on listWidget
         2018.05.04 Jeongwoo Change method to OCR with tesseract_ocr_module.py
         2018.05.09 Jeongwoo Add global variable textInfoList, Remove text in symbol and Add tesseract result text
         2018.05.10 Jeongwoo Remove not used if-statement
         2018.06.19 Jeongwoo When detect text in symbol, use getTextAreaInfo() and Tesseract
         2018.06.21 Jeongwoo Add if-statement for way to detect text by Type A
'''
def drawFoundSymbols(symbol, listWidget):
    """Stamp the found symbol's (rotated) template image onto the global canvas
    at its detected position; bitwise AND keeps existing dark pixels.

    @param symbol: detected symbol (path, position, rotation)
    @param listWidget: UI log widget (currently unused in this body)
    """
    global canvas

    # removed unused locals (symbolId, width, height, ocrOption) and
    # unused global declarations - dead code
    symbolPath = symbol.getPath()
    symbolSp = symbol.getSp()
    symbolRotatedAngle = symbol.getRotatedAngle()

    symImg = cv2.imread(symbolPath, 1)
    # rotate the template to the detected orientation
    for _ in range(symbolRotatedAngle // 90):
        symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

    chan, w, h = symImg.shape[::-1]
    canvas[symbolSp[1]:symbolSp[1]+h, symbolSp[0]:symbolSp[0]+w] = cv2.bitwise_and(canvas[symbolSp[1]:symbolSp[1]+h, symbolSp[0]:symbolSp[0]+w], symImg)
564 |
|
565 |
'''
@brief draw found symbols and texts
@author Jeongwoo
'''
def drawFoundSymbolsOnCanvas(drawingPath , textInfos , listWidget):
    """Render every detected symbol plus the recognized text regions onto a
    white canvas and save it as FOUND_<drawing> in the project temp folder."""
    global src
    global srcGray
    global ocrCompletedSrc
    global searchedSymbolList
    global canvas

    # start from an all-white canvas the size of the source drawing
    canvas = np.zeros(src.shape, np.uint8)
    canvas[::] = (255, 255, 255)

    try:
        docData = AppDocData.instance()
        project = docData.getCurrentProject()

        # renamed loop variable: 'symbol' shadowed the imported module
        for foundSymbol in searchedSymbolList:
            drawFoundSymbols(foundSymbol, listWidget)

        for text in textInfos:
            #if not checkTextInSymbol((text.getX(), text.getY())):
            left = text.getX()
            top = text.getY()
            right = left + text.getW()
            bottom = top + text.getH()

            # copy the original pixels of the text region onto the canvas
            canvas[top:bottom, left:right] = src[top:bottom, left:right]

        cv2.imwrite(os.path.join(project.getTempPath(), "FOUND_" + os.path.basename(drawingPath)), canvas)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
598 |
|
599 |
#Generate target symbol data list
'''
@history 2018.04.24 Jeongwoo Add isExceptDetect Field
         2018.05.09 Jeongwoo Add targetSymbolList clear
'''
def initTargetSymbolDataList():
    """Reload the global target symbol list from the project database.

    @return: the refreshed targetSymbolList
    """
    ############ region SQLite
    global targetSymbolList

    targetSymbolList.clear()
    # removed unused 'dict = {}' local (shadowed the builtin) and the large
    # commented-out grouping/sorting block it belonged to - dead code
    symbolList = AppDocData.instance().getTargetSymbolList()
    targetSymbolList.extend(symbolList)
    ############ endregion SQLite

    return targetSymbolList
|
635 | |
636 |
'''
@brief detect text areas
@author humkyung
@date 2018.06.16
'''
def detectTextAreas(filePath):
    """Detect text areas on the drawing using either Azure OCR (mode 0) or the
    internal text-area detector (non-zero mode).

    @param filePath: path of the drawing image file
    @return: list of detected text-info items (empty on error or missing file)
    """
    global srcGray

    tInfoList = []
    try:
        if os.path.isfile(filePath):
            area = AppDocData.instance().getArea('Drawing')
            configs = AppDocData.instance().getConfigs('Text Area', 'Text Area')
            # renamed from 'type' (shadowed the builtin); hoisted the repeated
            # area-is-None ternaries into named locals
            detectionMode = int(configs[0].value) if 1 == len(configs) else 0

            img = area.img if area is not None else srcGray
            offsetX = area.x if area is not None else 0
            offsetY = area.y if area is not None else 0
            if detectionMode == 0:
                (_tempOcrSrc, tInfoList) = OCR.removeTextFromNpArray(img, offsetX, offsetY)
            else:
                tInfoList = getTextAreaInfo(filePath, img, offsetX, offsetY)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return tInfoList
|
659 | |
660 |
'''
@brief read image drawing and then remove text
@author jwkim
@date
@history humkyung 2018.04.06 check if file exists
         Jeongwoo 2018.05.09 Use Tesseract OCR after Azure OCR (Azure OCR : Getting text area)
         Jeongwoo 2018.05.25 Add condition on if-statement
         Jeongwoo 2018.06.05 Get text area data list by config.type
         Jeongwoo 2018.06.08 Add angle Parameter on TOCR.getTextInfo
         humkyung 2018.06.16 update proessbar while recognizing text
'''
def initMainSrc(mainRes, tInfoList, updateProgressSignal, listWidget):
    # Run Tesseract over each detected text area of the OCR_<drawing> image,
    # accumulate results in textInfoList, erase recognized text from
    # ocrCompletedSrc, then OCR the 'Note' area separately.
    global srcGray
    global ocrCompletedSrc
    global textInfoList
    global noteTextInfoList
    global maxProgressValue  # NOTE(review): defined elsewhere in this module - not visible in this chunk

    try:
        docData = AppDocData.instance()
        project = docData.getCurrentProject()

        # OCR_<name> image is produced by an earlier OCR step - TODO confirm producer
        path = os.path.join(project.getTempPath(), 'OCR_' + os.path.basename(mainRes))
        if os.path.isfile(path):
            ocrCompletedSrc = srcGray.copy()
            ocrCompletedSrc = cv2.threshold(ocrCompletedSrc, 127, 255, cv2.THRESH_BINARY)[1]

            imgOCR = cv2.imread(path, 1)
            imgOCR = cv2.threshold(cvtGrayImage(imgOCR), 127, 255, cv2.THRESH_BINARY)[1]

            global MIN_TEXT_SIZE
            area = AppDocData.instance().getArea('Drawing')
            for tInfo in tInfoList:
                # skip text boxes that are too small in both dimensions
                if tInfo.getW() >= MIN_TEXT_SIZE or tInfo.getH() >= MIN_TEXT_SIZE:
                    # text coordinates are drawing-absolute; shift into area-local space
                    x = tInfo.getX() - round(area.x)
                    y = tInfo.getY() - round(area.y)
                    img = imgOCR[y:y+tInfo.getH(), x:x+tInfo.getW()]
                    resultTextInfo = TOCR.getTextInfo(img, (x, y), tInfo.getAngle())
                    if resultTextInfo is not None and len(resultTextInfo) > 0:
                        # convert the results back to drawing-absolute coordinates
                        for result in resultTextInfo:
                            result.setX(result.getX() + round(area.x))
                            result.setY(result.getY() + round(area.y))
                        textInfoList.extend(resultTextInfo)
                        ocrCompletedSrc = removeText(ocrCompletedSrc, (tInfo.getX(), tInfo.getY()), img)

                        item = QListWidgetItem('{},{},{} is recognized'.format(resultTextInfo[0].getX(), resultTextInfo[0].getY(), resultTextInfo[0].getText()))
                        listWidget.addItem(item)
                    else:
                        pass

                # advance the progress bar once per processed text area
                updateProgressSignal.emit(maxProgressValue)

            cv2.imwrite(os.path.join(project.getTempPath(), "XOCR_" + os.path.basename(mainRes)), ocrCompletedSrc)

        # parse Note
        noteArea = AppDocData.instance().getArea('Note')
        if noteArea is not None:
            noteArea.img = srcGray[round(noteArea.y-1):round(noteArea.y+noteArea.height-1), round(noteArea.x-1):round(noteArea.x+noteArea.width-1)]
            noteTextInfoList = TOCR.getTextInfo(noteArea.img, (noteArea.x, noteArea.y))
        updateProgressSignal.emit(maxProgressValue)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
722 | |
723 |
'''
@brief remove small objects from given image
@author humkyung
@date 2018.04.26
@history 2018.05.25 Jeongwoo Moved from MainWindow
'''
def removeSmallObjects(image):
    """Paint contours whose area lies within the configured (Min Area, Max Area)
    range white, removing small specks from the binary image.

    @param image: single-channel binary image (modified in place)
    @return: the cleaned image (the input image unchanged on error)
    '''"""
    # fixed: contourImage was unbound (NameError at return) when an exception
    # fired before the drawContours assignment
    contourImage = image
    try:
        docData = AppDocData.instance()
        configs = docData.getConfigs('Small Object Size', 'Min Area')
        minArea = int(configs[0].value) if 1 == len(configs) else 20
        configs = docData.getConfigs('Small Object Size', 'Max Area')
        maxArea = int(configs[0].value) if 1 == len(configs) else 50

        # OpenCV 3.x findContours returns (image, contours, hierarchy)
        _, contours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        selectedContours = [contour for contour in contours
                            if minArea < cv2.contourArea(contour) < maxArea]
        # draw selected contours filled with white color to erase them
        contourImage = cv2.drawContours(image, selectedContours, -1, (255, 255, 255), -1)
        #path = os.path.join(AppDocData.instance().getCurrentProject().getTempPath(), 'contours.png')
        #cv2.imwrite(path, contourImage)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return contourImage
|
751 | |
752 |
'''
|
753 |
@history 2018.05.25 Jeongwoo Moved from MainWindow
|
754 |
2018.05.28 Jeongwoo Add xmlPath Parameter and append LineInfo into xml
|
755 |
2018.05.29 Jeongwoo Change method to add item
|
756 |
2018.05.30 Jeongwoo Remove parameter (xmlPath)
|
757 |
humkyung 2018.06.11 add drawing path to parameter and write recognized lines to image
|
758 |
'''
|
759 |
def recognizeLine(path, listWidget, graphicsView):
    """Detect lines on the drawing area and add them to the scene.

    Removes previously recognized line/flow-arrow items from the scene,
    detects lines connected to recognized symbols, merges/connects them,
    and finally creates QEngineeringLineItem objects on the scene.

    @param path         drawing file path (currently unused in the body;
                        kept for the caller's signature)
    @param listWidget   Qt list widget used for progress/status messages
    @param graphicsView viewer whose `scene` holds symbol/line items
    """
    from shapely.geometry import Point, LineString
    from SymbolSvgItem import SymbolSvgItem
    from QEngineeringFlowArrowItem import QEngineeringFlowArrowItem
    from QEngineeringLineNoTextItem import QEngineeringLineNoTextItem
    from QEngineeringTextItem import QEngineeringTextItem
    from QEngineeringLineItem import QEngineeringLineItem
    from LineDetector import LineDetector

    try:
        # remove already existing line and flow arrow item
        items = [item for item in graphicsView.scene.items() if (type(item) is QEngineeringLineItem) or (type(item) is QEngineeringFlowArrowItem)]
        for item in items:
            graphicsView.scene.removeItem(item)
        # up to here

        # detect line
        connectedLines = []

        area = AppDocData.instance().getArea('Drawing')
        # NOTE(review): assumes removeSmallObjects is defined elsewhere in this
        # file and returns a cleaned copy/view of the drawing-area image
        area.img = removeSmallObjects(area.img)
        detector = LineDetector(area.img)

        # collect symbols and the lines attached to each of them; detector
        # coordinates are relative to the drawing area origin (area.x, area.y)
        symbols = []
        for item in graphicsView.scene.items():
            if issubclass(type(item), SymbolSvgItem):
                symbols.append(item)
                res = detector.detectConnectedLine(item, round(area.x), round(area.y))
                if res is not None:
                    connectedLines.extend(res)

        # line-number text items (collected but not used below in this block)
        lineNos = [item for item in graphicsView.scene.items() if type(item) is QEngineeringLineNoTextItem]

        if len(connectedLines) > 1:
            detector.mergeLines(connectedLines, toler=5)

        # connect line to symbol
        try:
            for line in connectedLines:
                matches = [symbol for symbol in symbols if symbol.isConnectable(line, (round(area.x), round(area.y)), toler=20)]
                for symbol in matches:
                    detector.connectLineToSymbol(line, (round(area.x), round(area.y)), symbol)
        except Exception as ex:
            print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
        # up to here

        # connect line to line
        toler = 10
        try:
            for line in connectedLines:
                # candidates: other, non-parallel lines
                matches = [it for it in connectedLines if (it is not line) and (not detector.isParallel(line, it))]

                # get closest line: keep candidates whose either endpoint is
                # within `toler` of this line
                selected = []
                shapelyLine = LineString(line)
                for match in matches:
                    dist = [shapelyLine.distance(Point(match[0][0], match[0][1])), shapelyLine.distance(Point(match[1][0], match[1][1]))]
                    if dist[0] < toler or dist[1] < toler:
                        selected.append(match)
                # up to here

                for match in selected:
                    detector.connectLineToLine(match, line, toler)
        except Exception as ex:
            print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
        # up to here

        width, height = area.img.shape[::-1]
        size = max(width, height)  # NOTE(review): computed but unused below
        lines = []
        for pts in connectedLines:
            # translate detector-local vertices back to image coordinates
            processLine = QEngineeringLineItem(vertices=[(area.x + param[0], area.y + param[1]) for param in pts])
            processLine.buildItem()
            processLine.addLineItemToScene(graphicsView.scene)
            lines.append(processLine)

            if processLine.length() > 100:  # TODO: check critical length
                processLine.addFlowArrow()
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
    finally:
        listWidget.addItem('Finish Line searching')
840 |
|
841 |
'''
|
842 |
@brief Main function
|
843 |
@author Jeongwoo
|
844 |
@date
|
845 |
@history humkyung 2018.04.06 change error display from message box to print
|
846 |
Jeongwoo 2018.04.25 Remove 'Current Symbol : ' QListItem
|
847 |
Jeongwoo 2018.05.09 Make Comments OCR.removeTextFromNpArray block
|
848 |
Jeongwoo 2018.05.25 Remove imgLineList variable and parameter on writeXml()
|
849 |
humkyung 2018.05.26 add parameters(graphicsView, isSymbolTextChecked, isLineChecked)
|
850 |
Jeongwoo 2018.05.28 Add/Remove Parameters(Add : signal / Remove : graphicsView, isLineChecked)
|
851 |
Jeongwoo 2018.05.30 Remove return value
|
852 |
humkyung 2018.06.08 add signal for progressbar to parameter
|
853 |
humkyung 2018.06.11 get difference between original and recognized image
|
854 |
Jeongwoo 2018.06.21 If noteTextInfoList is None, change from None to empty list
|
855 |
'''
|
856 |
def executeRecognition(signal, updateProgressSignal, path, listWidget, isSymbolTextChecked):
    """Run the full symbol/text recognition pipeline on the given drawing.

    Loads the drawing, masks the configured equipment-description area,
    detects text areas and symbols with a thread pool, erases detected
    items from the working image, writes intermediate images to the
    project temp folder, and finally emits `signal` with the results.

    @param signal               emitted with (symbols, textInfos, noteTextInfos)
    @param updateProgressSignal progress-bar signal forwarded to workers
    @param path                 drawing image file path
    @param listWidget           Qt list widget for status messages
    @param isSymbolTextChecked  when False, detection is skipped entirely
    """
    import re

    # module-level working state shared with the worker functions
    global src
    global srcGray
    global ocrCompletedSrc
    global searchedSymbolList
    global threadLock
    global textInfoList
    global noteTextInfoList
    global maxProgressValue

    try:
        docData = AppDocData.instance()
        project = docData.getCurrentProject()

        srcList = []
        srcList.append(path)

        initTargetSymbolDataList()

        for mainRes in srcList:
            # Init src
            src = []
            srcGray = []
            ocrCompletedSrc = []
            searchedSymbolList = []
            textInfoList = []

            if not os.path.isfile(mainRes):
                item = QListWidgetItem('{} file is not found'.format(os.path.basename(mainRes)))
                item.setBackground(Qt.red)
                listWidget.addItem(item)
                continue

            # load original src & symbol
            src = cv2.imread(mainRes, 1)

            # gray scale, then binarize at 127
            if len(src.shape) == 3:
                srcGray = cvtGrayImage(src)
            else:
                srcGray = src.copy()
            srcGray = cv2.threshold(srcGray, 127, 255, cv2.THRESH_BINARY)[1]

            # remove equipment desc. area (fill white) using per-image config
            configs = docData.getConfigs('{} Equipment Desc Area'.format(docData.imgName))
            for config in configs:
                found = re.findall('\d+', config.value)
                if len(found) == 4:
                    cv2.rectangle(srcGray, (int(found[0]), int(found[1])), (int(found[0])+int(found[2]), int(found[1])+int(found[3])), 255, -1)
            # up to here

            area = docData.getArea('Drawing')
            if area is not None:
                area.img = srcGray[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]

            listWidget.addItem("Start recognition : " + mainRes)

            if isSymbolTextChecked:
                threadLock.acquire()
                textAreas = detectTextAreas(mainRes)
                ### calculate total count of symbol (for the progress bar)
                maxProgressValue = len(textAreas) + 1
                for targetItem in targetSymbolList:
                    if type(targetItem) is list:
                        maxProgressValue += len(targetItem)
                    else:
                        maxProgressValue += 1
                ### up to here
                threadLock.release()

                initMainSrc(mainRes, textAreas, updateProgressSignal, listWidget)

                # detect symbols concurrently; list entries fan out to the
                # multi-symbol variant
                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for targetItem in targetSymbolList:
                    if type(targetItem) is list:
                        pool.submit(detectSymbolsOnPid, mainRes, targetItem, listWidget, updateProgressSignal)
                    else:
                        pool.submit(detectSymbolOnPid, mainRes, targetItem, listWidget, updateProgressSignal)
                pool.shutdown(wait = True)

                ## DEBUG: dump each found symbol tile to the temp folder
                print('----------')
                for item in searchedSymbolList:
                    print('{}:{}-{}'.format(item.getName(), item.getSp(), item.hitRate))
                    _img = srcGray[round(item.getSp()[1]):round(item.getSp()[1]+item.getHeight()), round(item.getSp()[0]):round(item.getSp()[0]+item.getWidth())]
                    cv2.imwrite(os.path.join(project.getTempPath(), 'Tile', item.getName()+'.png'), _img)
                ## up to here

                chan, docData.imgWidth, docData.imgHeight = src.shape[::-1]
                drawFoundSymbolsOnCanvas(mainRes, textInfoList, listWidget)

                docData.imgName = os.path.splitext(os.path.basename(mainRes))[0]

                # erase detected symbols from srcGray and outline them on src
                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for sym in searchedSymbolList:
                    pool.submit(removeDetectedSymbol, sym)
                    pool.submit(drawRectOnSrc, sym)
                pool.shutdown(wait = True)

                global MIN_TEXT_SIZE
                # erase recognized text regions that are large enough
                for textInfo in textInfoList:
                    if textInfo.getW() >= MIN_TEXT_SIZE or textInfo.getH() >= MIN_TEXT_SIZE:
                        removeText(srcGray, (textInfo.getX(), textInfo.getY()), srcGray[textInfo.getY():textInfo.getY()+textInfo.getH(), textInfo.getX():textInfo.getX()+textInfo.getW()])

                ## Remove Noise (morphological close with a 2x2 kernel)
                kernel1 = np.ones((2, 2), np.uint8)
                srcGray = cv2.dilate(srcGray, kernel1)
                # kernel2 = np.ones((4, 4), np.uint8)
                srcGray = cv2.erode(srcGray, kernel1)

                removedSymbolImgPath = os.path.join(project.getTempPath(), os.path.basename(path))
                cv2.imwrite(removedSymbolImgPath, srcGray)
                area = AppDocData.instance().getArea('Drawing')
                if area is not None:
                    area.img = srcGray[round(area.y+1):round(area.y+area.height), round(area.x+1):round(area.x+area.width)]
                cv2.imwrite(os.path.join(project.getTempPath(), "RECT_" + os.path.basename(path)), src)

                listWidget.addItem("Recognized symbol count : " + str(len(searchedSymbolList)))

                # get difference between original and recognized image
                foundFilePath = os.path.join(project.getTempPath(), "FOUND_" + os.path.basename(path))
                getDifference(path, foundFilePath)
                # up to here

                signal.emit(searchedSymbolList, textInfoList, noteTextInfoList if noteTextInfoList is not None else [])
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
985 |
|
986 |
'''
|
987 |
@brief draw contour to image
|
988 |
@author humkyung
|
989 |
@date 2018.06.18
|
990 |
'''
|
991 |
def drawContour(img, contour):
    """Paint one contour onto *img*.

    Uses the signed (oriented) contour area to choose the fill: a
    non-negative area is filled black and traced with a 1px white border,
    a negative area is filled solid white.
    """
    signed_area = cv2.contourArea(contour, True)  # True -> signed/oriented area
    if signed_area < 0:
        # negative orientation: erase by filling the region white
        cv2.drawContours(img, [contour], -1, (255,255,255), -1)
        return
    # non-negative orientation: solid black fill plus a thin white outline
    cv2.drawContours(img, [contour], -1, (0,0,0), -1)
    cv2.drawContours(img, [contour], -1, (255,255,255), 1)
998 | |
999 |
'''
|
1000 |
@brief Get Text Area info by contour
|
1001 |
@author Jeongwoo
|
1002 |
@date 2018.06.05
|
1003 |
@history 2018.06.08 Jeongwoo Add angle
|
1004 |
humkyung 2018.06.18 fixed logic to detect text area
|
1005 |
'''
|
1006 |
def getTextAreaInfo(filePath, imgGray, offsetX, offsetY):
    """Detect candidate text areas in a gray image via contour analysis.

    Draws filtered contours to an intermediate image, erodes it to clump
    characters into word/line blobs, classifies each blob as horizontal
    (angle 0) or vertical (angle 90), merges nearby same-orientation
    rectangles, and returns the result as TextInfo objects.

    Intermediate images ('OCR_*', 'ERODED_OCR_*', 'Tile/H-*'/'Tile/V-*')
    are written to the project temp folder for debugging.

    @param filePath  source image path (used for temp-file naming)
    @param imgGray   grayscale image to analyze
    @param offsetX   x offset added to every returned TextInfo position
    @param offsetY   y offset added to every returned TextInfo position
    @return          list of ti.TextInfo (empty text, position, size, angle)
    """
    from AppDocData import AppDocData

    docData = AppDocData.instance()
    project = docData.getCurrentProject()

    configs = docData.getConfigs('Text Size', 'Max Text Size')
    maxTextSize = int(configs[0].value) if 1 == len(configs) else 100

    contourImg = np.ones(imgGray.shape, np.uint8) * 255
    binaryImg, mask = cv2.threshold(imgGray, 127, 255, cv2.THRESH_BINARY)

    image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        # remove too big one
        [x, y, w, h] = cv2.boundingRect(contour)
        if (w > maxTextSize and h > maxTextSize): continue

        area = cv2.contourArea(contour, True)  # signed area (True -> oriented)
        if area >= 0:
            cv2.drawContours(contourImg, [contour], -1, (0,0,0), -1)
            cv2.drawContours(contourImg, [contour], -1, (255,255,255), 1)
        else:
            cv2.drawContours(contourImg, [contour], -1, (255,255,255), -1)

    path = os.path.join(project.getTempPath(), 'OCR_' + os.path.basename(filePath))
    cv2.imwrite(path, contourImg)

    rects = []
    configs = docData.getConfigs('Text Recognition', 'Expand Size')
    expandSize = int(configs[0].value) if 1 == len(configs) else 10
    configs = docData.getConfigs('Text Recognition', 'Shrink Size')
    shrinkSize = int(configs[0].value) if 1 == len(configs) else 0

    # erode with an expandSize kernel so neighboring glyphs merge into blobs
    eroded = cv2.erode(contourImg, np.ones((expandSize, expandSize), np.uint8))

    path = os.path.join(project.getTempPath(), 'ERODED_OCR_' + os.path.basename(filePath))
    cv2.imwrite(path, eroded)

    eroded = cv2.bitwise_not(eroded)

    image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area < 0:
            [x, y, w, h] = cv2.boundingRect(contour)
            if (w < 10 and h < 10) or (w > maxTextSize and h > maxTextSize): continue  # skip too small or big one

            img = contourImg[y:y+h, x:x+w]
            img = cv2.bitwise_not(img)

            # vote horizontal vs. vertical orientation for this blob
            horizontal = 0
            vertical = 0
            if w > maxTextSize:
                horizontal = 1
            elif h > maxTextSize:
                vertical = 1
            else:
                if shrinkSize > 0:
                    img = cv2.erode(img, np.ones((shrinkSize, shrinkSize), np.uint8))

                _, _contours, _ = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                for xx in _contours:
                    [_x, _y, _w, _h] = cv2.boundingRect(xx)
                    cv2.rectangle(img, (_x, _y), (_x+_w, _y+_h), 255, 1)

                    # NOTE(review): condition tests _w < _h but feeds the
                    # *horizontal* vote; the original comment said "width is
                    # greater than height" — kept as-is, verify intent
                    if (_w < _h) or (_w > maxTextSize and _h < maxTextSize):
                        horizontal += 1 + (_w*_h)/(w*h)
                    else:
                        vertical += 1 + (_w*_h)/(w*h)

            # debug tile named by winning orientation and bounding box
            if horizontal > vertical:
                filePath = os.path.join(project.getTempPath(), "Tile", "H-{}-{}-{}-{}.png".format(x,y,w,h))
            else:
                filePath = os.path.join(project.getTempPath(), "Tile", "V-{}-{}-{}-{}.png".format(x,y,w,h))

            cv2.imwrite(filePath, img)

            rects.append([0 if horizontal > vertical else 90, QRect(x, y, w, h)])

    configs = docData.getConfigs('Text Recognition', 'Merge Size')
    mergeSize = int(configs[0].value) if 1 == len(configs) else 10
    # merge rectangles: repeatedly union same-orientation rects whose
    # mergeSize-expanded bounds intersect, until a fixed point is reached
    intersected = True
    while intersected:
        intersected = False
        for rect in rects[:]:
            if 0 == rect[0]:
                rectExpand = rect[1].adjusted(-mergeSize, 0, mergeSize, 0)
            else:
                rectExpand = rect[1].adjusted(0, -mergeSize, 0, mergeSize)

            matches = [x for x in rects if (x[0] == rect[0]) and rectExpand.intersects(x[1])]
            if len(matches) > 1:
                united = matches[0]
                for _rect in matches:
                    united[1] = united[1].united(_rect[1])
                    if _rect in rects: rects.remove(_rect)
                rects.append(united)
                intersected = True
                break

    # fixed: the original named this `list`, shadowing the builtin
    textInfos = []
    for rect in rects:
        angle = rect[0]
        textInfos.append(ti.TextInfo('', round(offsetX) + rect[1].x(), round(offsetY) + rect[1].y(), rect[1].width(), rect[1].height(), angle))

    return textInfos
1114 |
|
1115 |
'''
|
1116 |
@brief get difference between given original and recognized image
|
1117 |
@author humkyung
|
1118 |
@date 2018.06.11
|
1119 |
'''
|
1120 |
def getDifference(orgImagePath, recImagePath):
    """Write the pixel difference between the original and recognized image.

    Binarizes both images, masks the configured equipment-description area
    in the original, XORs the drawing-area region of the original against
    the inverted recognized image, dilates the result to reduce noise, and
    writes it to the project temp folder as 'DIFF_<original name>'.

    Silently does nothing when either file is missing; exceptions are
    caught and printed (best-effort debug output, never raises).

    @param orgImagePath  path of the original drawing image
    @param recImagePath  path of the image holding recognized items
    """
    import re

    global srcGray
    global ocrCompletedSrc
    global textInfoList
    global noteTextInfoList

    try:
        docData = AppDocData.instance()
        if os.path.isfile(orgImagePath) and os.path.isfile(recImagePath):
            imgOriginal = cv2.threshold(cvtGrayImage(cv2.imread(orgImagePath, 1)), 127, 255, cv2.THRESH_BINARY)[1]
            # remove equipment desc. area (fill white) using per-image config
            configs = docData.getConfigs('{} Equipment Desc Area'.format(docData.imgName))
            for config in configs:
                # fixed: raw string — '\d' is an invalid escape in a plain literal
                found = re.findall(r'\d+', config.value)
                if len(found) == 4:
                    cv2.rectangle(imgOriginal, (int(found[0]), int(found[1])), (int(found[0])+int(found[2]), int(found[1])+int(found[3])), 255, -1)
            # up to here

            imgRecognized = cv2.threshold(cvtGrayImage(cv2.imread(recImagePath, 1)), 127, 255, cv2.THRESH_BINARY)[1]

            imgDiff = np.ones(imgOriginal.shape, np.uint8)*255

            area = docData.getArea('Drawing')
            if area is not None:
                x = round(area.x)
                y = round(area.y)
                width = round(area.width)
                height = round(area.height)
                # XOR original against inverted recognized image: pixels that
                # differ between the two survive in imgDiff
                imgNotOper = cv2.bitwise_not(imgRecognized[y:y+height, x:x+width])
                imgDiff[y:y+height, x:x+width] = cv2.bitwise_xor(imgOriginal[y:y+height, x:x+width], imgNotOper)

            # remove noise
            imgDiff = cv2.dilate(imgDiff, np.ones((2, 2), np.uint8))

            # fixed: reuse docData instead of fetching AppDocData.instance() again
            project = docData.getCurrentProject()
            cv2.imwrite(os.path.join(project.getTempPath(), "DIFF_" + os.path.basename(orgImagePath)), imgDiff)
    except Exception as ex:
        print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
1161 |
|
1162 |
if __name__ == '__main__':
    # Standalone debug driver: runs the contour/OCR text-detection pipeline
    # on a hard-coded sample drawing and writes intermediate images to a
    # hard-coded Temp folder. Developer-machine paths — not production code.
    import DTI_PID_UI
    from ProjectDialog import Ui_Dialog
    import timeit
    from PyQt5.QtCore import QRect
    from operator import itemgetter, attrgetter

    start = timeit.default_timer()
    img = cv2.imread('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/PC-K-2203_P1_800DPI.png', 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    contourImg = np.ones(imgGray.shape, np.uint8)*255
    contourOcrImg = contourImg.copy()
    # Otsu binarization; `mask` feeds contour detection below
    binaryImg,mask = cv2.threshold(imgGray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgFinal = cv2.bitwise_and(imgGray, imgGray, mask = mask)
    ret, newImg = cv2.threshold(imgFinal, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #holes = [contours[i] for i in range(len(contours)) if hierarchy[i][3] >= 0]
    for contour in contours:
        area = cv2.contourArea(contour, True)  # signed area
        if area >= 0:
            [x, y, w, h] = cv2.boundingRect(contour)

            # remove too big or small one
            if (w > 100 or h > 100) or (w < 5 or h < 5): continue

            cv2.drawContours(contourImg, [contour], -1, (0,0,0), 1)
            cv2.drawContours(contourOcrImg, [contour], -1, (0,0,0), -1)
        else:
            cv2.drawContours(contourOcrImg, [contour], -1, (255,255,255), -1)

    ''' contourImg = cv2.bitwise_not(contourImg) circles = cv2.HoughCircles(contourImg, cv2.HOUGH_GRADIENT, 1, 100) circles = np.uint16(np.around(circles))
    for i in circles[0,:]:
        cv2.circle(contourImg, (i[0], i[1]), i[2], (255,255,0), 1)
    '''

    rects = []
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (8, 8))
    #kernel1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (2,2))
    #eroded = cv2.dilate(contourImg, kernel1)
    #cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Dilate_PC-K-2203_P1_800DPI___partial.png', eroded)
    # erode so neighboring glyphs merge into text blobs
    eroded = cv2.erode(contourImg, kernel)
    image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area >= 0:
            [x, y, w, h] = cv2.boundingRect(contour)

            # remove small one less than character size
            if (w < 20 or h < 20): continue
            #if w > h:
            #    rects.append(QRect(x, y, w, h)) # expand rect
            #elif w < h:
            #    rects.append(QRect(x, y, w, h)) # expand rect
            rects.append(QRect(x, y, w, h)) # expand rect

    # merge intersecting rects by repeated union until a fixed point
    intersected = True
    while intersected:
        intersected = False
        for rect in rects[:]:
            matches = [x for x in rects if rect.intersects(x)]
            if len(matches) > 1:
                united = matches[0]
                for _rect in matches:
                    united = united.united(_rect)
                    if _rect in rects: rects.remove(_rect)
                rects.append(united)
                intersected = True
                break

    for rect in rects:
        cv2.rectangle(img, (rect.x(), rect.y()), (rect.x() + rect.width(), rect.y() + rect.height()), (255, 0, 255), 1)

    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/PC-K-2203_P1_800DPI___partial.png', img)
    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Contour_PC-K-2203_P1_800DPI___partial.png', contourOcrImg)
    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Erode_PC-K-2203_P1_800DPI___partial.png', eroded)

    chan, imgW, imgH = img.shape[::-1]
    index = 0
    for rect in rects:
        index = index + 1
        isVertical = False
        textInfoList = None
        # wide (or short) blobs are OCR'd as-is; tall ones are rotated 90 deg
        if rect.width() >= rect.height() or rect.height() < 50:
            isVertical = False
            textInfoList = TOCR.getTextInfo(contourOcrImg[rect.y():rect.y()+rect.height(), rect.x():rect.x()+rect.width()], (rect.x(), rect.y()))
        else:
            isVertical = True
            # map the rect into the rotated image's coordinate system
            transform = QTransform()
            transform.translate(imgH*0.5, imgW*0.5)
            transform.rotate(90)
            transform.translate(-imgW*0.5, -imgH*0.5)
            transRect = transform.mapRect(rect)
            rotatedContourOcrImg = cv2.rotate(contourOcrImg, cv2.ROTATE_90_CLOCKWISE)
            textInfoList = TOCR.getTextInfo(rotatedContourOcrImg[transRect.y():transRect.y()+transRect.height(), transRect.x():transRect.x()+transRect.width()], (transRect.x(), transRect.y()))

        # annotate recognized text onto img (rotate for vertical text, then back)
        if isVertical:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        if textInfoList is not None:
            for textInfo in textInfoList:
                cv2.putText(img, textInfo.getText(), (textInfo.getX(), textInfo.getY()+textInfo.getH()), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        if isVertical:
            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)

        print(str(index) + " / " + str(len(rects)) + " Finished")
        #cv2.imshow('test', img[rect.y():rect.y()+rect.height(), rect.x():rect.x()+rect.width()])
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()

    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/OCR_PC-K-2203_P1_800DPI___partial.png', img)
    stop = timeit.default_timer()
    print('FINISHED : ' + str((stop-start)/60) + ' min')

    #app = QApplication(sys.argv)

    #try:
    #    dlg = Ui_Dialog()
    #    selectedProject = dlg.showDialog()
    #    if selectedProject is not None:
    #        form = ExampleApp()
    #        form.show()
    #except Exception as ex:
    #    print('에러가 발생했습니다.\n', ex)

    #sys.exit(app.exec_())
|