# coding: utf-8

#region import libs
import http.client
import urllib, base64, json
import cv2
import numpy as np
import SymbolBase
import symbol
import TextInfo as ti
import azure_ocr_module as OCR
from PIL import Image
from io import BytesIO
import gc
import os
import glob
import math, operator
import threading
import concurrent.futures as futures
import XmlGenerator as xg
import pytesseract
import tesseract_ocr_module as TOCR
import potrace
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtSvg import *
import QtImageViewer

sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\Commands')
import CreateCommand
import CropCommand

sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\Shapes')
from EngineeringPolylineItem import QEngineeringPolylineItem
from EngineeringLineItem import QEngineeringLineItem
from SymbolSvgItem import SymbolSvgItem
from QGraphicsBoundingBoxItem import QGraphicsBoundingBoxItem
from AppDocData import AppDocData
#endregion

## Tesseract path
pytesseract.pytesseract.tesseract_cmd = os.path.join(os.environ['TESSERACT_HOME'], 'tesseract.exe')
tesseract_cmd = os.path.join(os.environ['TESSERACT_HOME'], 'tesseract.exe')

#region Symbol Image path List for test
targetSymbolList = []
#endregion

#region Global variables
searchedSymbolList = []
src = []
#srcGray = []
ocrCompletedSrc = []
afterDenoising = []
canvas = []
textInfoList = []
noteTextInfoList = []

WHITE_LIST_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-"

MIN_TEXT_SIZE = 10

THREAD_MAX_WORKER = os.cpu_count()
threadLock = threading.Lock()

ACCEPT_OVERLAY_AREA = 10
#endregion

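# Note (added for clarity): contains() below reports symbol overlap as a percentage of the
# candidate's area, so ACCEPT_OVERLAY_AREA = 10 is read as "more than 10 % overlap counts as
# the same detection". THREAD_MAX_WORKER is used to size the ThreadPoolExecutor instances in
# executeRecognition().
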
'''
    @history    2018.06.28  Jeongwoo    Remove useless condition
'''
def checkTextInSymbol(pt):
    global searchedSymbolList

    result = False
    for sym in searchedSymbolList:
        #symId = sym.getId()
        symSp = sym.getSp()
        symWidth = sym.getWidth()
        symHeight = sym.getHeight()
        symOcrOption = sym.getOcrOption()

        #categoryCode = symId // 100  # symbol id was removed (see history); kept commented out to avoid a NameError

        if symOcrOption != SymbolBase.OCR_OPTION_NOT_EXEC:
            if (pt[0] >= symSp[0] and pt[0] <= symSp[0] + symWidth) and (pt[1] >= symSp[1] and pt[1] <= symSp[1] + symHeight):
                result = True
                break

    return result

#Convert into Grayscale image
def cvtGrayImage(img):
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

'''
    @brief      rotate (x,y) by given angle
    @author     Jeongwoo
    @date       2018.??.??
    @history    humkyung 2018.04.13 fixed code when angle is 90 or 270
                Jeongwoo 2018.04.27 Change calculation method with QTransform
'''
def getCoordOnRotatedImage(rAngle, x, y, originImageWidth, originImageHeight):
    rx = None
    ry = None
    transform = QTransform()
    if rAngle == 90 or rAngle == 270:
        transform.translate(originImageHeight*0.5, originImageWidth*0.5)
    elif rAngle == 0 or rAngle == 180:
        transform.translate(originImageWidth*0.5, originImageHeight*0.5)
    transform.rotate(-abs(rAngle))
    transform.translate(-originImageWidth*0.5, -originImageHeight*0.5)
    point = QPoint(x, y)
    point = transform.map(point)
    rx = point.x()
    ry = point.y()
    return (rx, ry)

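# Note (added for clarity): getCoordOnRotatedImage() maps a point given in the original symbol
# image onto the symbol image after it has been rotated by rAngle degrees. For rAngle 90 or 270
# the rotated canvas is originImageHeight x originImageWidth, which is why the translation above
# swaps the two dimensions before the rotation is applied.
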
def convertDirectionCodeToValue(directionCode):
    if directionCode == "UP":
        return 0
    elif directionCode == "RIGHT":
        return 1
    elif directionCode == "DOWN":
        return 2
    elif directionCode == "LEFT":
        return 3
    else:
        return -1

def convertValueToDirectionCode(value):
    if value == 0:
        return "UP"
    elif value == 1:
        return "RIGHT"
    elif value == 2:
        return "DOWN"
    elif value == 3:
        return "LEFT"
    else:
        return "NONE"

'''
    @brief  Remake rotated child symbol info
'''
def getRotatedChildInfo(additionalSymbol):
    tempChildInfo = ""
    if additionalSymbol:
        childList = additionalSymbol.split("/")
        for index in range(len(childList)):
            child = childList[index]
            direction = convertDirectionCodeToValue(child.split(",")[0])
            childName = child.split(",")[1]
            direction = (direction - 1) if direction > 0 else 3
            if index != 0:
                tempChildInfo = tempChildInfo + "/"
            tempChildInfo = tempChildInfo + convertValueToDirectionCode(direction) + "," + childName
    return tempChildInfo

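# Worked example (added for clarity; "NOZZLE" is just a placeholder child name, the format is the
# "DIRECTION,name" string parsed above): getRotatedChildInfo("UP,NOZZLE") returns "LEFT,NOZZLE".
# Each call shifts the direction one step (RIGHT->UP, DOWN->RIGHT, LEFT->DOWN, UP->LEFT), matching
# the 90-degree counter-clockwise symbol rotation used during detection.
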
#Check object contains pt
#obj is item in searchedSymbolList
def contains(obj, pt, tw, th):
    sp = obj.getSp()
    width = obj.getWidth()
    height = obj.getHeight()

    if sp[0] > pt[0]+tw:
        return 0
    if sp[0]+width < pt[0]:
        return 0
    if sp[1] > pt[1]+th:
        return 0
    if sp[1]+height < pt[1]:
        return 0

    #shared area
    x = max(sp[0], pt[0])
    y = max(sp[1], pt[1])
    w = min(sp[0] + width, pt[0] + tw) - x
    h = min(sp[1] + height, pt[1] + th) - y

    return float((w * h)) / float((tw * th)) * 100

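# Worked example (added for clarity): with an already searched symbol at sp=(0, 0), 100x100, and a
# new candidate at pt=(90, 90) with tw=th=100, the shared region is 10x10, so contains() returns
# 10*10 / (100*100) * 100 = 1.0 (percent). Values above ACCEPT_OVERLAY_AREA (10) are treated as
# overlapping detections by detectSymbolOnPid().
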
'''
    @history    2018.06.12  Jeongwoo    Type changed (int → float)
                humkyung 2018.07.07 change return type as like [x,y]
'''
def getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolRotatedAngle, rotateSymbolWidth, rotateSymbolHeight, originalSymbolWidth, originalSymbolHeight):
    res = []

    if additionalSymbol is None and symbolOriginalPoint is None:
        res.append(rotateSymbolWidth//2)
        res.append(rotateSymbolHeight//2)
    else:
        opx = float(symbolOriginalPoint.split(',')[0])
        opy = float(symbolOriginalPoint.split(',')[1])
        rPt = getCoordOnRotatedImage(symbolRotatedAngle, opx, opy, originalSymbolWidth, originalSymbolHeight)
        res.append(float(rPt[0]))
        res.append(float(rPt[1]))

    return res

'''
    @history    2018.06.12  Jeongwoo    Type changed (int → float)
                humkyung 2018.07.07 change return type as like [[x,y],...]
'''
def getCalculatedConnectionPoint(symbolConnectionPointStr, symbolRotatedAngle, rotateSymbolWidth, rotateSymbolHeight, originalSymbolWidth, originalSymbolHeight):
    res = []
    if symbolConnectionPointStr is not None:
        splitConnectionPointStr = symbolConnectionPointStr.split("/")
        for index in range(len(splitConnectionPointStr)):
            item = splitConnectionPointStr[index]
            cpx = float(item.split(',')[0])
            cpy = float(item.split(',')[1])
            rPt = getCoordOnRotatedImage(symbolRotatedAngle, cpx, cpy, originalSymbolWidth, originalSymbolHeight)
            res.append([float(rPt[0]), float(rPt[1])])

    return res

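# Note (added for clarity): symbolConnectionPointStr is the serialized "x1,y1/x2,y2/..." form that
# addSearchedSymbol() below also produces with '/'.join('{},{}'.format(...)); each x,y pair is mapped
# onto the rotated symbol image before being returned as [[x, y], ...].
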
'''
    @brief      Add symbols
    @author     jwkim
    @date
    @history    Change parameter (mpCount → hitRate)
                Yecheol 2018.07.04 Delete Symbol Id
'''
def addSearchedSymbol(sName, sType
                      , sp, w, h, threshold, minMatchCount, hitRate, rotatedAngle
                      , isDetectOnOrigin, rotateCount, ocrOption, isContainChild
                      , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect):
    global searchedSymbolList
    newSym = symbol.Symbol(sName, sType
                           , sp, w, h, threshold, minMatchCount, hitRate, rotatedAngle
                           , isDetectOnOrigin, rotateCount, ocrOption, isContainChild
                           , ','.join(str(x) for x in originalPoint), '/'.join('{},{}'.format(x[0],x[1]) for x in connectionPoint), baseSymbol, additionalSymbol, isExceptDetect)

    searchedSymbolList.append(newSym)

    return newSym

#Calculate count of keypoint match result
def getMatchPointCount(src, cmp):
    orb = cv2.ORB_create(1000, 2.0, 2, 1)

    kp1, des1 = orb.detectAndCompute(src, None)
    kp2, des2 = orb.detectAndCompute(cmp, None)

    FLANN_INDEX_LSH = 6
    # table_number      : The number of hash tables to use
    # key_size          : The length of the key in the hash tables
    # multi_probe_level : Number of levels to use in multi-probe (0 for standard LSH)
    #                     It controls how neighboring buckets are searched
    #                     Recommended value is 2
    # checks            : specifies the maximum leafs to visit when searching for neighbours
    # LSH : Locality-Sensitive Hashing
    # ref : https://www.cs.ubc.ca/research/flann/uploads/FLANN/flann_manual-1.8.4.pdf
    index_params = dict(algorithm = FLANN_INDEX_LSH, table_number = 20, key_size = 10, multi_probe_level = 4)
    search_params = dict(checks = 100)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k = 2)
    matchesMask = [[0, 0] for i in range(len(matches))]  #Python 3.x

    count = 0
    # ratio test as per Lowe's paper
    for i in range(len(matches)):
        if len(matches[i]) == 2:
            m = matches[i][0]
            n = matches[i][1]
            if m.distance < 0.85 * n.distance:
                count = count + 1

    matchCount = count

    #print("match Count : " + str(matchCount))
    return matchCount

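# Note (added for clarity): getMatchPointCount() returns the number of ORB descriptor pairs that
# pass Lowe's ratio test (best-match distance < 0.85 * second-best distance); it is used as an
# optional secondary filter on top of template matching when a symbol defines minMatchCount > 0.
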
'''
    @brief      detect symbols on PID
    @history    humkyung 2018.06.08 add parameters for signal
'''
def detectSymbolsOnPid(mainRes, targetSymbols, listWidget, updateProgressSignal):
    for detailTarget in targetSymbols:
        detectSymbolOnPid(mainRes, detailTarget, listWidget, updateProgressSignal)

'''
    @brief      detect symbol on PID
    @author     jwkim
    @date
    @history    humkyung 2018.04.06 check if symbol file exists
                Jeongwoo 2018.05.29 Change method to adjust detail symbol location with hit-rate. Not feature point count
                                    Change parameter on add symbol part (mpCount → hitRate)
                                    Remove unused calculation (avg)
                Jeongwoo 2018.06.27 Remove part to split P&ID image and for loop
                humkyung 2018.07.07 return searched symbols
'''
def detectSymbolOnPid(mainRes, targetSymbol, listWidget, updateProgressSignal):
    global ocrCompletedSrc
    global afterDenoising
    global threadLock
    global searchedSymbolList
    global maxProgressValue

    try:
        symbolName = targetSymbol.getName()
        symbolType = targetSymbol.getType()
        symbolPath = targetSymbol.getPath()
        symbolThreshold = targetSymbol.getThreshold()
        symbolMinMatchCount = targetSymbol.getMinMatchCount()
        isDetectOnOrigin = targetSymbol.getIsDetectOnOrigin()
        symbolRotateCount = targetSymbol.getRotationCount()
        symbolOcrOption = targetSymbol.getOcrOption()
        isContainChild = targetSymbol.getIsContainChild()
        symbolOriginalPoint = targetSymbol.getOriginalPoint()
        symbolConnectionPoint = targetSymbol.getConnectionPoint()
        baseSymbol = targetSymbol.getBaseSymbol()
        additionalSymbol = targetSymbol.getAdditionalSymbol()
        isExceptDetect = targetSymbol.getIsExceptDetect()

        # check if symbol file is target or not
        if isExceptDetect == 1:
            item = QListWidgetItem('{} file is not target'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('green'))
            listWidget.addItem(item)
            return

        foundSymbolCount = 0

        # check if symbol file exists
        if not os.path.isfile(symbolPath):
            item = QListWidgetItem('{} file not found'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('red'))
            listWidget.addItem(item)
            return
        # up to here

        sym = cv2.imread(symbolPath, 1)
        symGray = cvtGrayImage(sym)
        ## TODO: symbols are not detected when the symbol image is binarized
        ## symGray = cv2.threshold(cvtGrayImage(sym), 127, 255, cv2.THRESH_BINARY)[1]
        ## cv2.imshow('symbol', symGray)
        ## cv2.waitKey(0)
        sow, soh = symGray.shape[::-1]  # symbol original w, h

        offsetDrawingArea = []
        appDocData = AppDocData.instance()
        area = appDocData.getArea('Drawing')
        if area is not None:
            copiedBasePid = area.img.copy()
            offsetDrawingArea.append(area.x)
            offsetDrawingArea.append(area.y)
        else:
            offsetDrawingArea.append(0)
            offsetDrawingArea.append(0)
            if isDetectOnOrigin == 1:
                copiedBasePid = appDocData.imgSrc.copy()
            else:
                copiedBasePid = ocrCompletedSrc.copy()
        srcWidth, srcHeight = copiedBasePid.shape[::-1]

        roiItemSp = (0, 0)
        roiItemEp = (srcWidth, srcHeight)
        roiItem = copiedBasePid

        symbolRotatedAngle = 0
        for rc in range(symbolRotateCount + 1):  ## the rotation count is user-specified, so add 1 before using it
            sw, sh = symGray.shape[::-1]
            roiw = (roiItemEp[0] - roiItemSp[0])
            roih = (roiItemEp[1] - roiItemSp[1])

            ## Case : Bigger Symbol than Split ROI
            if roiw < sw or roih < sh:
                symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
                symbolRotatedAngle = symbolRotatedAngle + 90

                if baseSymbol is not None and additionalSymbol is not None:
                    additionalSymbol = getRotatedChildInfo(additionalSymbol)
                continue

            ## get Rotated Original Point
            originalPoint = getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolRotatedAngle, sw, sh, sow, soh)
            connectionPoint = getCalculatedConnectionPoint(symbolConnectionPoint, symbolRotatedAngle, sw, sh, sow, soh)

            ## Template Matching
            tmRes = cv2.matchTemplate(roiItem, symGray, cv2.TM_CCOEFF_NORMED)
            loc = np.where(tmRes >= symbolThreshold)

            for pt in zip(*loc[::-1]):
                mpCount = 0  # Match Point Count

                roi = roiItem[pt[1]:pt[1]+sh, pt[0]:pt[0]+sw]

                if symbolMinMatchCount > 0:
                    mpCount = getMatchPointCount(roi, symGray)
                    if not (mpCount >= symbolMinMatchCount):
                        continue

                searchedItemSp = (roiItemSp[0]+pt[0] + round(offsetDrawingArea[0]), roiItemSp[1]+pt[1] + round(offsetDrawingArea[1]))

                overlapArea = 0
                symbolIndex = -1
                for i in range(len(searchedSymbolList)):
                    '''
                    _pt = searchedSymbolList[i].getSp()
                    rect = QRectF(_pt[0], _pt[1], searchedSymbolList[i].getWidth(), searchedSymbolList[i].getHeight())
                    _rect = QRectF(searchedItemSp[0], searchedItemSp[1], sw, sh)
                    if rect.intersects(_rect):
                        intersect = rect.intersected(_rect)
                        overlapArea = intersect.width()*intersect.height()
                        if overlapArea > sw*sh*0.1:
                            symbolIndex = i
                            break
                    '''
                    overlapArea = contains(searchedSymbolList[i], searchedItemSp, sw, sh)
                    if overlapArea > ACCEPT_OVERLAY_AREA:
                        categories = [appDocData.isEquipmentType(symbolType), appDocData.isEquipmentType(searchedSymbolList[i].getType())]
                        if categories[0] == categories[1]:
                            symbolIndex = i
                            break

                hitRate = tmRes[pt[1], pt[0]]

                ## if the overlapping area is smaller than the threshold
                if overlapArea <= ACCEPT_OVERLAY_AREA:
                    threadLock.acquire()
                    foundSymbolCount = foundSymbolCount + 1
                    addSearchedSymbol(symbolName, symbolType
                                      , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                                      , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                      , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect)
                    threadLock.release()
                else:  ## if the overlapping area is larger than the threshold
                    if symbolIndex != -1 and symbolIndex < len(searchedSymbolList):
                        searchedSymbol = searchedSymbolList[symbolIndex]
                        ## if the detected symbol is the same as the current one, replace it with the entry that has the higher hit rate
                        if symbolName == searchedSymbol.getName():
                            symbolHitRate = searchedSymbol.getHitRate()
                            if symbolHitRate < hitRate:
                                threadLock.acquire()
                                searchedSymbolList[symbolIndex] = symbol.Symbol(symbolName, symbolType
                                                                                , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolRotatedAngle
                                                                                , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                                                                , ','.join(str(x) for x in originalPoint), '/'.join('{},{}'.format(x[0],x[1]) for x in connectionPoint), baseSymbol, additionalSymbol, isExceptDetect)
                                ## DEBUG
                                #print('//// {}:{}-{} ////'.format(symbolName, searchedItemSp, hitRate))
                                ## up to here

                                threadLock.release()
                        ## if the detected symbol is not the same as the current one (containment)
                        elif appDocData.isEquipmentType(searchedSymbol.getType()):
                            ## DEBUG
                            print('{}->{}:{}-{}'.format(searchedSymbol.getName(), symbolName, searchedItemSp, hitRate))
                            ## up to here

                            threadLock.acquire()
                            foundSymbolCount = foundSymbolCount + 1
                            addSearchedSymbol(symbolName, symbolType
                                              , searchedItemSp, sw, sh, symbolThreshold, hitRate, hitRate, symbolRotatedAngle
                                              , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                              , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect)
                            threadLock.release()

            ## Rotate Symbol
            symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
            symbolRotatedAngle = symbolRotatedAngle + 90

            if additionalSymbol is not None:
                additionalSymbol = getRotatedChildInfo(additionalSymbol)

        threadLock.acquire()
        listWidget.addItem('Found Symbol : ' + os.path.splitext(os.path.basename(symbolPath))[0] + ' - (' + str(foundSymbolCount) + ')')
        threadLock.release()

        updateProgressSignal.emit(maxProgressValue)

        return [symbol for symbol in searchedSymbolList if symbol.getName() == symbolName]
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return []

'''
    @brief      detect nozzle
    @author     humkyung
    @date       2018.07.07
    @history    humkyung 2018.07.17 pass equipment as parameter instead of image
'''
def detectNozzleOnPid(equipment, nozzle, listWidget, updateProgressSignal):
    global src
    global threadLock
    global searchedSymbolList
    global maxProgressValue

    try:
        symbolName = nozzle.getName()
        symbolType = nozzle.getType()
        symbolPath = nozzle.getPath()
        symbolThreshold = nozzle.getThreshold()
        symbolMinMatchCount = nozzle.getMinMatchCount()
        isDetectOnOrigin = nozzle.getIsDetectOnOrigin()
        symbolRotateCount = nozzle.getRotationCount()
        symbolOcrOption = nozzle.getOcrOption()
        isContainChild = nozzle.getIsContainChild()
        symbolOriginalPoint = nozzle.getOriginalPoint()
        symbolConnectionPoint = nozzle.getConnectionPoint()
        baseSymbol = nozzle.getBaseSymbol()
        additionalSymbol = nozzle.getAdditionalSymbol()
        isExceptDetect = nozzle.getIsExceptDetect()

        foundSymbolCount = 0

        # check if symbol file exists
        if not os.path.isfile(symbolPath):
            item = QListWidgetItem('{} file not found'.format(os.path.split(os.path.basename(symbolPath))[0]))
            item.setBackground(QColor('red'))
            listWidget.addItem(item)
            return
        # up to here

        symGray = cvtGrayImage(cv2.imread(symbolPath, 1))
        sow, soh = symGray.shape[::-1]  # symbol original w, h

        # get image of equipment with offset of nozzle size
        appDocData = AppDocData.instance()
        pt = equipment.getSp()
        nozzleSize = max(sow, soh)
        sx = round(pt[0]) - nozzleSize
        sy = round(pt[1]) - nozzleSize
        ex = round(pt[0] + equipment.getWidth()) + nozzleSize
        ey = round(pt[1] + equipment.getHeight()) + nozzleSize
        offset = (sx, sy)
        eqpSize = (pt[0], pt[1], equipment.getWidth(), equipment.getHeight())
        img = appDocData.imgSrc[sy:ey, sx:ex]
        srcWidth, srcHeight = img.shape[::-1]
        # up to here

        roiItemSp = (0, 0)
        roiItemEp = (srcWidth, srcHeight)
        roiItem = img

        symbolAngle = 0
        for rc in range(symbolRotateCount + 1):  ## the rotation count is user-specified, so add 1 before using it
            sw, sh = symGray.shape[::-1]
            roiw = (roiItemEp[0] - roiItemSp[0])
            roih = (roiItemEp[1] - roiItemSp[1])

            ## get Rotated Original Point
            originalPoint = getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, symbolAngle, sw, sh, sow, soh)
            connectionPoint = getCalculatedConnectionPoint(symbolConnectionPoint, symbolAngle, sw, sh, sow, soh)
            dx = connectionPoint[0][0] - originalPoint[0]
            dy = connectionPoint[0][1] - originalPoint[1]

            ## Template Matching
            tmRes = cv2.matchTemplate(roiItem, symGray, cv2.TM_CCOEFF_NORMED)
            loc = np.where(tmRes >= symbolThreshold)

            for pt in zip(*loc[::-1]):
                mpCount = 0  # Match Point Count
                symbolIndex = -1

                roi = roiItem[pt[1]:pt[1]+sh, pt[0]:pt[0]+sw]

                if symbolMinMatchCount > 0:
                    mpCount = getMatchPointCount(roi, symGray)
                    if not (mpCount >= symbolMinMatchCount):
                        continue

                mid = (offset[0] + pt[0] + (originalPoint[0] + connectionPoint[0][0])*0.5, offset[1] + pt[1] + (originalPoint[1] + connectionPoint[0][1])*0.5)
                searchedItemSp = (roiItemSp[0]+pt[0]+offset[0], roiItemSp[1]+pt[1]+offset[1])
                # check searched nozzle location
                if abs(dx) > abs(dy):
                    if dx > 0:
                        if mid[0] < eqpSize[0] + eqpSize[2]*0.5: continue
                    else:
                        if mid[0] > eqpSize[0] + eqpSize[2]*0.5: continue
                else:
                    if dy > 0:
                        if mid[1] < eqpSize[1] + eqpSize[3]*0.5: continue
                    else:
                        if mid[1] > eqpSize[1] + eqpSize[3]*0.5: continue
                # up to here

                overlapArea = 0
                nozzles = [symbol for symbol in searchedSymbolList if symbol.getType() == 'Nozzles']
                for i in range(len(nozzles)):
                    _pt = nozzles[i].getSp()
                    rect = QRectF(_pt[0], _pt[1], nozzles[i].getWidth(), nozzles[i].getHeight())
                    _rect = QRectF(searchedItemSp[0], searchedItemSp[1], sw, sh)
                    if rect.intersects(_rect):
                        intersect = rect.intersected(_rect)
                        overlapArea = intersect.width()*intersect.height()
                        if overlapArea > ACCEPT_OVERLAY_AREA:
                            symbolIndex = i
                            break

                hitRate = tmRes[pt[1], pt[0]]

                ## if the overlapping area is smaller than the threshold
                if overlapArea <= ACCEPT_OVERLAY_AREA:
                    threadLock.acquire()
                    foundSymbolCount = foundSymbolCount + 1
                    searched = addSearchedSymbol(symbolName, symbolType
                                                 , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolAngle
                                                 , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                                 , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect)
                    searched.owner = equipment
                    threadLock.release()
                ## if the overlapping area is larger than the threshold
                else:
                    if symbolIndex != -1 and symbolIndex < len(nozzles):
                        searchedSymbol = nozzles[symbolIndex]
                        ## if the detected symbol is the same as the current one, replace it with the entry that has the higher hit rate
                        if symbolName == searchedSymbol.getName():
                            symbolHitRate = searchedSymbol.getHitRate()
                            if symbolHitRate < hitRate:
                                threadLock.acquire()
                                nozzles[symbolIndex] = symbol.Symbol(symbolName, symbolType
                                                                     , searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, hitRate, symbolAngle
                                                                     , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                                                     , ','.join(str(x) for x in originalPoint), '/'.join('{},{}'.format(x[0],x[1]) for x in connectionPoint), baseSymbol, additionalSymbol, isExceptDetect)
                                threadLock.release()
                        ## if the detected symbol is not the same as the current one (containment)
                        elif appDocData.isEquipmentType(searchedSymbol.getType()):
                            threadLock.acquire()
                            foundSymbolCount = foundSymbolCount + 1
                            searched = addSearchedSymbol(symbolName, symbolType
                                                         , searchedItemSp, sw, sh, symbolThreshold, hitRate, hitRate, symbolAngle
                                                         , isDetectOnOrigin, symbolRotateCount, symbolOcrOption, isContainChild
                                                         , originalPoint, connectionPoint, baseSymbol, additionalSymbol, isExceptDetect)
                            searched.owner = equipment
                            threadLock.release()

            ## Rotate Symbol
            symGray = cv2.rotate(symGray, cv2.ROTATE_90_COUNTERCLOCKWISE)
            symbolAngle = symbolAngle + 90

            if additionalSymbol is not None:
                additionalSymbol = getRotatedChildInfo(additionalSymbol)

        threadLock.acquire()
        listWidget.addItem('Found Symbol : ' + os.path.splitext(os.path.basename(symbolPath))[0] + ' - (' + str(foundSymbolCount) + ')')
        threadLock.release()

        updateProgressSignal.emit(maxProgressValue)

        return [symbol for symbol in searchedSymbolList if symbol.getName() == symbolName]
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return []

'''
    @brief      detect equipment
    @author     humkyung
    @date       2018.07.07
'''
def detectEquipmentOnPid(mainRes, targetSymbol, listWidget, updateProgressSignal):
    try:
        equipments = detectSymbolOnPid(mainRes, targetSymbol, listWidget, updateProgressSignal)
        for equipment in equipments:
            # detect nozzles around equipment
            for nozzle in targetSymbolList[1]:
                detectNozzleOnPid(equipment, nozzle, listWidget, updateProgressSignal)
            # up to here
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

'''
    @history    2018.05.17  Jeongwoo    Bitwise_not target changed (Original Image → Symbol Image)
                humkyung 2018.07.11 add parameter for image
'''
def removeDetectedSymbol(sym, imgSrc):
    global ocrCompletedSrc
    global threadLock

    path = sym.getPath()
    sp = sym.getSp()
    sw = sym.getWidth()
    sh = sym.getHeight()
    angle = sym.getRotatedAngle()
    symImg = cv2.imread(path)
    symImg = cv2.threshold(cvtGrayImage(symImg), 127, 255, cv2.THRESH_BINARY)[1]

    for i in range(angle//90):
        symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

    threadLock.acquire()
    temp = []
    temp = imgSrc[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw]
    symImgBin = cv2.bitwise_not(symImg)
    result = cv2.bitwise_xor(symImgBin, temp)
    result = cv2.dilate(result, np.ones((5, 5), np.uint8))
    imgSrc[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw] = result
    threadLock.release()

'''
    @history    2018.04.27  Jeongwoo    Remove Tesseract Log on listWidget
                2018.05.04  Jeongwoo    Change method to OCR with tesseract_ocr_module.py
                2018.05.09  Jeongwoo    Add global variable textInfoList, Remove text in symbol and Add tesseract result text
                2018.05.10  Jeongwoo    Remove not used if-statement
                2018.06.19  Jeongwoo    When detect text in symbol, use getTextAreaInfo() and Tesseract
                2018.06.21  Jeongwoo    Add if-statement for way to detect text by Type A
'''
def drawFoundSymbols(symbol, listWidget):
    global src
    global canvas
    global WHITE_LIST_CHARS
    global searchedSymbolList
    global textInfoList

    #symbolId = symbol.getId()
    symbolPath = symbol.getPath()
    symbolSp = symbol.getSp()
    symbolWidth = symbol.getWidth()
    symbolHeight = symbol.getHeight()
    symbolRotatedAngle = symbol.getRotatedAngle()
    symbolOcrOption = symbol.getOcrOption()

    symImg = cv2.cvtColor(cv2.imread(symbolPath, 1), cv2.COLOR_BGR2GRAY)
    for i in range(symbolRotatedAngle//90):
        symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

    w, h = symImg.shape[::-1]
    canvas[symbolSp[1]:symbolSp[1]+h, symbolSp[0]:symbolSp[0]+w] = cv2.bitwise_and(canvas[symbolSp[1]:symbolSp[1]+h, symbolSp[0]:symbolSp[0]+w], symImg)

'''
    @brief      draw found symbols and texts
    @author     Jeongwoo
'''
def drawFoundSymbolsOnCanvas(drawingPath, textInfos, listWidget):
    global src
    global ocrCompletedSrc
    global searchedSymbolList
    global canvas

    appDocData = AppDocData.instance()
    canvas = np.zeros(appDocData.imgSrc.shape, np.uint8)
    canvas[::] = 255

    try:
        appDocData = AppDocData.instance()
        project = appDocData.getCurrentProject()

        for symbol in searchedSymbolList:
            drawFoundSymbols(symbol, listWidget)

        for text in textInfos:
            left = text.getX()
            top = text.getY()
            right = text.getX() + text.getW()
            bottom = text.getY() + text.getH()

            canvas[top:bottom, left:right] = appDocData.imgSrc[top:bottom, left:right]

        cv2.imwrite(os.path.join(project.getTempPath(), "FOUND_" + os.path.basename(drawingPath)), canvas)
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

'''
    @history    2018.04.24  Jeongwoo    Add isExceptDetect Field
                2018.05.09  Jeongwoo    Add targetSymbolList clear
                humkyung 2018.07.07 store symbols to list as like [equipments],[nozzles],[symbols]
'''
def initTargetSymbolDataList():
    global targetSymbolList

    targetSymbolList.clear()
    appDocData = AppDocData.instance()
    symbolList = appDocData.getTargetSymbolList()
    equipments = [item for item in symbolList if appDocData.getSymbolCategoryByType(item.getType()) == 'Equipment']
    nozzles = [item for item in symbolList if item.getType() == 'Nozzles']
    # [[equipments],[nozzles],[symbols]]
    targetSymbolList.append(equipments)
    targetSymbolList.append(nozzles)
    targetSymbolList.append([item for item in symbolList if item not in equipments and item not in nozzles])

    return targetSymbolList

'''
    @brief      remove small objects from given image
    @author     humkyung
    @date       2018.04.26
    @history    2018.05.25  Jeongwoo    Moved from MainWindow
'''
def removeSmallObjects(image):
    try:
        appDocData = AppDocData.instance()
        configs = appDocData.getConfigs('Small Object Size', 'Min Area')
        minArea = int(configs[0].value) if 1 == len(configs) else 20
        configs = appDocData.getConfigs('Small Object Size', 'Max Area')
        maxArea = int(configs[0].value) if 1 == len(configs) else 50

        _, contours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        selectedContours = []
        for contour in contours:
            area = cv2.contourArea(contour)
            if area > minArea and area < maxArea: selectedContours.append(contour)
        contourImage = cv2.drawContours(image, selectedContours, -1, (255,255,255), -1)  # draw contour with white color
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

    return contourImage

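# Note (added for clarity): the three-value unpacking of cv2.findContours() above (and in the
# __main__ block below) follows the OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy),
# so this module is assumed to run against OpenCV 3.x.
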
'''
    @history    2018.05.25  Jeongwoo    Moved from MainWindow
                2018.05.28  Jeongwoo    Add xmlPath Parameter and append LineInfo into xml
                2018.05.29  Jeongwoo    Change method to add item
                2018.05.30  Jeongwoo    Remove parameter (xmlPath)
                humkyung 2018.06.11 add drawing path to parameter and write recognized lines to image
                humkyung 2018.07.04 call arrangeLinePosition after creating line
'''
def recognizeLine(path, listWidget, graphicsView):
    from shapely.geometry import Point, LineString
    from SymbolSvgItem import SymbolSvgItem
    from QEngineeringFlowArrowItem import QEngineeringFlowArrowItem
    from QEngineeringLineNoTextItem import QEngineeringLineNoTextItem
    from QEngineeringTextItem import QEngineeringTextItem
    from EngineeringLineItem import QEngineeringLineItem
    from LineDetector import LineDetector

    try:
        listWidget.addItem('Starting line recognition')

        #remove already existing line and flow arrow item
        items = [item for item in graphicsView.scene.items() if (type(item) is QEngineeringLineItem) or (type(item) is QEngineeringFlowArrowItem)]
        for item in items:
            graphicsView.scene.removeItem(item)
        #up to here

        # detect line
        connectedLines = []

        area = AppDocData.instance().getArea('Drawing')
        area.img = removeSmallObjects(area.img)
        detector = LineDetector(area.img)

        symbols = []
        for item in graphicsView.scene.items():
            if issubclass(type(item), SymbolSvgItem):
                symbols.append(item)
                res = detector.detectConnectedLine(item, round(area.x), round(area.y))
                if res is not None:
                    connectedLines.extend(res)

        listWidget.addItem('Connecting lines')
        if len(connectedLines) > 1:
            detector.mergeLines(connectedLines, toler=5)
        # connect line to symbol
        try:
            for line in connectedLines:
                matches = [symbol for symbol in symbols if symbol.isConnectable(line, (round(area.x), round(area.y)), toler=20)]
                for symbol in matches:
                    detector.connectLineToSymbol(line, (round(area.x), round(area.y)), symbol)
        except Exception as ex:
            print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
        # up to here

        # connect line to line
        toler = 10
        try:
            for line in connectedLines:
                matches = [it for it in connectedLines if (it is not line) and (not detector.isParallel(line, it))]

                # get closest line
                selected = []
                shapelyLine = LineString(line)
                for match in matches:
                    dist = [shapelyLine.distance(Point(match[0][0], match[0][1])), shapelyLine.distance(Point(match[1][0], match[1][1]))]
                    if dist[0] < toler or dist[1] < toler:
                        selected.append(match)
                # up to here

                for match in selected:
                    detector.connectLineToLine(match, line, toler)
        except Exception as ex:
            print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
        # up to here

        lines = []
        for pts in connectedLines:
            processLine = QEngineeringLineItem(vertices=[(area.x + param[0], area.y + param[1]) for param in pts])
            graphicsView.scene.addItem(processLine)
            lines.append(processLine)

            if processLine.length() > 100:  # TODO: check critical length
                processLine.addFlowArrow()

        # re-order process line's start,end according to flow mark
        arrangeLinePosition(lines, symbols, listWidget)
        # up to here
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))
    finally:
        listWidget.addItem('Finished line recognition')

'''
    @brief      arrange line's position
    @author     humkyung
    @date       2018.07.04
'''
def arrangeLinePosition(lines, symbols, listWidget):
    try:
        listWidget.addItem('Apply flow direction')
        pool = [line for line in lines if line.flowMark is not None]
        visited = []
        visited.extend(pool)
        while len(pool) > 0:
            line = pool.pop()
            print('{} - ({})'.format(line, len(pool)))
            rhs = [item for item in lines if item not in visited and item.isJointed(line)]
            if rhs:
                pool.extend(rhs)
                visited.extend(rhs)
                for item in rhs:
                    item.arrangeVertexOrder(line)

            # skip jointed symbols
            symbolPool = [item for item in symbols if item not in visited and item.isJointed(line)]
            if symbolPool:
                selected = []
                visited.extend(symbolPool)
                while len(symbolPool) > 0:
                    symbol = symbolPool.pop()

                    rhs = [item for item in symbols if item not in visited and item.isJointed(symbol)]
                    if rhs:
                        symbolPool.extend(rhs)
                        visited.extend(rhs)
                        selected.extend(rhs)
                    else:
                        selected.append(symbol)

                # find lines which are connected last symbol
                for symbol in selected:
                    rhs = [item for item in lines if item not in visited and item.isJointed(symbol)]
                    if rhs:
                        pool.extend(rhs)
                        visited.extend(rhs)
                        for item in rhs:
                            item.arrangeVertexOrder(line)
                # up to here
            # up to here
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

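# Note (added for clarity): executeRecognition() below is the entry point used by the UI thread.
# Roughly: initTargetSymbolDataList() -> mask note/equipment-description areas -> detect text areas
# -> detect equipments (with their nozzles) and the remaining symbols via ThreadPoolExecutor ->
# OCR the detected text areas -> erase recognized symbols/text from imgSrc -> write the
# FOUND_/RECT_/DIFF_ debug images and emit the results through the given signals.
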
'''
    @brief      Main function
    @author     Jeongwoo
    @date
    @history    humkyung 2018.04.06 change error display from message box to print
                Jeongwoo 2018.04.25 Remove 'Current Symbol : ' QListItem
                Jeongwoo 2018.05.09 Make Comments OCR.removeTextFromNpArray block
                Jeongwoo 2018.05.25 Remove imgLineList variable and parameter on writeXml()
                humkyung 2018.05.26 add parameters(graphicsView, isSymbolTextChecked, isLineChecked)
                Jeongwoo 2018.05.28 Add/Remove Parameters(Add : signal / Remove : graphicsView, isLineChecked)
                Jeongwoo 2018.05.30 Remove return value
                humkyung 2018.06.08 add signal for progressbar to parameter
                humkyung 2018.06.11 get difference between original and recognized image
                Jeongwoo 2018.06.21 If noteTextInfoList is None, change from None to empty list
'''
def executeRecognition(signal, updateProgressSignal, path, listWidget, isSymbolTextChecked):
    import re
    from TextDetector import TextDetector

    global ocrCompletedSrc
    global searchedSymbolList
    global threadLock
    global textInfoList
    global noteTextInfoList
    global maxProgressValue

    try:
        appDocData = AppDocData.instance()
        project = appDocData.getCurrentProject()
        textDetector = TextDetector()

        srcList = []
        srcList.append(path)

        initTargetSymbolDataList()

        for mainRes in srcList:
            ocrCompletedSrc = []
            searchedSymbolList = []
            textInfoList = []

            if not os.path.isfile(mainRes):
                item = QListWidgetItem('{} file is not found'.format(os.path.basename(mainRes)))
                item.setBackground(Qt.red)
                listWidget.addItem(item)
                continue

            # remove equipment desc. area
            configs = appDocData.getConfigs('{} Equipment Desc Area'.format(appDocData.imgName))
            for config in configs:
                found = re.findall('\d+', config.value)
                if len(found) == 4:
                    cv2.rectangle(appDocData.imgSrc, (int(found[0]), int(found[1])), (int(found[0])+int(found[2]), int(found[1])+int(found[3])), 255, -1)
            # up to here

            # acquire note image and remove from imgSrc
            noteArea = appDocData.getArea('Note')
            if noteArea is not None:
                noteArea.img = appDocData.imgSrc[round(noteArea.y):round(noteArea.y+noteArea.height), round(noteArea.x):round(noteArea.x+noteArea.width)].copy()
                cv2.rectangle(appDocData.imgSrc, (round(noteArea.x), round(noteArea.y)), (round(noteArea.x + noteArea.width), round(noteArea.y + noteArea.height)), 255, -1)
            # up to here

            area = appDocData.getArea('Drawing')
            if area is not None:
                area.img = appDocData.imgSrc[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]

            listWidget.addItem("Start recognition : " + mainRes)

            if isSymbolTextChecked:
                threadLock.acquire()
                offset = (area.x, area.y) if area is not None else (0, 0)
                textAreas = textDetector.detectTextAreas(area.img if area is not None else appDocData.imgSrc, offset)
                ### calculate total count of symbol
                maxProgressValue = len(textAreas) + 1
                for targetItem in targetSymbolList:
                    if type(targetItem) is list:
                        maxProgressValue += len(targetItem)
                    else:
                        maxProgressValue += 1
                ### up to here
                threadLock.release()

                # detect equipments
                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for symbol in targetSymbolList[0]:
                    pool.submit(detectEquipmentOnPid, mainRes, symbol, listWidget, updateProgressSignal)
                pool.shutdown(wait = True)
                # up to here

                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for symbol in targetSymbolList[2]:
                    if type(symbol) is list:
                        pool.submit(detectSymbolsOnPid, mainRes, symbol, listWidget, updateProgressSignal)
                    else:
                        pool.submit(detectSymbolOnPid, mainRes, symbol, listWidget, updateProgressSignal)
                pool.shutdown(wait = True)

                ## DEBUG
                for item in searchedSymbolList:
                    _img = appDocData.imgSrc[round(item.getSp()[1]):round(item.getSp()[1]+item.getHeight()), round(item.getSp()[0]):round(item.getSp()[0]+item.getWidth())]
                    cv2.imwrite(os.path.join(project.getTempPath(), 'Tile', item.getName()+'.png'), _img)
                ## up to here

                textDetector.recognizeText(appDocData.imgSrc, offset, textAreas, searchedSymbolList, updateProgressSignal, listWidget, maxProgressValue)
                textInfoList = textDetector.textInfoList.copy() if textDetector.textInfoList is not None else None
                noteTextInfoList = textDetector.noteTextInfoList.copy() if textDetector.noteTextInfoList is not None else None

                appDocData.imgWidth, appDocData.imgHeight = appDocData.imgSrc.shape[::-1]
                drawFoundSymbolsOnCanvas(mainRes, textInfoList, listWidget)

                # remove text from image
                textDetector.removeTextFromImage(appDocData.imgSrc, offset)
                # up to here

                appDocData.imgName = os.path.splitext(os.path.basename(mainRes))[0]

                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                for sym in searchedSymbolList:
                    pool.submit(removeDetectedSymbol, sym, appDocData.imgSrc)
                pool.shutdown(wait = True)

                ## Remove Noise
                kernel1 = np.ones((2, 2), np.uint8)
                appDocData.imgSrc = cv2.dilate(appDocData.imgSrc, kernel1)
                appDocData.imgSrc = cv2.erode(appDocData.imgSrc, kernel1)

                removedSymbolImgPath = os.path.join(project.getTempPath(), os.path.basename(path))
                cv2.imwrite(removedSymbolImgPath, appDocData.imgSrc)

                area = AppDocData.instance().getArea('Drawing')
                if area is not None:
                    area.img = appDocData.imgSrc[round(area.y+1):round(area.y+area.height), round(area.x+1):round(area.x+area.width)]
                cv2.imwrite(os.path.join(project.getTempPath(), "RECT_" + os.path.basename(path)), appDocData.imgSrc)

                listWidget.addItem("Recognized symbol count : " + str(len(searchedSymbolList)))

                # get difference between original and recognized image
                foundFilePath = os.path.join(project.getTempPath(), "FOUND_" + os.path.basename(path))
                getDifference(path, foundFilePath)
                # up to here

            signal.emit(searchedSymbolList, textInfoList, noteTextInfoList if noteTextInfoList is not None else [])
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

'''
    @brief      draw contour to image
    @author     humkyung
    @date       2018.06.18
'''
def drawContour(img, contour):
    area = cv2.contourArea(contour, True)
    if area >= 0:
        cv2.drawContours(img, [contour], -1, (0,0,0), -1)
        cv2.drawContours(img, [contour], -1, (255,255,255), 1)
    else:
        cv2.drawContours(img, [contour], -1, (255,255,255), -1)

'''
    @brief      get difference between given original and recognized image
    @author     humkyung
    @date       2018.06.11
'''
def getDifference(orgImagePath, recImagePath):
    import re

    global ocrCompletedSrc
    global textInfoList
    global noteTextInfoList

    try:
        appDocData = AppDocData.instance()
        if os.path.isfile(orgImagePath) and os.path.isfile(recImagePath):
            imgOriginal = cv2.threshold(cvtGrayImage(cv2.imread(orgImagePath, 1)), 127, 255, cv2.THRESH_BINARY)[1]
            # remove equipment desc. area
            configs = appDocData.getConfigs('{} Equipment Desc Area'.format(appDocData.imgName))
            for config in configs:
                found = re.findall('\d+', config.value)
                if len(found) == 4:
                    cv2.rectangle(imgOriginal, (int(found[0]), int(found[1])), (int(found[0])+int(found[2]), int(found[1])+int(found[3])), 255, -1)
            # up to here

            imgRecognized = cv2.threshold(cvtGrayImage(cv2.imread(recImagePath, 1)), 127, 255, cv2.THRESH_BINARY)[1]

            imgDiff = np.ones(imgOriginal.shape, np.uint8)*255

            area = AppDocData.instance().getArea('Drawing')
            if area is not None:
                x = round(area.x)
                y = round(area.y)
                width = round(area.width)
                height = round(area.height)
                imgNotOper = cv2.bitwise_not(imgRecognized[y:y+height, x:x+width])
                imgDiff[y:y+height, x:x+width] = cv2.bitwise_xor(imgOriginal[y:y+height, x:x+width], imgNotOper)

            # remove noise
            imgDiff = cv2.dilate(imgDiff, np.ones((2, 2), np.uint8))

            appDocData = AppDocData.instance()
            project = appDocData.getCurrentProject()
            cv2.imwrite(os.path.join(project.getTempPath(), "DIFF_" + os.path.basename(orgImagePath)), imgDiff)
    except Exception as ex:
        print('error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno))

if __name__ == '__main__':
    import DTI_PID_UI
    from ProjectDialog import Ui_Dialog
    import timeit
    from PyQt5.QtCore import QRect
    from operator import itemgetter, attrgetter

    start = timeit.default_timer()
    img = cv2.imread('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/PC-K-2203_P1_800DPI.png', 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    contourImg = np.ones(imgGray.shape, np.uint8)*255
    contourOcrImg = contourImg.copy()
    binaryImg, mask = cv2.threshold(imgGray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgFinal = cv2.bitwise_and(imgGray, imgGray, mask = mask)
    ret, newImg = cv2.threshold(imgFinal, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    #holes = [contours[i] for i in range(len(contours)) if hierarchy[i][3] >= 0]
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area >= 0:
            [x, y, w, h] = cv2.boundingRect(contour)

            # remove too big or small one
            if (w > 100 or h > 100) or (w < 5 or h < 5): continue

            cv2.drawContours(contourImg, [contour], -1, (0,0,0), 1)
            cv2.drawContours(contourOcrImg, [contour], -1, (0,0,0), -1)
        else:
            cv2.drawContours(contourOcrImg, [contour], -1, (255,255,255), -1)

    '''
    contourImg = cv2.bitwise_not(contourImg)
    circles = cv2.HoughCircles(contourImg, cv2.HOUGH_GRADIENT, 1, 100)
    circles = np.uint16(np.around(circles))
    for i in circles[0,:]:
        cv2.circle(contourImg, (i[0], i[1]), i[2], (255,255,0), 1)
    '''

    rects = []
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (8, 8))
    #kernel1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (2,2))
    #eroded = cv2.dilate(contourImg, kernel1)
    #cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Dilate_PC-K-2203_P1_800DPI___partial.png', eroded)
    eroded = cv2.erode(contourImg, kernel)
    image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    for contour in contours:
        area = cv2.contourArea(contour, True)
        if area >= 0:
            [x, y, w, h] = cv2.boundingRect(contour)

            # remove small one less than character size
            if (w < 20 or h < 20): continue
            #if w > h:
            #    rects.append(QRect(x, y, w, h)) # expand rect
            #elif w < h:
            #    rects.append(QRect(x, y, w, h)) # expand rect
            rects.append(QRect(x, y, w, h))  # expand rect

    intersected = True
    while intersected:
        intersected = False
        for rect in rects[:]:
            matches = [x for x in rects if rect.intersects(x)]
            if len(matches) > 1:
                united = matches[0]
                for _rect in matches:
                    united = united.united(_rect)
                    if _rect in rects: rects.remove(_rect)
                rects.append(united)
                intersected = True
                break

    for rect in rects:
        cv2.rectangle(img, (rect.x(), rect.y()), (rect.x() + rect.width(), rect.y() + rect.height()), (255, 0, 255), 1)

    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/PC-K-2203_P1_800DPI___partial.png', img)
    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Contour_PC-K-2203_P1_800DPI___partial.png', contourOcrImg)
    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/Erode_PC-K-2203_P1_800DPI___partial.png', eroded)

    chan, imgW, imgH = img.shape[::-1]
    index = 0
    for rect in rects:
        index = index + 1
        isVertical = False
        textInfoList = None
        if rect.width() >= rect.height() or rect.height() < 50:
            isVertical = False
            textInfoList = TOCR.getTextInfo(contourOcrImg[rect.y():rect.y()+rect.height(), rect.x():rect.x()+rect.width()], (rect.x(), rect.y()))
        else:
            isVertical = True
            transform = QTransform()
            transform.translate(imgH*0.5, imgW*0.5)
            transform.rotate(90)
            transform.translate(-imgW*0.5, -imgH*0.5)
            transRect = transform.mapRect(rect)
            rotatedContourOcrImg = cv2.rotate(contourOcrImg, cv2.ROTATE_90_CLOCKWISE)
            textInfoList = TOCR.getTextInfo(rotatedContourOcrImg[transRect.y():transRect.y()+transRect.height(), transRect.x():transRect.x()+transRect.width()], (transRect.x(), transRect.y()))

        if isVertical:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        if textInfoList is not None:
            for textInfo in textInfoList:
                cv2.putText(img, textInfo.getText(), (textInfo.getX(), textInfo.getY()+textInfo.getH()), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        if isVertical:
            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)

        print(str(index) + " / " + str(len(rects)) + " Finished")
        #cv2.imshow('test', img[rect.y():rect.y()+rect.height(), rect.x():rect.x()+rect.width()])
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()

    cv2.imwrite('D:/Visual Studio Project/DTIPID/DTIPID/DTI_PID/DTI_PID/res/Result/PC-K/Temp/OCR_PC-K-2203_P1_800DPI___partial.png', img)
    stop = timeit.default_timer()
    print('FINISHED : ' + str((stop-start)/60) + ' min')

    #app = QApplication(sys.argv)

    #try:
    #    dlg = Ui_Dialog()
    #    selectedProject = dlg.showDialog()
    #    if selectedProject is not None:
    #        form = ExampleApp()
    #        form.show()
    #except Exception as ex:
    #    print('An error occurred.\n', ex)

    #sys.exit(app.exec_())