 import os
 import cv2
 import math
+import threading
+import multiprocessing
+from multiprocessing import Process, Queue
 
+from shapely.geometry import Point
+from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse
 from PyQt5.QtCore import *
 from PyQt5.QtGui import *
 from PyQt5.QtWidgets import *
...
 
 class QDetectSymbolDialog(QDialog):
 
-    def __init__(self):
+    def __init__(self, parent):
         QDialog.__init__(self)
-
+        self.parent = parent
         self.ui = DetectSymbol_UI.Ui_DetectSymbolDialog()
         self.ui.setupUi(self)
 
...
         if self.ui.listWidgetDrawings.count():
             self.ui.listWidgetDrawings.setCurrentRow(0)
 
-        ##
-        #from PyQt5 import QtGui
-        #cell = QLabel()
-        #cell.setPixmap(QtGui.QPixmap("D:/aa.png"))
-        #self.table.setCellWidget(0, 0, cell)
-        #cell = QLabel()
-        #cell.setPixmap(QtGui.QPixmap("D:/bb.png"))
-        #self.table.setCellWidget(0, 1, cell)
-        #self.table.resizeColumnsToContents()
-        #self.table.resizeRowsToContents()
-        ##
-
+        self.table.cellDoubleClicked.connect(self.cellDoubleClickedEvent)
+
+    def cellDoubleClickedEvent(self, row, column):
+        cell = self.table.cellWidget(row,column)
+        if cell is not None:
+            import SymbolEditorDialog
+            symbolEditorDialog = SymbolEditorDialog.QSymbolEditorDialog(self, cell.pixmap(), AppDocData.instance().getCurrentProject())
+            symbolEditorDialog.showDialog()
+            self.parent.dirTreeWidget.initDirTreeWidget()
+
+
     '''
     @brief text changed Event
     @author kyouho
...
     '''
     def currentTextChangedEvent(self, text):
         self.imageName = text
+        self.imgPath = self.drawingDir + self.ui.listWidgetDrawings.currentItem().data(32)
         self.tableSetting()
 
     '''
...
 
         from TextDetector import TextDetector
         import numpy as np
-        import xml
+        xml = Element('BOXES')
 
         appDocData = AppDocData.instance()
         ## set here because textDetector uses it
         appDocData.imgName = self.imageName
 
         ## convert to a grayscale image
-        imgPath = self.drawingDir + self.ui.listWidgetDrawings.currentItem().data(32)
-        img = cv2.imread(imgPath, 1)
+        img = cv2.imread(self.imgPath, 1)
         imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
 
         ## Drawing Area data of the project
         area = appDocData.getArea('Drawing')
+
+        ## offset of the cropped area region
+        offset = (area.x, area.y) if area is not None else (0,0)
 
         ## binarize the image
         thresholdImg = cv2.threshold(imgGray , 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
         ## image cropped to the project's Area region
         areaImg = thresholdImg[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]
 
-        ## offset of the cropped area region
-        offset = (area.x, area.y) if area is not None else (0,0)
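The hunk above binarizes the drawing with Otsu's method and crops it to the project's Drawing area, remembering the crop offset so detected boxes can later be mapped back to full-image coordinates. A minimal standalone sketch of that preprocessing (the file path and area values are placeholders, not taken from the project):

    import cv2

    # Load the drawing and convert to grayscale (path is a placeholder).
    img = cv2.imread('drawing.png', 1)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Otsu's method picks the binarization threshold automatically;
    # the explicit threshold value (0) is ignored when THRESH_OTSU is set.
    thresholdImg = cv2.threshold(imgGray, 0, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

    # Crop to a hypothetical drawing area (x, y, width, height).
    x, y, width, height = 100, 100, 2000, 1500
    areaImg = thresholdImg[round(y):round(y + height), round(x):round(x + width)]
    offset = (x, y)   # used to shift detected boxes back into full-image space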
+        ## remove small regions first, before line removal
+        ## invert colors for contour extraction
+        areaImg = cv2.bitwise_not(areaImg)
+        ## extract contours
+        image, contours, hierarchy = cv2.findContours(areaImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        for contour in contours:
+            [x, y, w, h] = cv2.boundingRect(contour)
 
-        textDetector = TextDetector()
-        ## extract regions judged to be TextArea
-        textAreas = textDetector.detectTextAreas(areaImg, offset)
-        ## apply OCR to the TextAreas
-        textDetector.recognizeText(thresholdImg, offset, textAreas, [], None, None, None, True)
-        ## OCR results
-        textInfoList = textDetector.textInfoList.copy() if textDetector.textInfoList is not None else None
-        ## remove the TextAreas from the threshold image
-        textDetector.removeTextFromImage(thresholdImg, offset)
+            if (w < 40 or h < 40):
+                areaImg[y:y+h,x:x+w] = 0
+        ## invert colors back
+        areaImg = cv2.bitwise_not(areaImg)
 
-        ## image cropped to the project's Area region
-        areaImg = thresholdImg[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]
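The new code drops small connected regions before line detection: the binary image is inverted so the drawing becomes non-zero foreground, external contours are collected, and any bounding box under 40 px on a side is blanked out. A rough equivalent as a helper (a sketch only; note that cv2.findContours returns two values in OpenCV 4.x and three in the 3.x API this diff uses):

    import cv2

    def removeSmallRegions(binaryImg, minSize=40):
        # invert so the drawing is non-zero foreground for findContours
        inverted = cv2.bitwise_not(binaryImg)
        # OpenCV 3.x signature; for 4.x use: contours, hierarchy = cv2.findContours(...)
        _, contours, _ = cv2.findContours(inverted, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            if w < minSize or h < minSize:
                inverted[y:y + h, x:x + w] = 0   # erase the small blob
        # invert back to black-on-white
        return cv2.bitwise_not(inverted)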
+        ## for multiprocessing
+        verticalProcessList = []
+        verticalLineList = []
+        horizontalProcessList = []
+        horizontalLineList = []
+
+        ## Find VerticalLine using multiprocessing
+        ## Set Vertical Line
+        jumpValue = int(areaImg.shape[1] / os.cpu_count())
+        value = 0
+        for cpuIndex in range(os.cpu_count()):
+            _queue = Queue()
+            _range = value + jumpValue
+            if os.cpu_count() -1 == cpuIndex:
+                _range = areaImg.shape[1]
+
+            _process = Process(target=isVerticalLineThread, args=(value, _range, areaImg, _queue))
+            _process.daemon = True
+            verticalProcessList.append((_process, _queue))
+            value = value + jumpValue
 
-        ## Canny edge / probabilistic Hough transform / Remove Line / threshold 60, minLength 30
-        edges = cv2.Canny(areaImg, 50, 200, apertureSize = 3)
-        lines = cv2.HoughLinesP(edges, 1, np.pi/180, 80, minLineLength = 100)
-        for line in lines:
-            for x1,y1,x2,y2 in line:
-                x1 = round(x1)
-                x2 = round(x2)
-                y1 = round(y1)
-                y2 = round(y2)
-
-                width = round(math.fabs(x1 - x2) if math.fabs(x1 - x2) != 0 else 1)
-                height = round(math.fabs(y1 - y2) if math.fabs(y1 - y2) != 0 else 1)
-
-                ## erase only straight (horizontal or vertical) lines
-                if width !=1 and height != 1:
-                    continue
-
-                ## color White, thickness 7
-                cv2.line(areaImg,(x1,y1),(x2,y2),(255,255,255), 7)
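The removed hunk above was the previous line-removal strategy: Canny edges, a probabilistic Hough transform, and painting strictly horizontal or vertical segments white. A sketch of that older approach, kept only to show what the multiprocessing scan replaces (parameters taken from the removed lines):

    import cv2
    import numpy as np

    def removeStraightLines(areaImg):
        edges = cv2.Canny(areaImg, 50, 200, apertureSize=3)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 80, minLineLength=100)
        if lines is None:
            return
        for x1, y1, x2, y2 in lines[:, 0]:
            width = abs(x1 - x2) if abs(x1 - x2) != 0 else 1
            height = abs(y1 - y2) if abs(y1 - y2) != 0 else 1
            # erase only strictly horizontal or vertical segments
            if width != 1 and height != 1:
                continue
            # paint the segment white with a 7 px stroke so it disappears from the drawing
            cv2.line(areaImg, (int(x1), int(y1)), (int(x2), int(y2)), (255, 255, 255), 7)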
+        ## Set Horizontal Line
+        jumpValue = int(areaImg.shape[0] / os.cpu_count())
+        value = 0
+        for cpuIndex in range(os.cpu_count()):
+            _queue = Queue()
+            _range = value + jumpValue
+            if os.cpu_count() -1 == cpuIndex:
+                _range = areaImg.shape[0]
+
+            _process = Process(target=isHorizontalLineThread, args=(value, _range, areaImg, _queue))
+            _process.daemon = True
+            horizontalProcessList.append((_process, _queue))
+            value = value + jumpValue
+
+        ## process start
+        for process in verticalProcessList:
+            process[0].start()
+
+        ## Wait Vertical And Start Horizontal
+        for index in range(len(verticalProcessList)):
+            verticalLineList.extend(verticalProcessList[index][1].get())
+            verticalProcessList[index][0].join()
+            verticalProcessList[index][1].close()
+
+            horizontalProcessList[index][0].start()
+
+        ## Wait Horizontal
+        for process in horizontalProcessList:
+            horizontalLineList.extend(process[1].get())
+            process[0].join()
+            process[1].close()
+
+        ## change color
+        for vLine in verticalLineList:
+            p1 = vLine[0]
+            p2 = vLine[1]
+            x = p1[0]
+            areaImg[p1[1]:p2[1], x] = 255
+        ## change color
+        for vLine in horizontalLineList:
+            p1 = vLine[0]
+            p2 = vLine[1]
+            y = p1[1]
+            areaImg[y, p1[0]:p2[0]] = 255
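The vertical and horizontal scans follow the same fan-out pattern: the image is split into one band per CPU core, each band goes to a worker Process together with its own Queue, and the parent drains every queue before joining. A minimal self-contained sketch of that pattern (the worker here only counts dark pixels in its band; it stands in for isVerticalLineThread/isHorizontalLineThread):

    import os
    import numpy as np
    from multiprocessing import Process, Queue

    def countDarkPixels(start, end, img, _queue):
        # stand-in worker: report how many zero-valued pixels its column band holds
        _queue.put(int((img[:, start:end] == 0).sum()))

    if __name__ == '__main__':
        img = np.full((100, 400), 255, dtype=np.uint8)
        img[:, 50] = 0                      # one dark vertical line

        jumpValue = int(img.shape[1] / os.cpu_count())
        value, workers = 0, []
        for cpuIndex in range(os.cpu_count()):
            _queue = Queue()
            _range = img.shape[1] if cpuIndex == os.cpu_count() - 1 else value + jumpValue
            _process = Process(target=countDarkPixels, args=(value, _range, img, _queue))
            _process.daemon = True
            workers.append((_process, _queue))
            value += jumpValue

        for _process, _ in workers:
            _process.start()
        total = 0
        for _process, _queue in workers:
            total += _queue.get()           # drain before join, as the diff does, to avoid blocking
            _process.join()
        print(total)                        # 100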
 
+
         ## invert colors for contour extraction
-        areaImg = cv2.bitwise_not(areaImg)
+        areaImg = cv2.bitwise_not(areaImg)
         ## extract contours
         image, contours, hierarchy = cv2.findContours(areaImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+        ## organize recList
+        lineWidth = 5
+        recList = []
+        tempRecList = []
         for contour in contours:
             [x, y, w, h] = cv2.boundingRect(contour)
+            tempRecList.append([x-lineWidth, y-lineWidth, x+w+lineWidth, y+h+lineWidth])
+
+        ## merge overlapping rects
+        while len(tempRecList):
+            rec1 = tempRecList[0]
+            _temp = []
+            for index in range(1, len(tempRecList)):
+                rec2 = tempRecList[index]
+                if rec1[0] <= rec2[2] and rec1[2] >= rec2[0] and rec1[1] <= rec2[3] and rec1[3] >= rec2[1]:
+                    _temp.append(rec2)
+
+            if len(_temp):
+                x1 = rec1[0]
+                y1 = rec1[1]
+                x2 = rec1[2]
+                y2 = rec1[3]
+
+                for rec in _temp:
+                    x1 = min(x1, rec[0])
+                    y1 = min(y1, rec[1])
+                    x2 = max(x2, rec[2])
+                    y2 = max(y2, rec[3])
+                    tempRecList.remove(rec)
+                tempRecList.append([x1, y1, x2, y2])
+
+            else:
+                recList.append(rec1)
+
+            tempRecList.remove(rec1)
 
-            # remove too small one
-            if (w < 10 or h < 10): continue
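The while loop above merges overlapping (padded) bounding boxes into single candidate symbol regions: take the first rectangle, collect every other rectangle that intersects it, replace the group with its union, and repeat until nothing overlaps. The same idea as a standalone helper (hypothetical function name), with a tiny example:

    def mergeOverlappingRects(rects):
        """Merge axis-aligned [x1, y1, x2, y2] boxes until no two overlap."""
        merged = []
        pending = [list(r) for r in rects]
        while pending:
            rec1 = pending[0]
            overlapping = [r for r in pending[1:]
                           if rec1[0] <= r[2] and rec1[2] >= r[0]
                           and rec1[1] <= r[3] and rec1[3] >= r[1]]
            if overlapping:
                group = [rec1] + overlapping
                union = [min(r[0] for r in group), min(r[1] for r in group),
                         max(r[2] for r in group), max(r[3] for r in group)]
                for r in overlapping:
                    pending.remove(r)
                pending.append(union)       # the union may now overlap other boxes
            else:
                merged.append(rec1)
            pending.remove(rec1)
        return merged

    # [0,0,10,10] and [5,5,20,20] overlap; [100,100,110,110] does not
    print(mergeOverlappingRects([[0, 0, 10, 10], [5, 5, 20, 20], [100, 100, 110, 110]]))
    # -> [[100, 100, 110, 110], [0, 0, 20, 20]]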
+        ## to xml
+        for rec in recList:
+            [x1, y1, x2, y2] = rec
+            w = x2-x1
+            h = y2-y1
+            if w < 20 or h < 20: continue
+            elif w*4<h or h*4<w: continue
+
+            boxElement = Element('BOX')
+
+            xElement = Element('X')
+            xElement.text = str(x1 + offset[0])
+            boxElement.append(xElement)
+
+            yElement = Element('Y')
+            yElement.text = str(y1 + offset[1])
+            boxElement.append(yElement)
+
+            widthElement = Element('WIDTH')
+            widthElement.text = str(x2-x1)
+            boxElement.append(widthElement)
+
+            heightElement = Element('HEIGHT')
+            heightElement.text = str(y2-y1)
+            boxElement.append(heightElement)
+
+            xml.append(boxElement)
+
+        ElementTree(xml).write(self.boxDir + self.imageName + '.box')
+        self.tableSetting()
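Each surviving rectangle is written to <imageName>.box as a flat ElementTree document, with coordinates shifted back into full-image space by the drawing-area offset; tableSetting later reads it back with parse() and root.iter('BOX'). A small sketch of the layout the loop above produces (values are illustrative):

    from xml.etree.ElementTree import Element, SubElement, tostring

    # Rebuild one BOX entry the way the loop above writes it (illustrative values).
    boxes = Element('BOXES')
    box = SubElement(boxes, 'BOX')
    for tag, value in (('X', 412), ('Y', 365), ('WIDTH', 58), ('HEIGHT', 61)):
        SubElement(box, tag).text = str(value)
    print(tostring(boxes).decode())
    # <BOXES><BOX><X>412</X><Y>365</Y><WIDTH>58</WIDTH><HEIGHT>61</HEIGHT></BOX></BOXES>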
 
+    def isLine(self, x, y, img, minLineSize, matrix):
+        maxX = img.shape[1]
+        maxY = img.shape[0]
 
+        ## Horizontal
+        for moveX in range(x + 1, maxX):
+            if matrix[moveX][y][0]:
+                break
 
-            cv2.rectangle(test, (x, y), (x + w, y + h), (255, 0, 0), 1)
+            if img[y,moveX] != 0:
+                if moveX - x > minLineSize:
+                    return (True, 'H', moveX)
+                break
+            matrix[moveX][y][0] = True
 
+        ## Vertical
+        for moveY in range(y + 1, maxY):
+            if matrix[x][moveY][1]:
+                break
+
+            if img[moveY,x] != 0:
+                if moveY - y > minLineSize:
+                    return (True, 'V', moveY)
+                break
+            matrix[x][moveY][1] = True
 
-        cv2.imwrite('D:/areaImg.png', areaImg)
+        return (False,)
 
+    def isCombinable(self, pointList1, pointList2):
+        for pt1 in pointList1:
+            for pt2 in pointList2:
+                if Point(pt1[0],pt1[1]).distance(Point(pt2[0],pt2[1])) < 10:
+                    return True
     '''
     @brief set up the symbol preview table
     @author kyouho
     @date 2018.09.18
     '''
     def tableSetting(self):
+        columnCount = 3
+        self.table.setColumnCount(columnCount)
         self.table.setRowCount(0)
+        boxCount = 0
 
-        if os.path.exists(self.boxDir + self.imageName + '.box'):
+        xmlPath = self.boxDir + self.imageName + '.box'
+        if os.path.exists(xmlPath):
+            _pixmap = QPixmap(self.imgPath)
 
-            pass
+            xml = parse(xmlPath)
+            root = xml.getroot()
 
+            for box in root.iter('BOX'):
+                rowIndex = int(boxCount / columnCount)
+                self.table.setRowCount(rowIndex + 1)
+
+                _x = int(box.find('X').text)
+                _y = int(box.find('Y').text)
+                _width = int(box.find('WIDTH').text)
+                _height = int(box.find('HEIGHT').text)
+
+                rect = QRect(_x, _y, _width, _height)
+                boxImg = _pixmap.copy(rect)
+
+                cell = QLabel()
+                cell.setPixmap(boxImg)
+                self.table.setCellWidget(rowIndex, boxCount % columnCount, cell)
+
+                boxCount = boxCount + 1
+
+
+            self.table.resizeColumnsToContents()
+            self.table.resizeRowsToContents()
 
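tableSetting fills the preview table by cropping each detected box out of the drawing pixmap and dropping the crop into a cell as a QLabel. The core of that pattern in isolation (a minimal sketch, assuming a running QApplication and a placeholder image path):

    from PyQt5.QtCore import QRect
    from PyQt5.QtGui import QPixmap
    from PyQt5.QtWidgets import QApplication, QLabel, QTableWidget

    app = QApplication([])
    table = QTableWidget(1, 3)

    pixmap = QPixmap('drawing.png')                 # placeholder path
    boxImg = pixmap.copy(QRect(412, 365, 58, 61))   # crop one detected box

    cell = QLabel()
    cell.setPixmap(boxImg)
    table.setCellWidget(0, 0, cell)                 # row 0, column 0
    table.resizeColumnsToContents()
    table.resizeRowsToContents()
    # table.show(); app.exec_()                     # uncomment to display the table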
     '''
     @brief save fluid codes to sqlite
...
     '''
     def accept(self):
 
-        QDialog.accept(self)
+        QDialog.accept(self)
+
+'''
+@brief Check Vertical Line using Multiprocessing
+@author kyouho
+@date 2018.09.27
+'''
+def isVerticalLineThread(start, end, img, _queue):
+    minLineSize = 40
+
+    ## Vertical
+    find = False
+    startY = 0
+    lineList = []
+    for x in range(start, end):
+        for y in range(0, img.shape[0]):
+            if img[y,x] == 0:
+                if find:
+                    continue
+                else:
+                    find = True
+                    startY = y
+            else:
+                if find:
+                    if y - startY > minLineSize:
+                        lineList.append(((x,startY), (x,y)))
+                    find = False
+    _queue.put(lineList)
+
+'''
+@brief Check Horizontal Line using Multiprocessing
+@author kyouho
+@date 2018.09.27
+'''
+def isHorizontalLineThread(start, end, img, _queue):
+    minLineSize = 40
+
+    ## Horizontal
+    find = False
+    startX = 0
+    lineList = []
+    for y in range(start, end):
+        for x in range(0, img.shape[1]):
+            if img[y,x] == 0:
+                if find:
+                    continue
+                else:
+                    find = True
+                    startX = x
+            else:
+                if find:
+                    if x - startX > minLineSize:
+                        lineList.append(((startX,y), (x,y)))
+
+                    find = False
+    _queue.put(lineList)
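isHorizontalLineThread (and its vertical twin) are plain run-length scans: walk each row (or column) of the band assigned to the worker, track stretches of zero-valued pixels, and push every stretch longer than minLineSize onto the queue as a ((x1, y1), (x2, y2)) pair. A quick way to sanity-check one worker without spawning a process, assuming this module is importable as DetectSymbolDialog (the module name is an assumption):

    import numpy as np
    from queue import Queue   # any object with put() works when calling the worker directly

    from DetectSymbolDialog import isHorizontalLineThread   # assumed module name

    img = np.full((10, 200), 255, dtype=np.uint8)
    img[4, 20:150] = 0        # one horizontal dark run, 130 px long

    q = Queue()
    isHorizontalLineThread(0, img.shape[0], img, q)
    print(q.get())            # [((20, 4), (150, 4))]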