hytos / DTI_PID / DTI_PID / TextDetector.py @ 85a460a6
이력 | 보기 | 이력해설 | 다운로드 (16.3 KB)
1 |
# coding: utf-8
|
---|---|
2 |
"""
|
3 |
This is text detector module
|
4 |
"""
|
5 |
import sys |
6 |
import os |
7 |
import cv2 |
8 |
import numpy as np |
9 |
from PyQt5.QtCore import * |
10 |
from PyQt5.QtGui import * |
11 |
from PyQt5.QtWidgets import * |
12 |
from PyQt5.QtSvg import * |
13 |
|
14 |
from AppDocData import * |
15 |
import TextInfo as ti |
16 |
import tesseract_ocr_module as TOCR |
17 |
|
18 |
MIN_TEXT_SIZE = 10
|
19 |
THREAD_MAX_WORKER = os.cpu_count() |
20 |
|
21 |
class TextDetector:
    """Detects and recognizes text areas in a drawing image.

    @author humkyung
    @date   2018.07.11
    """

    def __init__(self):
        """Initialize empty result containers."""
        self.textInfoList = []            # texts recognized from detected text areas
        self.otherTextInfoList = []       # texts found in configured areas other than 'Drawing'
        self.titleBlockTextInfoList = []  # texts found in title-block areas
|
31 |
|
32 |
'''
|
33 |
@brief detect text areas
|
34 |
@author humkyung
|
35 |
@date 2018.06.16
|
36 |
'''
|
37 |
def detectTextAreas(self, img, offset): |
38 |
tInfoList = [] |
39 |
try:
|
40 |
tInfoList = self.getTextAreaInfo(img, offset[0], offset[1]) |
41 |
except Exception as ex: |
42 |
print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno)) |
43 |
|
44 |
return tInfoList
|
45 |
|
46 |
'''
|
47 |
@brief Get Text Area info by contour
|
48 |
@author Jeongwoo
|
49 |
@date 2018.06.05
|
50 |
@history 2018.06.08 Jeongwoo Add angle
|
51 |
humkyung 2018.06.18 fixed logic to detect text area
|
52 |
'''
|
53 |
def getTextAreaInfo(self, imgGray, offsetX, offsetY): |
54 |
from AppDocData import AppDocData |
55 |
|
56 |
appDocData = AppDocData.instance() |
57 |
project = appDocData.getCurrentProject() |
58 |
|
59 |
configs = appDocData.getConfigs('Text Size', 'Max Text Size') |
60 |
maxTextSize = int(configs[0].value) if 1 == len(configs) else 100 |
61 |
minSize = 5
|
62 |
|
63 |
contourImg = np.ones(imgGray.shape, np.uint8) * 255
|
64 |
binaryImg,mask = cv2.threshold(imgGray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) |
65 |
|
66 |
image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) |
67 |
for contour in contours: |
68 |
# remove too big one or horizontal/vertical line
|
69 |
[x, y, w, h] = cv2.boundingRect(contour) |
70 |
area = cv2.contourArea(contour, True)
|
71 |
|
72 |
# skip one which size is greater than max size or less then minimum size
|
73 |
if area >= 0: |
74 |
if (w > maxTextSize or h > maxTextSize) or (w <= minSize and h <= minSize): continue |
75 |
|
76 |
if area >= 0: |
77 |
cv2.drawContours(contourImg, [contour], -1, (0,0,0), -1) |
78 |
cv2.drawContours(contourImg, [contour], -1, (255,255,255), 1) |
79 |
else:
|
80 |
cv2.drawContours(contourImg, [contour], -1, (255,255,255), -1) |
81 |
|
82 |
path = os.path.join(project.getTempPath(), 'OCR_{}.png'.format(appDocData.imgName))
|
83 |
cv2.imwrite(path, contourImg) |
84 |
|
85 |
rects = [] |
86 |
configs = appDocData.getConfigs('Text Recognition', 'Expand Size') |
87 |
expandSize = int(configs[0].value) if 1 == len(configs) else 10 |
88 |
configs = appDocData.getConfigs('Text Recognition', 'Shrink Size') |
89 |
shrinkSize = int(configs[0].value) if 1 == len(configs) else 0 |
90 |
|
91 |
eroded = cv2.erode(contourImg, np.ones((expandSize,expandSize), np.uint8)) |
92 |
#path = os.path.join(project.getTempPath(), 'ERODED_OCR_{}.png'.format(appDocData.imgName))
|
93 |
#cv2.imwrite(path, eroded)
|
94 |
|
95 |
eroded = cv2.bitwise_not(eroded) |
96 |
|
97 |
image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
98 |
for contour in contours: |
99 |
area = cv2.contourArea(contour, True)
|
100 |
if area < 0: |
101 |
[x, y, w, h] = cv2.boundingRect(contour) |
102 |
|
103 |
img = contourImg[y:y+h, x:x+w] |
104 |
img = cv2.bitwise_not(img) |
105 |
|
106 |
horizontal,max_width = 0,0 |
107 |
vertical,max_height = 0,0 |
108 |
_, _contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
109 |
for xx in _contours: |
110 |
[_x, _y, _w, _h] = cv2.boundingRect(xx) |
111 |
cv2.rectangle(img, (_x, _y), (_x+_w, _y+_h), 255, 1) |
112 |
|
113 |
max_width = _x if _x > max_width else max_width |
114 |
max_height = _y if _y > max_height else max_height |
115 |
|
116 |
if (_w < _h) or (_w > maxTextSize and _h < maxTextSize): # width is greater than height |
117 |
horizontal += 1 + (_w*_h)/(w*h)
|
118 |
else:
|
119 |
vertical += 1 + (_w*_h)/(w*h)
|
120 |
|
121 |
if (w < 10 and h < 10) or (max_width > maxTextSize and max_height > maxTextSize): continue; # skip too small or big one |
122 |
|
123 |
"""
|
124 |
if w > maxTextSize:
|
125 |
horizontal = 1
|
126 |
elif h > maxTextSize:
|
127 |
vertical = 1
|
128 |
else:
|
129 |
if shrinkSize > 0:
|
130 |
img = cv2.erode(img, np.ones((shrinkSize,shrinkSize), np.uint8))
|
131 |
|
132 |
_, _contours, _ = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
|
133 |
for xx in _contours:
|
134 |
[_x, _y, _w, _h] = cv2.boundingRect(xx)
|
135 |
cv2.rectangle(img, (_x, _y), (_x+_w, _y+_h), 255, 1)
|
136 |
|
137 |
if (_w < _h) or (_w > maxTextSize and _h < maxTextSize): # width is greater than height
|
138 |
horizontal += 1 + (_w*_h)/(w*h)
|
139 |
else:
|
140 |
vertical += 1 + (_w*_h)/(w*h)
|
141 |
"""
|
142 |
|
143 |
"""
|
144 |
if horizontal > vertical:
|
145 |
filePath = os.path.join(project.getTempPath(), "Tile", "H-{}-{}-{}-{}.png".format(x,y,w,h))
|
146 |
else:
|
147 |
filePath = os.path.join(project.getTempPath(), "Tile", "V-{}-{}-{}-{}.png".format(x,y,w,h))
|
148 |
cv2.imwrite(filePath, img)
|
149 |
"""
|
150 |
|
151 |
rects.append([0 if horizontal > vertical else 90, QRect(x, y, w, h)]) |
152 |
|
153 |
configs = appDocData.getConfigs('Text Recognition', 'Merge Size') |
154 |
mergeSize = int(configs[0].value) if 1 == len(configs) else 10 |
155 |
# merge rectangles
|
156 |
intersected = True
|
157 |
while intersected:
|
158 |
intersected = False
|
159 |
for rect in rects[:]: # clone rects |
160 |
if 0 == rect[0]: |
161 |
rectExpand = rect[1].adjusted(-mergeSize, 0, mergeSize, 0) |
162 |
else:
|
163 |
rectExpand = rect[1].adjusted(0, -mergeSize, 0, mergeSize) |
164 |
|
165 |
matches = [x for x in rects if (x[0] == rect[0]) and rectExpand.intersects(x[1])] |
166 |
if len(matches) > 1: |
167 |
united = matches[0]
|
168 |
for _rect in matches: |
169 |
united[1] = united[1].united(_rect[1]) |
170 |
if _rect in rects: rects.remove(_rect) |
171 |
rects.append(united) |
172 |
intersected = True
|
173 |
break
|
174 |
|
175 |
list = [] |
176 |
for rect in rects: |
177 |
angle = rect[0]
|
178 |
list.append(ti.TextInfo('', round(offsetX) + rect[1].x(), round(offsetY) + rect[1].y(), rect[1].width(), rect[1].height(), angle)) |
179 |
|
180 |
x = rect[1].x()
|
181 |
y = rect[1].y()
|
182 |
w = rect[1].width()
|
183 |
h = rect[1].height()
|
184 |
img = contourImg[y:y+h, x:x+w] |
185 |
if angle == 0: |
186 |
filePath = os.path.join(project.getTempPath(), "Tile", "H-{}-{}-{}-{}.png".format(x,y,w,h)) |
187 |
else:
|
188 |
filePath = os.path.join(project.getTempPath(), "Tile", "V-{}-{}-{}-{}.png".format(x,y,w,h)) |
189 |
cv2.imwrite(filePath, img) |
190 |
|
191 |
return list |
192 |
|
193 |
'''
|
194 |
@brief recognize text of given text info
|
195 |
@author humkyung
|
196 |
@date 2018.07.24
|
197 |
@history change parameter updateProgressSignal to worker
|
198 |
2018.11.08 euisung add white char list check process on db
|
199 |
'''
|
200 |
@staticmethod
|
201 |
def recognizeTextFromImage(tInfo, imgOCR, offset, searchedSymbolList, worker, listWidget, maxProgressValue): |
202 |
import re |
203 |
res = [] |
204 |
|
205 |
appDocData = AppDocData.instance() |
206 |
|
207 |
try:
|
208 |
x = tInfo.getX() - round(offset[0]) |
209 |
y = tInfo.getY() - round(offset[1]) |
210 |
img = imgOCR[y:y+tInfo.getH(), x:x+tInfo.getW()] |
211 |
|
212 |
# set angle 0 if symbol contains the text area is instrumentation
|
213 |
category = None
|
214 |
contains = [symbol for symbol in searchedSymbolList if symbol.contains(tInfo)] |
215 |
if contains:
|
216 |
_type = contains[0].getType()
|
217 |
category = appDocData.getSymbolCategoryByType(_type) |
218 |
if 'Instrumentation' == category: tInfo.setAngle(0) |
219 |
# up to here
|
220 |
|
221 |
whiteCharList = appDocData.getConfigs('Text Recognition', 'White Character List') |
222 |
if len(whiteCharList) is 0: |
223 |
resultTextInfo = TOCR.getTextInfo(img, (x, y), tInfo.getAngle(), language=appDocData.OCRData) |
224 |
else:
|
225 |
resultTextInfo = TOCR.getTextInfo(img, (x, y), tInfo.getAngle(), language=appDocData.OCRData, conf = whiteCharList[0].value)
|
226 |
|
227 |
if resultTextInfo is not None and len(resultTextInfo) > 0: |
228 |
for result in resultTextInfo: |
229 |
result.setX(result.getX() + round(offset[0])) |
230 |
result.setY(result.getY() + round(offset[1])) |
231 |
if 'Instrumentation' == category: |
232 |
text = re.sub('[^a-zA-Z0-9]+', '', result.getText()) |
233 |
result.setText(text) |
234 |
res.extend(resultTextInfo) |
235 |
|
236 |
if listWidget is not None: |
237 |
item = QListWidgetItem('{},{},{} is recognized'.format(resultTextInfo[0].getX(), resultTextInfo[0].getY(), resultTextInfo[0].getText())) |
238 |
listWidget.addItem(item) |
239 |
else:
|
240 |
pass
|
241 |
|
242 |
if worker is not None: worker.updateProgress.emit(maxProgressValue, resultTextInfo[0].getText() if resultTextInfo is not None and 1 == len(resultTextInfo) else None) |
243 |
except Exception as ex: |
244 |
message = 'error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno) |
245 |
worker.displayLog.emit(MessageType.Error, message) |
246 |
|
247 |
return res
|
248 |
|
249 |
'''
|
250 |
@brief read image drawing and then remove text
|
251 |
@author jwkim
|
252 |
@date
|
253 |
@history humkyung 2018.04.06 check if file exists
|
254 |
Jeongwoo 2018.05.09 Use Tesseract OCR after Azure OCR (Azure OCR : Getting text area)
|
255 |
Jeongwoo 2018.05.25 Add condition on if-statement
|
256 |
Jeongwoo 2018.06.05 Get text area data list by config.type
|
257 |
Jeongwoo 2018.06.08 Add angle Parameter on TOCR.getTextInfo
|
258 |
humkyung 2018.06.16 update proessbar while recognizing text
|
259 |
humkyung 2018.07.03 remove white space and replace given oldStr with newStr
|
260 |
humkyung 2018.07.07 change method name to recognizeText
|
261 |
euisung 2018.11.08 add white char list check process on db
|
262 |
euisung 2018.11.12 add title block properties
|
263 |
'''
|
264 |
def recognizeText(self, imgSrc, offset, tInfoList, searchedSymbolList, worker, listWidget, maxProgressValue, onlyTextArea = False): |
265 |
import concurrent.futures as futures |
266 |
from Area import Area |
267 |
|
268 |
try:
|
269 |
self.otherTextInfoList = []
|
270 |
self.titleBlockTextInfoList = []
|
271 |
self.textInfoList = []
|
272 |
|
273 |
appDocData = AppDocData.instance() |
274 |
project = appDocData.getCurrentProject() |
275 |
|
276 |
path = os.path.join(project.getTempPath(), 'OCR_{}.png'.format(appDocData.imgName))
|
277 |
if os.path.isfile(path):
|
278 |
imgOCR = cv2.imread(path, 1)
|
279 |
imgOCR = cv2.threshold(cv2.cvtColor(imgOCR, cv2.COLOR_BGR2GRAY), 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1] |
280 |
|
281 |
pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER) |
282 |
for tInfo in tInfoList: |
283 |
future = pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgOCR, offset, searchedSymbolList, worker, listWidget, maxProgressValue) |
284 |
data = future.result() |
285 |
if data: self.textInfoList.extend(data) |
286 |
pool.shutdown(wait = True)
|
287 |
|
288 |
if onlyTextArea:
|
289 |
return
|
290 |
# parse texts in area except Drawing area
|
291 |
whiteCharList = appDocData.getConfigs('Text Recognition', 'White Character List') |
292 |
for area in appDocData.getAreaList(): |
293 |
if area.name == 'Drawing': continue |
294 |
|
295 |
if area.name == 'Unit': |
296 |
img = imgSrc[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)] |
297 |
if len(whiteCharList) is 0: |
298 |
texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng') |
299 |
else:
|
300 |
texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng', conf = whiteCharList[0].value) |
301 |
if texts is not None and len(texts) > 0: |
302 |
appDocData.activeDrawing.setAttr('Unit', texts[0].getText()) |
303 |
self.otherTextInfoList.append([area.name, texts])
|
304 |
else:
|
305 |
if area is not None and hasattr(area, 'img') and area.img is not None: |
306 |
if len(whiteCharList) is 0: |
307 |
texts = TOCR.getTextInfo(area.img, (area.x, area.y), 0, language='eng') |
308 |
else:
|
309 |
texts = TOCR.getTextInfo(area.img, (area.x, area.y), 0, language='eng', conf=whiteCharList[0].value) |
310 |
self.otherTextInfoList.append([area.name, texts])
|
311 |
|
312 |
titleBlockProps = appDocData.getTitleBlockProperties() |
313 |
for titleBlockProp in titleBlockProps: |
314 |
area = Area(titleBlockProp[0])
|
315 |
area.parse(titleBlockProp[2])
|
316 |
img = imgSrc[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)] |
317 |
if len(whiteCharList) is 0: |
318 |
texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language=appDocData.OCRData)
|
319 |
else:
|
320 |
texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng', conf=whiteCharList[0].value) |
321 |
self.titleBlockTextInfoList.append([area.name, texts])
|
322 |
|
323 |
if worker is not None: worker.updateProgress.emit(maxProgressValue, None) |
324 |
except Exception as ex: |
325 |
message = 'error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno) |
326 |
worker.displayLog.emit(MessageType.Error, message) |
327 |
|
328 |
'''
|
329 |
@brief remove text from image
|
330 |
@author humkyung
|
331 |
@date 2018.07.24
|
332 |
'''
|
333 |
def removeTextFromImage(self, imgSrc, offset): |
334 |
appDocData = AppDocData.instance() |
335 |
project = appDocData.getCurrentProject() |
336 |
|
337 |
path = os.path.join(project.getTempPath(), 'OCR_{}.png'.format(appDocData.imgName))
|
338 |
if os.path.isfile(path):
|
339 |
imgOCR = cv2.imread(path, 1)
|
340 |
imgOCR = cv2.threshold(cv2.cvtColor(imgOCR, cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1] |
341 |
|
342 |
# remove recognized text from image
|
343 |
for text in self.textInfoList: |
344 |
x = round(text.getX() - offset[0]) |
345 |
y = round(text.getY() - offset[1]) |
346 |
width = round(text.getW())
|
347 |
height = round(text.getH())
|
348 |
self.removeText(imgSrc, (round(text.getX()), round(text.getY())), imgOCR[y:y+height, x:x+width]) |
349 |
# up to here
|
350 |
|
351 |
'''
|
352 |
@brief remove text from image by using ocr image
|
353 |
@author
|
354 |
'''
|
355 |
def removeText(self, img, pt, imgOCR): |
356 |
try:
|
357 |
x = round(pt[0]) |
358 |
y = round(pt[1]) |
359 |
width, height = imgOCR.shape[::-1]
|
360 |
|
361 |
temp = img[y:y+height, x:x+width] |
362 |
imgOCR = cv2.erode(imgOCR, np.ones((3,3), np.uint8)) |
363 |
mask = cv2.bitwise_or(temp, cv2.bitwise_not(imgOCR)) |
364 |
imgXOR = cv2.bitwise_xor(temp, mask) |
365 |
img[y:y+height, x:x+width] = cv2.bitwise_not(imgXOR) |
366 |
|
367 |
except Exception as ex: |
368 |
print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno)) |
369 |
|
370 |
return img
|