Revision a1f892b5

ID a1f892b5547db65d9870a0a8f26d7de529278fc5
Parent 8850898b
Child 0845ed9a

Added by 백흠경 about 6 years ago

issue #000

View differences:

DTI_PID/DTI_PID/RecognitionDialog.py
                 listWidget.addItem("Start recognition : " + mainRes)
                 threadLock.acquire()
                 offset = (area.x, area.y) if area is not None else (0,0)
-                if worker.isTextChecked:
-                    textAreas = textDetector.detectTextAreas(area.img if area is not None else appDocData.imgSrc, offset)
-                    maxProgressValue = len(textAreas) + 1
+                #if worker.isTextChecked:
+                #    textAreas = textDetector.detectTextAreas(area.img if area is not None else appDocData.imgSrc, offset)
+                #    maxProgressValue = len(textAreas) + 1

                 if worker.isSymbolChecked:
                     ### calculate total count of symbol
......
                         _img = appDocData.imgSrc[round(item.getSp()[1]):round(item.getSp()[1]+item.getHeight()), round(item.getSp()[0]):round(item.getSp()[0]+item.getWidth())]
                         cv2.imwrite(os.path.join(project.getTempPath(), 'Tile', item.getName()+'.png'), _img)
                     ## up to here
-
+
+                    pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
+                    for sym in searchedSymbolList:
+                        pool.submit(Worker.removeDetectedSymbol, sym, appDocData.imgSrc)
+                    pool.shutdown(wait = True)
+
+                ## Remove Noise
+                appDocData.imgSrc = cv2.fastNlMeansDenoising(appDocData.imgSrc, None, 9, 13)
+                #kernel1 = np.ones((2, 2), np.uint8)
+                #appDocData.imgSrc = cv2.dilate(appDocData.imgSrc, kernel1)
+                #appDocData.imgSrc = cv2.erode(appDocData.imgSrc, kernel1)
+
                 if worker.isTextChecked:
+                    textAreas = textDetector.detectTextAreas(area.img if area is not None else appDocData.imgSrc, offset)
+                    maxProgressValue = len(textAreas) + 1
+
                     worker.displayTitle.emit('텍스트 인식 중...')
                     textDetector.recognizeText(appDocData.imgSrc, offset, textAreas, searchedSymbolList, worker, listWidget, maxProgressValue)
                     textInfoList = textDetector.textInfoList.copy() if textDetector.textInfoList is not None else None
......

                     appDocData.imgName = os.path.splitext(os.path.basename(mainRes))[0]

-                pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
-                for sym in searchedSymbolList:
-                    pool.submit(Worker.removeDetectedSymbol, sym, appDocData.imgSrc)
-                pool.shutdown(wait = True)
-
                 ## Remove Noise
-                kernel1 = np.ones((2, 2), np.uint8)
-                appDocData.imgSrc = cv2.dilate(appDocData.imgSrc, kernel1)
-                appDocData.imgSrc = cv2.erode(appDocData.imgSrc, kernel1)
+                #kernel1 = np.ones((2, 2), np.uint8)
+                #appDocData.imgSrc = cv2.dilate(appDocData.imgSrc, kernel1)
+                #appDocData.imgSrc = cv2.erode(appDocData.imgSrc, kernel1)

                 removedSymbolImgPath = os.path.join(project.getTempPath(), os.path.basename(path))
                 cv2.imwrite(removedSymbolImgPath, appDocData.imgSrc)
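Note on the hunk above: symbol removal and the noise pass now run before text-area detection, and the noise pass switches from a 2x2 dilate/erode to cv2.fastNlMeansDenoising. A minimal standalone sketch of the two denoising calls on a generic grayscale image; the file names and variable names below are placeholders, not the project's:

    import cv2
    import numpy as np

    # Placeholder input: any grayscale or binarized drawing image.
    img = cv2.imread('drawing.png', cv2.IMREAD_GRAYSCALE)

    # Previous approach (commented out in the diff): dilate then erode
    # with a 2x2 kernel, i.e. a small morphological closing.
    kernel = np.ones((2, 2), np.uint8)
    closed = cv2.erode(cv2.dilate(img, kernel), kernel)

    # New approach, matching the positional arguments used in the diff:
    # h=9 (filter strength) and templateWindowSize=13; searchWindowSize
    # keeps its default of 21.
    denoised = cv2.fastNlMeansDenoising(img, None, 9, 13)

    cv2.imwrite('denoised.png', denoised)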
......
             return 0

         #shared area
-        x = max(sp[0], pt[0]);
-        y = max(sp[1], pt[1]);
-        w = min(sp[0] + width, pt[0] + tw) - x;
-        h = min(sp[1] + height, pt[1] + th) - y;
+        x = max(sp[0], pt[0])
+        y = max(sp[1], pt[1])
+        w = min(sp[0] + width, pt[0] + tw) - x
+        h = min(sp[1] + height, pt[1] + th) - y

         return float((w * h)) / float((tw * th)) * 100

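The hunk above only drops stray C-style semicolons; the surrounding code computes how much of a target box (pt, tw, th) is covered by a detected box (sp, width, height), as a percentage of the target's area. A self-contained sketch of that calculation, assuming axis-aligned boxes; the function name and the non-overlap guard are illustrative additions, not the project's code:

    # Percentage of box (pt, tw, th) covered by box (sp, width, height).
    # sp and pt are (x, y) top-left corners.
    def overlap_percent(sp, width, height, pt, tw, th):
        x = max(sp[0], pt[0])
        y = max(sp[1], pt[1])
        w = min(sp[0] + width, pt[0] + tw) - x
        h = min(sp[1] + height, pt[1] + th) - y
        if w <= 0 or h <= 0:
            return 0.0   # boxes do not intersect
        return float(w * h) / float(tw * th) * 100

    # A 10x10 box offset by (5, 5) covers 25% of a 10x10 box at the origin.
    print(overlap_percent((5, 5), 10, 10, (0, 0), 10, 10))  # 25.0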
......
             symImg = cv2.rotate(symImg, cv2.ROTATE_90_COUNTERCLOCKWISE)

         threadLock.acquire()
-        temp = []
         temp = imgSrc[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw]
-        symImgBin = cv2.bitwise_not(symImg)
-        result = cv2.bitwise_xor(symImgBin, temp)
-        result = cv2.dilate(result, np.ones((5, 5), np.uint8))
-        imgSrc[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw] = result
+        #symImg = cv2.dilate(symImg, np.ones((2,2), np.uint8))
+        imgXOR = cv2.bitwise_xor(temp, symImg)
+        #imgXOR = cv2.dilate(imgXOR, np.ones((5, 5), np.uint8))
+        imgSrc[sp[1]:sp[1]+sh, sp[0]:sp[0]+sw] = cv2.bitwise_not(imgXOR)
         threadLock.release()

     '''
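The rewritten block above erases a matched symbol by XOR-ing the symbol template against the corresponding window of the drawing and inverting the result: pixels where the window and the template agree become white, pixels where they differ become black, so the matched strokes are wiped while other line work crossing the window survives. A small numpy illustration of that idea on toy 0/255 arrays; the stroke layout is made up for the example:

    import cv2
    import numpy as np

    # White background (255), black strokes (0), as in a binarized drawing.
    region = np.full((4, 4), 255, np.uint8)
    region[1, :] = 0           # stroke belonging to the matched symbol
    region[:, 3] = 0           # unrelated line crossing the same window

    symbol = np.full((4, 4), 255, np.uint8)
    symbol[1, :] = 0           # the template contains only its own stroke

    # XOR: identical pixels -> 0, differing pixels -> 255. Inverting with NOT
    # turns the matched stroke white (erased) and keeps the unrelated line black.
    erased = cv2.bitwise_not(cv2.bitwise_xor(region, symbol))
    print(erased)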
DTI_PID/DTI_PID/TextDetector.py

         eroded = cv2.bitwise_not(eroded)

-        image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
+        image, contours, hierarchy = cv2.findContours(eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         for contour in contours:
             area = cv2.contourArea(contour, True)
             if area < 0:
......

                 horizontal,max_width = 0,0
                 vertical,max_height = 0,0
-                _, _contours, _ = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
+                _, _contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                 for xx in _contours:
                     [_x, _y, _w, _h] = cv2.boundingRect(xx)
                     cv2.rectangle(img, (_x, _y), (_x+_w, _y+_h), 255, 1)
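Both hunks above in TextDetector.py change the contour retrieval mode from RETR_CCOMP (a two-level hierarchy: outer contours plus the holes inside them) to RETR_EXTERNAL (outermost contours only), so holes inside closed shapes no longer produce extra candidate boxes. A short sketch of the difference; the [-2:] slice is an illustrative trick so the call runs on both OpenCV 3.x (three return values, as in the diff) and 4.x (two):

    import cv2
    import numpy as np

    # A white ring on black: one outer boundary and one hole.
    img = np.zeros((100, 100), np.uint8)
    cv2.circle(img, (50, 50), 30, 255, thickness=10)

    ccomp, _ = cv2.findContours(img.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    external, _ = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]

    print(len(ccomp))     # 2: the outer boundary and the hole
    print(len(external))  # 1: only the outermost boundary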
......

                 pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
                 for tInfo in tInfoList:
+                    #future = pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgSrc, (0,0), searchedSymbolList, worker, listWidget, maxProgressValue)
                     future = pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgOCR, offset, searchedSymbolList, worker, listWidget, maxProgressValue)
                     data = future.result()
                     if data: self.textInfoList.extend(data)
                 pool.shutdown(wait = True)

-            if onlyTextArea:
-                return
-            # parse texts in area except Drawing area
-            whiteCharList = appDocData.getConfigs('Text Recognition', 'White Character List')
-            for area in appDocData.getAreaList():
-                if area.name == 'Drawing': continue
+                if onlyTextArea:
+                    return
+                # parse texts in area except Drawing area
+                whiteCharList = appDocData.getConfigs('Text Recognition', 'White Character List')
+                for area in appDocData.getAreaList():
+                    if area.name == 'Drawing': continue

-                if area.name == 'Unit':
+                    if area.name == 'Unit':
+                        img = imgSrc[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]
+                        if len(whiteCharList) is 0:
+                            texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng')
+                        else:
+                            texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng', conf = whiteCharList[0].value)
+                        if texts is not None and len(texts) > 0:
+                            appDocData.activeDrawing.setAttr('Unit', texts[0].getText())
+                            self.otherTextInfoList.append([area.name, texts])
+                    else:
+                        if area is not None and hasattr(area, 'img') and area.img is not None:
+                            if len(whiteCharList) is 0:
+                                texts = TOCR.getTextInfo(area.img, (area.x, area.y), 0, language='eng')
+                            else:
+                                texts = TOCR.getTextInfo(area.img, (area.x, area.y), 0, language='eng', conf=whiteCharList[0].value)
+                            self.otherTextInfoList.append([area.name, texts])
+
+                titleBlockProps = appDocData.getTitleBlockProperties()
+                for titleBlockProp in titleBlockProps:
+                    area = Area(titleBlockProp[0])
+                    area.parse(titleBlockProp[2])
                     img = imgSrc[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]
                     if len(whiteCharList) is 0:
                         texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng')
                     else:
-                        texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng', conf = whiteCharList[0].value)
-                    if texts is not None and len(texts) > 0:
-                        appDocData.activeDrawing.setAttr('Unit', texts[0].getText())
-                        self.otherTextInfoList.append([area.name, texts])
-                else:
-                    if area is not None and hasattr(area, 'img') and area.img is not None:
-                        if len(whiteCharList) is 0:
-                            texts = TOCR.getTextInfo(area.img, (area.x, area.y), 0, language='eng')
-                        else:
-                            texts = TOCR.getTextInfo(area.img, (area.x, area.y), 0, language='eng', conf=whiteCharList[0].value)
-                        self.otherTextInfoList.append([area.name, texts])
-
-            titleBlockProps = appDocData.getTitleBlockProperties()
-            for titleBlockProp in titleBlockProps:
-                area = Area(titleBlockProp[0])
-                area.parse(titleBlockProp[2])
-                img = imgSrc[round(area.y):round(area.y+area.height), round(area.x):round(area.x+area.width)]
-                if len(whiteCharList) is 0:
-                    texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng')
-                else:
-                    texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng', conf=whiteCharList[0].value)
-                self.titleBlockTextInfoList.append([area.name, texts])
+                        texts = TOCR.getTextInfo(img, (area.x, area.y), 0, language='eng', conf=whiteCharList[0].value)
+                    self.titleBlockTextInfoList.append([area.name, texts])

-            if worker is not None: worker.updateProgress.emit(maxProgressValue, None)
+                if worker is not None: worker.updateProgress.emit(maxProgressValue, None)
         except Exception as ex:
             message = 'error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno)
             worker.displayLog.emit(MessageType.Error, message)
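Both files rely on the same concurrent.futures pattern: create a ThreadPoolExecutor, submit one job per detected symbol or text area, then shut the pool down with wait=True before the shared image is used again. A minimal standalone sketch of that pattern; the worker function, the symbol list, and the THREAD_MAX_WORKER value are placeholders, not the project's:

    from concurrent import futures

    THREAD_MAX_WORKER = 4   # placeholder; the project defines its own constant

    def remove_detected_symbol(sym, img):
        # stand-in for Worker.removeDetectedSymbol: erase one symbol from img
        return 'removed ' + sym

    symbols = ['valve', 'pump', 'reducer']   # placeholder detections
    img = None                               # placeholder for the shared image

    pool = futures.ThreadPoolExecutor(max_workers=THREAD_MAX_WORKER)
    jobs = [pool.submit(remove_detected_symbol, sym, img) for sym in symbols]
    pool.shutdown(wait=True)   # block until every submitted job has finished

    for job in jobs:
        print(job.result())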
