Revision a02da871

ID: a02da871c7038cbc3368a918c489d92252277872
Parent: da5cbb61
Child: db6ea25f

Added by 함의성 over a year ago

gclude test

Change-Id: Ib77c0a741b566dc69f50486d588c2e4bf0cb779e

View differences:

DTI_PID/DTI_PID/AppWebService.py

                                                            sys.exc_info()[-1].tb_lineno)
             App.mainWnd().addMessage.emit(MessageType.Error, message)
             return []
+
+    def request_ocr_gcloud(self, img):
+        # send uncroped image
+        try:
+            if not self.test_connection():
+                return []
+
+            text_box = '/recognition/ocr'
+
+            _, bts = cv2.imencode('.png', img)
+            bts = bts.tostring()
+
+            response = requests.post(self._url + text_box, data=bts)
+
+            return response.json()['text_box']
+        except Exception as ex:
+            from App import App
+            from AppDocData import MessageType
+            message = 'error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename,
+                                                           sys.exc_info()[-1].tb_lineno)
+            App.mainWnd().addMessage.emit(MessageType.Error, message)
+            return []

     def request_text_box_tile(self, img_infos):
         """ main code """
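Below is a minimal sketch of how the new client-side method could be exercised; the sample image path and the standalone script context are assumptions for illustration, not part of this revision:

# Sketch only (not part of this revision). Assumes the DTI_PID client environment,
# a running recognition web server, and a placeholder input image path.
import cv2
from AppWebService import AppWebService

img = cv2.imread('sample_drawing.png', cv2.IMREAD_GRAYSCALE)  # hypothetical sample image
web_service = AppWebService()
# request_ocr_gcloud() PNG-encodes the image, POSTs it to /recognition/ocr,
# and returns the 'text_box' payload, or [] if the connection test or request fails.
text_boxes = web_service.request_ocr_gcloud(img)
print(text_boxes)
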
DTI_PID/DTI_PID/RecognitionDialog.py

                 #pool.shutdown(wait=True)
                 # up to here

-                Worker.drawFoundTextsOnCanvas(mainRes, textInfoList)
+                #Worker.drawFoundTextsOnCanvas(mainRes, textInfoList)

                 # remove text from image
                 textDetector.remove_text_from_image(app_doc_data.imgSrc, [0, 0])
DTI_PID/DTI_PID/TextDetector.py

                 #Image.fromarray(tile_info_list[-1][4]).show()

         return tile_info_list
+
+    def get_text_using_gcloud(self, imgGray, areaInfos, offset, searchedSymbolList, worker):
+        import math
+        from AppWebService import AppWebService
+
+        try:
+            app_doc_data = AppDocData.instance()
+
+            total_x = 0
+            total_y = 0
+            max_y = 0
+            gap = 45
+            line_gap = 15
+            for areaInfo in areaInfos:
+                if areaInfo.getAngle() == 0:
+                    total_x = total_x + areaInfo.getW()
+                    total_y = total_y + areaInfo.getH()
+                    if max_y < areaInfo.getH():
+                        max_y = areaInfo.getH()
+                else:
+                    total_x = total_y + areaInfo.getH()
+                    total_y = total_x + areaInfo.getW()
+                    if max_y < areaInfo.getW():
+                        max_y = areaInfo.getW()
+
+            average_x = (total_x / len(areaInfos)) + gap
+            average_y = (total_y) / len(areaInfos) + gap
+            num_x = int(math.sqrt(((average_y + gap) * len(areaInfos)) / (average_x + gap)))
+            num_y = (len(areaInfos) / num_x) + 15
+
+            _canvas = np.zeros((int(max_y * num_y) + gap, int(average_x * num_x) + 2 * gap), np.uint8)
+            _canvas[::] = 255
+
+            y_position = gap
+            x_position = gap
+            y_max = 0
+
+            for text in areaInfos:
+                # set angle 0 if symbol contains the text area is instrumentation
+                category = None
+                if searchedSymbolList:
+                    contains = [symbol for symbol in searchedSymbolList if symbol.contains(text)]
+                    if contains:
+                        _type = contains[0].getType()
+                        category = app_doc_data.getSymbolCategoryByType(_type)
+                        if 'Instrumentation' == category:
+                            text.setAngle(0)
+                # up to here
+
+                angle = text.getAngle()
+                left = text.getX() - round(offset[0])
+                top = text.getY() - round(offset[1])
+                right = left + text.getW()
+                bottom = top + text.getH()
+
+                _textImg = imgGray[top:bottom, left:right]
+                textImg = _textImg.copy()
+
+                if textImg.shape[0] != text.getH() or textImg.shape[1] != text.getW():
+                    continue
+
+                if (angle == 0 and x_position + text.getW() + 2 * line_gap >= _canvas.shape[1]) or (angle != 0 and x_position + text.getH() + 2 * line_gap >= _canvas.shape[1]):
+                    x_position = gap
+                    y_position = y_position + y_max + gap
+                    y_max = 0
+
+                if angle == 0:
+                    _canvas[y_position:y_position + text.getH(), x_position:x_position + text.getW()] = textImg[:]
+
+                    cv2.rectangle(_canvas, (x_position - line_gap, y_position - line_gap), (x_position + text.getW() + 2 * line_gap, y_position + text.getH() + 2 * line_gap), 100, 5)
+
+                    x_position = x_position + text.getW() + gap
+                    if y_max < text.getH():
+                        y_max = text.getH()
+                else:
+                    textImg = cv2.rotate(textImg, cv2.ROTATE_90_CLOCKWISE)
+                    _canvas[y_position:y_position + text.getW(), x_position:x_position + text.getH()] = textImg[:]
+
+                    cv2.rectangle(_canvas, (x_position - line_gap, y_position - line_gap), (x_position + text.getH() + 2 * line_gap, y_position + text.getW() + 2 * line_gap), 100, 5)
+
+                    x_position = x_position + text.getH() + gap
+                    if y_max < text.getW():
+                        y_max = text.getW()
+
+            indices = np.where(_canvas == [0])
+            minx, maxx = min(indices[1]), max(indices[1])
+            miny, maxy = min(indices[0]), max(indices[0])
+
+            _canvas = _canvas[0:maxy + gap, 0:]
+
+            cv2.imwrite(os.path.join(app_doc_data.getCurrentProject().getTempPath(), "request_img.png"), _canvas)
+
+            app_web_service = AppWebService()
+            img_infos = app_web_service.request_ocr_gcloud(_canvas)
+            print(img_infos)
+
+        except Exception as ex:
+            message = 'error occurred({}) in {}:{}'.format(repr(ex), sys.exc_info()[-1].tb_frame.f_code.co_filename,
+                                                           sys.exc_info()[-1].tb_lineno)
+            if worker is not None:
+                worker.displayLog.emit(MessageType.Error, message)

     def get_text_box_using_craft(self, imgGray, offset_x, offset_y, web=False):
         """ get text box by using craft """
......

             self.textInfoList = []

             app_doc_data = AppDocData.instance()
-            project = app_doc_data.getCurrentProject()
+            configs = app_doc_data.getConfigs('Engine', 'Text Area')

             if len(tInfoList):
-                text_info_array = np.array_split(tInfoList, App.THREAD_MAX_WORKER
-                    if len(tInfoList) > App.THREAD_MAX_WORKER else len(tInfoList))
-                with futures.ThreadPoolExecutor(max_workers=App.THREAD_MAX_WORKER) as pool:
-                    future_text = {pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgSrc, offset,
-                                           searchedSymbolList, worker, listWidget, maxProgressValue):
-                                   tInfo for tInfo in text_info_array}
-
-                    for future in futures.as_completed(future_text):
-                        try:
-                            data = future.result()
-                            if data:
-                                self.textInfoList.extend(data)
-                        except Exception as ex:
-                            message = 'error occurred({}) in {}:{}'.format(repr(ex), sys.exc_info()[-1].tb_frame.f_code.co_filename,
-                                                                       sys.exc_info()[-1].tb_lineno)
-                            if worker:
-                                worker.displayLog.emit(MessageType.Error, message)
+                if (configs and int(configs[0].value) is 1) or not configs:
+                    text_info_array = np.array_split(tInfoList, App.THREAD_MAX_WORKER
+                        if len(tInfoList) > App.THREAD_MAX_WORKER else len(tInfoList))
+                    with futures.ThreadPoolExecutor(max_workers=App.THREAD_MAX_WORKER) as pool:
+                        future_text = {pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgSrc, offset,
+                                            searchedSymbolList, worker, listWidget, maxProgressValue):
+                                    tInfo for tInfo in text_info_array}
+
+                        for future in futures.as_completed(future_text):
+                            try:
+                                data = future.result()
+                                if data:
+                                    self.textInfoList.extend(data)
+                            except Exception as ex:
+                                message = 'error occurred({}) in {}:{}'.format(repr(ex), sys.exc_info()[-1].tb_frame.f_code.co_filename,
+                                                                        sys.exc_info()[-1].tb_lineno)
+                                if worker:
+                                    worker.displayLog.emit(MessageType.Error, message)
+                else:
+                    self.get_text_using_gcloud(imgSrc, tInfoList, offset, searchedSymbolList, worker)

             if onlyTextArea:
                 return
DTI_PID/WebServer/CRAFT_pytorch_master/text_craft.py

     return boxes_list

+def get_gcloud_ocr(img):
+    return ['test']
+
 def get_text_box(img, img_path=None, score_path=None, trained_model=None):
     if img.shape[0] == 2: img = img[0]
     if len(img.shape) == 2 : img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
DTI_PID/WebServer/app/api/controllers/RecognitionController.py

         args = text_recognition_parser.parse_args()
         img_file = request.files['img_file']

+@api.route('/ocr')
+@api.expect(text_recognition_parser)
+class GcloudTextRecognition(Resource):
+    def post(self):
+        args = text_recognition_parser.parse_args()
+        img_file = request.files['img_file']
+

 @api.route('/stream_text_box')
 @api.expect(text_recognition_parser)
DTI_PID/WebServer/app/recognition/index.py

             os.path.realpath(__file__)) + '\\CRAFT_pytorch_master\\weights\\craft_mlt_25k.pth')

         return jsonify({'text_box': boxes})
+
+@recognition_service.route('/ocr', methods=['POST'])
+def gcloud_ocr():
+    if request.method == 'POST':
+        r = request
+        nparr = np.fromstring(r.data, np.uint8)
+
+        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+        # img = img.reshape(1, -1)
+
+        boxes = text_craft.get_gcloud_ocr(img)
+
+        return jsonify({'text_box': boxes})


 @recognition_service.route('/stream_text_box', methods=['POST'])
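
On the server side, get_gcloud_ocr() is still a stub that returns ['test']. If the intent behind this test commit is to back the /recognition/ocr endpoint with Google Cloud Vision, a minimal sketch could look like the following; the google-cloud-vision dependency and credential setup are assumptions, not part of this revision:

# Sketch only (not part of this revision): one possible Cloud Vision based
# implementation of get_gcloud_ocr(). Requires the google-cloud-vision package
# and GOOGLE_APPLICATION_CREDENTIALS pointing at a service-account key.
import cv2
from google.cloud import vision

def get_gcloud_ocr(img):
    client = vision.ImageAnnotatorClient()
    _, png = cv2.imencode('.png', img)              # encode the OpenCV image as PNG bytes
    image = vision.Image(content=png.tobytes())
    response = client.text_detection(image=image)   # OCR request against Cloud Vision
    # text_annotations[0] holds the full detected text; the rest are per-word entries
    return [annotation.description for annotation in response.text_annotations]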
