Revision 2e744c88
issue #700: skip ip check
Change-Id: Ib7d9ebe4fd8b5adafe832c94e92b723234aa626a
DTI_PID/DTI_PID/AppWebService.py | ||
---|---|---|
53 | 53 |
if not self.test_connection(): |
54 | 54 |
return [] |
55 | 55 | |
56 |
symbol_box = '/symbol_box' |
|
56 |
symbol_box = '/recognition/symbol_box'
|
|
57 | 57 | |
58 | 58 |
_, bts = cv2.imencode('.png', img) |
59 | 59 |
bts = bts.tostring() |
60 | 60 | |
61 | 61 |
response = requests.post(self._url + symbol_box, data=bts) |
62 | 62 | |
63 |
return response.json()['symbol_box'] |
|
63 |
return response.json()['symbol_box'] #
|
|
64 | 64 |
except Exception as ex: |
65 | 65 |
from App import App |
66 | 66 |
from AppDocData import MessageType |
... | ... | |
75 | 75 |
if not self.test_connection(): |
76 | 76 |
return [] |
77 | 77 | |
78 |
text_box = '/text_box' |
|
78 |
text_box = '/recognition/text_box'
|
|
79 | 79 | |
80 | 80 |
_, bts = cv2.imencode('.png', img) |
81 | 81 |
bts = bts.tostring() |
... | ... | |
97 | 97 |
if not self.test_connection(): |
98 | 98 |
return [] |
99 | 99 | |
100 |
text_box = '/stream_text_box' |
|
100 |
text_box = '/recognition/stream_text_box'
|
|
101 | 101 | |
102 | 102 |
imgs = [info[4] for info in img_infos] |
103 | 103 |
str_imgs = [] |
DTI_PID/DTI_PID/License.py | ||
---|---|---|
53 | 53 |
n = n + 1 |
54 | 54 | |
55 | 55 |
if n >= 2: |
56 |
return True |
|
57 |
''' |
|
56 | 58 |
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) |
57 | 59 |
s.connect(("8.8.8.8", 80)) |
58 | 60 |
ip = s.getsockname()[0] |
59 | 61 |
if ipaddress.ip_address(ip) in ipaddress.ip_network('66.85.31.0/255'): |
60 | 62 |
return True |
63 |
''' |
|
64 |
msg = QMessageBox() |
|
65 |
msg.setIcon(QMessageBox.Critical) |
|
66 |
msg.setText('Process check fail') |
|
67 |
msg.setWindowTitle('License') |
|
68 |
msg.setStandardButtons(QMessageBox.Ok) |
|
69 |
msg.exec_() |
|
61 | 70 | |
62 | 71 |
return False |
63 | 72 | |
64 | 73 |
@staticmethod |
65 |
def check_license_key(): |
|
74 |
def check_license_key(recursive=False):
|
|
66 | 75 |
""" check license key with computer name """ |
67 | 76 |
from AppDocData import AppDocData |
68 | 77 |
from datetime import datetime, timedelta |
... | ... | |
109 | 118 |
decoded = QLicenseDialog.decode(QLicenseDialog.KEY, configs[0].value) |
110 | 119 |
if decoded.upper() == os.environ['COMPUTERNAME'].upper(): return True |
111 | 120 |
''' |
112 | ||
113 |
dialog = QLicenseDialog(None) |
|
114 |
dialog.exec_() |
|
115 |
if dialog.isAccepted: return True |
|
121 |
if not recursive: |
|
122 |
dialog = QLicenseDialog(None)
|
|
123 |
dialog.exec_()
|
|
124 |
if dialog.isAccepted: return True
|
|
116 | 125 |
except Exception as ex: |
117 | 126 |
message = 'error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, |
118 | 127 |
sys.exc_info()[-1].tb_lineno) |
... | ... | |
163 | 172 |
self.deleteAppConfigs('app', 'mode') |
164 | 173 |
# up to here |
165 | 174 | |
166 |
if QLicenseDialog.check_license_key(): |
|
175 |
if QLicenseDialog.check_license_key(recursive=True):
|
|
167 | 176 |
self.isAccepted = True |
168 | 177 | |
169 | 178 |
QDialog.accept(self) |
DTI_PID/DTI_PID/RecognitionDialog.py | ||
---|---|---|
2092 | 2092 |
return tempChildInfo |
2093 | 2093 | |
2094 | 2094 |
@staticmethod |
2095 |
def calculate_exact_position(img, symImg, symbol, rect): |
|
2096 | ||
2097 | ||
2098 |
@staticmethod |
|
2095 | 2099 |
def detect_symbol_using_server(targetSymbols, listWidget, updateProgressSignal): |
2096 | 2100 |
from AppWebService import AppWebService |
2101 |
import copy |
|
2097 | 2102 |
|
2098 | 2103 |
res = [] |
2099 | 2104 | |
... | ... | |
2103 | 2108 |
app_web_service = AppWebService() |
2104 | 2109 |
symbols = app_web_service.request_symbol_box(area.img) |
2105 | 2110 | |
2106 |
for symbol in symbols: |
|
2107 |
symbol[1] = symbol[1] + area.x |
|
2108 |
symbol[2] = symbol[2] + area.y |
|
2109 | ||
2110 |
for targetSymbol in targetSymbol[2]: |
|
2111 |
for targetSymbol in targetSymbols[2]: |
|
2111 | 2112 |
symbolName = targetSymbol.getName() |
2112 | 2113 |
symbolType = targetSymbol.getType() |
2113 | 2114 |
symbolPath = targetSymbol.getPath() |
... | ... | |
2148 | 2149 |
symGrayOri = copy.copy(symGray) |
2149 | 2150 |
sow, soh = symGray.shape[::-1] # symbol original w, h |
2150 | 2151 | |
2152 |
symbolRotatedAngle = 0 |
|
2151 | 2153 |
sw, sh = symGray.shape[::-1] |
2152 |
''' |
|
2153 |
roiw = (roiItemEp[0] - roiItemSp[0]) |
|
2154 |
roih = (roiItemEp[1] - roiItemSp[1]) |
|
2155 | 2154 | |
2156 |
# Case : symbol is bigger than roi |
|
2157 |
if roiw < sw or roih < sh: |
|
2158 |
symGray = cv2.rotate(symGray, cv2.ROTATE_90_CLOCKWISE) |
|
2159 |
symbolRotatedAngle = (symbolRotatedAngle + 90) % 360 |
|
2160 | ||
2161 |
if baseSymbol is not None and additionalSymbol is not None: |
|
2162 |
additionalSymbol = Worker.getRotatedChildInfo(additionalSymbol) |
|
2163 |
continue |
|
2164 | ||
2165 |
# get Rotated Original Point |
|
2166 |
originalPoint = Worker.getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, |
|
2167 |
symbolRotatedAngle, sw, sh, sow, soh, flipped) |
|
2168 |
connectionPoint = Worker.getCalculatedConnectionPoint(symbolConnectionPoint, symbolRotatedAngle, sw, |
|
2169 |
sh, sow, soh, flipped) |
|
2170 |
''' |
|
2155 |
# get Rotated Original Point |
|
2156 |
originalPoint = Worker.getCalculatedOriginalPoint(additionalSymbol, symbolOriginalPoint, |
|
2157 |
symbolRotatedAngle, sw, sh, sow, soh, 0) |
|
2158 |
connectionPoint = Worker.getCalculatedConnectionPoint(symbolConnectionPoint, symbolRotatedAngle, sw, |
|
2159 |
sh, sow, soh, 0) |
|
2171 | 2160 | |
2172 | 2161 |
for symbol in symbols: |
2173 |
Worker.addSearchedSymbol(symbolName, symbolType, |
|
2174 |
searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, |
|
2175 |
hitRate, symbolRotatedAngle, |
|
2176 |
isDetectOnOrigin, symbolRotateCount, symbolOcrOption, |
|
2177 |
isContainChild, |
|
2178 |
originalPoint, connectionPoint, baseSymbol, additionalSymbol, |
|
2179 |
isExceptDetect, |
|
2180 |
detectFlip=1 if flipped else 0, |
|
2181 |
hasInstrumentLabel=hasInstrumentLabel, text_area=text_area) |
|
2162 |
if symbol[0] == symbolName: |
|
2163 |
searchedItemSp = (symbol[1] + area.x, symbol[2] +area.y) |
|
2164 |
hitRate = symbol[5] |
|
2165 | ||
2166 |
Worker.addSearchedSymbol(symbolName, symbolType, |
|
2167 |
searchedItemSp, sw, sh, symbolThreshold, symbolMinMatchCount, |
|
2168 |
hitRate, symbolRotatedAngle, |
|
2169 |
isDetectOnOrigin, symbolRotateCount, symbolOcrOption, |
|
2170 |
isContainChild, |
|
2171 |
originalPoint, connectionPoint, baseSymbol, additionalSymbol, |
|
2172 |
isExceptDetect, |
|
2173 |
detectFlip=0, |
|
2174 |
hasInstrumentLabel=hasInstrumentLabel, text_area=text_area) |
|
2182 | 2175 | |
2183 | 2176 |
return symbols |
2184 | 2177 |
''' |
DTI_PID/WebServer/app/recognition/index.py | ||
---|---|---|
6 | 6 |
from PIL import Image |
7 | 7 | |
8 | 8 |
# craft |
9 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\CRAFT_pytorch_master') |
|
9 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\..\\..\\CRAFT_pytorch_master')
|
|
10 | 10 |
# service streamer |
11 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\service_streamer_master') |
|
11 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\..\\..\\service_streamer_master')
|
|
12 | 12 |
# deep ocr |
13 | 13 |
# sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\deep_text_recognition_benchmark_master') |
14 | 14 |
# symbol |
15 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\symbol_recognition') |
|
15 |
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '\\..\\..\\symbol_recognition')
|
|
16 | 16 | |
17 | 17 |
recognition_service = Blueprint('recognition', __name__, url_prefix='/recognition') |
18 | 18 | |
... | ... | |
55 | 55 |
imgs.append(Image.fromarray(cv2.imdecode(nparr, cv2.IMREAD_COLOR))) |
56 | 56 | |
57 | 57 |
boxes = test_doftech_all_images.get_symbol(imgs, trained_model1=os.path.dirname( |
58 |
os.path.realpath(__file__)) + '\\symbol_recognition\\MODEL\\f_doftech_all_class_only_params.pth', \ |
|
58 |
os.path.realpath(__file__)) + '\\..\\..\\symbol_recognition\\MODEL\\f_doftech_all_class_only_params.pth', \
|
|
59 | 59 |
trained_model2=os.path.dirname(os.path.realpath( |
60 |
__file__)) + '\\symbol_recognition\\MODEL\\doftech_all_class_only_params_opc.pth') |
|
60 |
__file__)) + '\\..\\..\\symbol_recognition\\MODEL\\doftech_all_class_only_params_opc.pth')
|
|
61 | 61 | |
62 | 62 |
return jsonify({'symbol_box': boxes[0]}) |
63 | 63 | |
... | ... | |
109 | 109 |
''' |
110 | 110 | |
111 | 111 |
infos = ['Text Area', [None, None, os.path.dirname( |
112 |
os.path.realpath(__file__)) + '\\CRAFT_pytorch_master\\weights\\craft_mlt_25k.pth', imgs]] |
|
112 |
os.path.realpath(__file__)) + '\\..\\..\\CRAFT_pytorch_master\\weights\\craft_mlt_25k.pth', imgs]]
|
|
113 | 113 |
boxes = streamer.predict(infos) |
114 | 114 |
boxes_list = boxes[1][0] |
115 | 115 | |
116 |
return jsonify({'text_box_list': boxes_list}) |
|
117 | ||
116 |
return jsonify({'text_box_list': boxes_list}) |
Export as Unified diff