개정판 d8e1a15d
issue #1366: add batched (stream) CRAFT text-detection path for speed testing and temporarily disable the license-key check
Change-Id: Idd6470cde55d6e7ce4ea1e70d8afcc7dca0772d8
DTI_PID/DTI_PID/App.py | ||
---|---|---|
127 | 127 |
app._excepthook = sys.excepthook |
128 | 128 |
sys.excepthook = app.exception_handler.handler |
129 | 129 |
|
130 |
if QLicenseDialog.check_license_key(): |
|
130 |
if True:#QLicenseDialog.check_license_key():
|
|
131 | 131 |
dlg = Ui_Dialog() |
132 | 132 |
|
133 | 133 |
selectedProject = dlg.showDialog() |
DTI_PID/WebServer/CRAFT_pytorch_master/text_craft.py | ||
---|---|---|
97 | 97 |
|
98 | 98 |
return boxes, polys, ret_score_text |
99 | 99 |
|
100 |
''' |
|
100 | 101 |
def get_text_box_batch(infos): |
101 | 102 |
boxes_list = [] |
102 | 103 |
for info in infos: |
103 | 104 |
boxes_list.append(get_text_box(info[0], info[1], info[2], info[3])) |
104 | 105 |
|
105 | 106 |
return boxes_list |
107 |
''' |
|
108 |
|
|
109 |
def get_text_box_batch(infos): |
|
110 |
boxes_list = get_text_box(infos[3], infos[0], infos[1], infos[2]) |
|
111 |
|
|
112 |
return boxes_list |
|
106 | 113 |
|
107 | 114 |
def get_text_box(img, img_path=None, score_path=None, trained_model=None): |
108 | 115 |
if img.shape[0] == 2: img = img[0] |
... | ... | |
189 | 196 |
|
190 | 197 |
return box_craft |
191 | 198 |
|
199 |
def get_text_box_stream(imgs, img_path=None, score_path=None, trained_model=None): |
|
200 |
parser = argparse.ArgumentParser(description='CRAFT Text Detection') |
|
201 |
parser.add_argument('--trained_model', default=trained_model, type=str, help='pretrained model') |
|
202 |
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold') |
|
203 |
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score') |
|
204 |
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold') |
|
205 |
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference') |
|
206 |
parser.add_argument('--canvas_size', default=1000, type=int, help='image size for inference') |
|
207 |
parser.add_argument('--mag_ratio', default=1, type=float, help='image magnification ratio') |
|
208 |
parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type') |
|
209 |
parser.add_argument('--show_time', default=False, action='store_true', help='show processing time') |
|
210 |
parser.add_argument('--test_folder', default='E:\Projects\DTIPID_GERRIT\DTI_PID\DTI_PID\CRAFT_pytorch_master/image/', type=str, help='folder path to input images') |
|
211 |
parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner') |
|
212 |
parser.add_argument('--refiner_model', default='E:\Projects\DTIPID_GERRIT\DTI_PID\DTI_PID\CRAFT_pytorch_master/weights/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model') |
|
213 |
|
|
214 |
args = parser.parse_args() |
|
215 |
|
|
216 |
# load net |
|
217 |
net = CRAFT() # initialize |
|
218 |
|
|
219 |
print('Loading weights from checkpoint (' + args.trained_model + ')') |
|
220 |
if args.cuda: |
|
221 |
net.load_state_dict(copyStateDict(torch.load(args.trained_model))) |
|
222 |
else: |
|
223 |
net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu'))) |
|
224 |
|
|
225 |
if args.cuda: |
|
226 |
net = net.cuda() |
|
227 |
net = torch.nn.DataParallel(net) |
|
228 |
cudnn.benchmark = False |
|
229 |
|
|
230 |
net.eval() |
|
231 |
|
|
232 |
# LinkRefiner |
|
233 |
refine_net = None |
|
234 |
if args.refine: |
|
235 |
from refinenet import RefineNet |
|
236 |
refine_net = RefineNet() |
|
237 |
print('Loading weights of refiner from checkpoint (' + args.refiner_model + ')') |
|
238 |
if args.cuda: |
|
239 |
refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model))) |
|
240 |
refine_net = refine_net.cuda() |
|
241 |
refine_net = torch.nn.DataParallel(refine_net) |
|
242 |
else: |
|
243 |
refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model, map_location='cpu'))) |
|
244 |
|
|
245 |
refine_net.eval() |
|
246 |
args.poly = True |
|
247 |
|
|
248 |
box_craft_list = [] |
|
249 |
for img in imgs: |
|
250 |
if img.shape[0] == 2: img = img[0] |
|
251 |
if len(img.shape) == 2 : img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) |
|
252 |
if img.shape[2] == 4: img = img[:,:,:3] |
|
253 |
image = np.array(img) |
|
254 |
|
|
255 |
t = time.time() |
|
256 |
|
|
257 |
bboxes, polys, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly, refine_net, args) |
|
258 |
|
|
259 |
# save score text |
|
260 |
if score_path: |
|
261 |
cv2.imwrite(score_path, score_text) |
|
262 |
|
|
263 |
box_craft = [] |
|
264 |
for i, box in enumerate(polys): |
|
265 |
poly = np.array(box).astype(np.int32).reshape((-1)) |
|
266 |
box_craft.append([int(p) for p in poly]) |
|
267 |
|
|
268 |
#file_utils.saveResult(image_path, image[:,:,::-1], polys, dirname=result_folder) |
|
269 |
img = np.array(image[:,:,::-1]) |
|
270 |
|
|
271 |
# save result image |
|
272 |
for i, box in enumerate(polys): |
|
273 |
poly = np.array(box).astype(np.int32).reshape((-1)) |
|
274 |
|
|
275 |
poly = poly.reshape(-1, 2) |
|
276 |
cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2) |
|
277 |
|
|
278 |
if img_path: |
|
279 |
cv2.imwrite(img_path, img) |
|
280 |
|
|
281 |
print("elapsed time : {}s".format(time.time() - t)) |
|
282 |
|
|
283 |
box_craft_list.append(box_craft) |
|
284 |
|
|
285 |
return box_craft_list |
|
286 |
|
|
192 | 287 |
|
193 | 288 |
if __name__ == '__main__': |
194 | 289 |
parser = argparse.ArgumentParser(description='CRAFT Text Detection') |
DTI_PID/WebServer/app.py | ||
---|---|---|
53 | 53 |
imgs.append(img) |
54 | 54 |
|
55 | 55 |
boxes_list = [] |
56 |
''' |
|
56 | 57 |
for img in imgs: |
58 |
# faster |
|
59 |
#boxes = streamer.predict([[img, None, None, os.path.dirname(os.path.realpath(__file__)) + '\\CRAFT_pytorch_master\\weights\\craft_ic15_20k.pth']]) |
|
60 |
|
|
61 |
# More accurate |
|
57 | 62 |
boxes = streamer.predict([[img, None, None, os.path.dirname(os.path.realpath(__file__)) + '\\CRAFT_pytorch_master\\weights\\craft_mlt_25k.pth']]) |
58 | 63 |
boxes_list.append(boxes[0]) |
64 |
''' |
|
65 |
|
|
66 |
''' |
|
67 |
infos = [] |
|
68 |
for img in imgs: |
|
69 |
infos.append([img, None, None, os.path.dirname(os.path.realpath(__file__)) + '\\CRAFT_pytorch_master\\weights\\craft_mlt_25k.pth']) |
|
70 |
boxes = streamer.predict(infos) |
|
71 |
boxes_list = boxes |
|
72 |
''' |
|
73 |
|
|
74 |
infos = [None, None, os.path.dirname(os.path.realpath(__file__)) + '\\CRAFT_pytorch_master\\weights\\craft_mlt_25k.pth', imgs] |
|
75 |
boxes = streamer.predict(infos) |
|
76 |
boxes_list = boxes |
|
77 |
|
|
59 | 78 |
return jsonify({'text_box_list': boxes_list}) |
60 | 79 |
|
61 | 80 |
if __name__ == '__main__': |
내보내기 Unified diff