Revision 0208d20d
build issue #000:
- fixed some code in Perth
DTI_PID/DTI_PID/RecognitionDialog.py
@@ -322,8 +322,8 @@
     maxProgressValue = len(textAreas) + 1
 
     worker.displayTitle.emit('텍스트 인식 중...')
-    textDetector.recognizeText(appDocData.imgSrc, (0,0), textAreas, searchedSymbolList, worker, listWidget, maxProgressValue)
-    #textDetector.recognizeText(appDocData.imgSrc, offset, textAreas, searchedSymbolList, worker, listWidget, maxProgressValue)
+    #textDetector.recognizeText(appDocData.imgSrc, (0,0), textAreas, searchedSymbolList, worker, listWidget, maxProgressValue)
+    textDetector.recognizeText(appDocData.imgSrc, offset, textAreas, searchedSymbolList, worker, listWidget, maxProgressValue)
     textInfoList = textDetector.textInfoList.copy() if textDetector.textInfoList is not None else None
     otherTextInfoList = textDetector.otherTextInfoList.copy() if textDetector.otherTextInfoList is not None else None
     titleBlockTextInfoList = textDetector.titleBlockTextInfoList.copy() if textDetector.titleBlockTextInfoList is not None else None
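Note: this hunk swaps which call is live, so recognizeText now receives the actual drawing offset instead of a hard-coded (0,0) origin. A minimal sketch of why that matters, with illustrative names rather than the real DTI_PID signature: an area stored in drawing coordinates has to be translated back into image coordinates before it can be cropped out of imgSrc.

def crop_text_area(img, area, offset):
    # area = (x, y, w, h) in drawing coordinates; offset = image origin.
    # With offset = (0, 0) the crop lands in the wrong place whenever the
    # drawing does not start at the image's top-left corner.
    x = area[0] - round(offset[0])
    y = area[1] - round(offset[1])
    return img[y:y + area[3], x:x + area[2]]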
DTI_PID/DTI_PID/Shapes/EngineeringNoteItem.py
@@ -57,6 +57,7 @@
 
     # Get NoteArea
     notesArea = AppDocData.instance().getArea('Note')
+    if not notesArea: return res
     # Get all note contents
     items = self.scene().items(QRectF(notesArea.x, notesArea.y, notesArea.x + notesArea.width, notesArea.y + notesArea.height))
     items = [item for item in items if type(item) is QEngineeringTextItem] # Filtering QEngineeringTextItem
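Note: the added guard returns early when no 'Note' area is configured; without it, notesArea.x two lines later raises AttributeError on None. A sketch of the same guard-clause pattern (assuming, as the project code suggests, that getArea returns None for a missing area; QRectF comes from PyQt5, and text_item_type stands in for QEngineeringTextItem):

from PyQt5.QtCore import QRectF

def note_text_items(scene, notes_area, text_item_type):
    # Bail out early instead of dereferencing None below.
    res = []
    if not notes_area:
        return res
    rect = QRectF(notes_area.x, notes_area.y, notes_area.width, notes_area.height)
    return [item for item in scene.items(rect) if type(item) is text_item_type]

Incidentally, QRectF takes (x, y, width, height), so the context line above, which passes notesArea.x + notesArea.width as the width, looks like it searches a larger region than intended.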
DTI_PID/DTI_PID/TextDetector.py
@@ -58,7 +58,7 @@
 
     configs = appDocData.getConfigs('Text Size', 'Max Text Size')
     maxTextSize = int(configs[0].value) if 1 == len(configs) else 100
-    minSize = 10
+    minSize = 5
 
     contourImg = np.ones(imgGray.shape, np.uint8) * 255
     binaryImg,mask = cv2.threshold(imgGray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
@@ -69,9 +69,9 @@
     [x, y, w, h] = cv2.boundingRect(contour)
     area = cv2.contourArea(contour, True)
 
+    # skip contours whose size is greater than the max text size or smaller than the minimum size
     if area >= 0:
-        if (w > maxTextSize and h > maxTextSize) or (w <= minSize and h > maxTextSize) or\
-           (w > maxTextSize and h <= minSize) or (w <= minSize and h <= minSize): continue
+        if (w > maxTextSize or h > maxTextSize) or (w <= minSize and h <= minSize): continue
 
     if area >= 0:
         cv2.drawContours(contourImg, [contour], -1, (0,0,0), -1)
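Note: the old and new size filters are not equivalent; the new predicate also skips contours where only one dimension exceeds maxTextSize. A quick check (threshold values illustrative):

minSize, maxTextSize = 5, 100

def old_skip(w, h):
    return (w > maxTextSize and h > maxTextSize) or (w <= minSize and h > maxTextSize) or \
           (w > maxTextSize and h <= minSize) or (w <= minSize and h <= minSize)

def new_skip(w, h):
    return (w > maxTextSize or h > maxTextSize) or (w <= minSize and h <= minSize)

# A wide-but-short contour is now skipped where it previously survived:
print(old_skip(150, 50), new_skip(150, 50))  # False True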
@@ -156,7 +156,7 @@
     intersected = True
     while intersected:
         intersected = False
-        for rect in rects[:]:
+        for rect in rects[:]: # clone rects
             if 0 == rect[0]:
                 rectExpand = rect[1].adjusted(-mergeSize, 0, mergeSize, 0)
             else:
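Note: rects[:] iterates over a shallow copy so the merge loop can remove entries from rects without skipping neighbours; the added comment just documents that. A toy illustration (not the project's merge logic):

rects = [1, 2, 3, 4]
for r in rects[:]:        # snapshot of rects ("clone rects")
    if r % 2 == 0:
        rects.remove(r)   # safe: we walk the copy, mutate the original
print(rects)              # [1, 3]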
@@ -209,6 +209,8 @@
     y = tInfo.getY() - round(offset[1])
     img = imgOCR[y:y+tInfo.getH(), x:x+tInfo.getW()]
 
+    cv2.imwrite('c:\\temp\\ocr.png', img)
+
     # set angle 0 if symbol contains the text area is instrumentation
     category = None
     contains = [symbol for symbol in searchedSymbolList if symbol.contains(tInfo)]
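Note: the added cv2.imwrite dumps each cropped OCR patch to a fixed Windows path, so every call overwrites the previous one, and it fails quietly (imwrite returns False) if c:\temp does not exist. If the dump is meant to stay, a guarded variant along these lines might be safer (the DEBUG_DUMP flag is illustrative, not part of the project):

import os
import cv2

DEBUG_DUMP = False  # hypothetical switch for the debug dump

def dump_ocr_patch(img, path='c:\\temp\\ocr.png'):
    if DEBUG_DUMP and os.path.isdir(os.path.dirname(path)):
        cv2.imwrite(path, img)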
@@ -280,7 +282,7 @@
 
     pool = futures.ThreadPoolExecutor(max_workers = THREAD_MAX_WORKER)
     for tInfo in tInfoList:
-        future = pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgSrc, offset, searchedSymbolList, worker, listWidget, maxProgressValue)
+        future = pool.submit(TextDetector.recognizeTextFromImage, tInfo, imgOCR, offset, searchedSymbolList, worker, listWidget, maxProgressValue)
         data = future.result()
         if data: self.textInfoList.extend(data)
     pool.shutdown(wait = True)
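Note: the fix here is passing imgOCR (the preprocessed OCR image) to the workers instead of the raw imgSrc. Separately, calling future.result() right after each submit() waits for that task before the next one is submitted, so the pool runs effectively serially. A sketch of submit-all-then-collect (run_one stands in for TextDetector.recognizeTextFromImage and its extra arguments):

from concurrent import futures

def recognize_all(tInfoList, imgOCR, run_one, max_workers=4):
    results = []
    with futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        fs = [pool.submit(run_one, tInfo, imgOCR) for tInfo in tInfoList]
        for f in futures.as_completed(fs):
            data = f.result()
            if data:
                results.extend(data)
    return results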
DTI_PID/DTI_PID/TrainingImageListDialog.py
@@ -323,16 +323,16 @@
     if index >= len(boxes): break
     loc = [i*(grid_size[0]), j*(grid_size[1])]
     box_image = images[boxes[index][5]].crop((boxes[index][1],images[boxes[index][5]].height - (boxes[index][2] + boxes[index][4]), boxes[index][1] + boxes[index][3],images[boxes[index][5]].height - boxes[index][2]))
-    """
     if boxes[index][0] == '"' or boxes[index][0] == '\'':
-        train_image.paste(box_image, (loc[0] + space, loc[1] + space, loc[0] + space + box_image.width, loc[1] + space + box_image.height))
-        out_boxes.append([boxes[index][0],
-                          str(loc[0] + space), str(train_image.height - loc[1] - space - box_image.height),
-                          str(loc[0] + space + box_image.width), str(train_image.height - loc[1] - space)])
+        x = loc[0] + int((grid_size[0] - box_image.width)*0.5)#space
+        y = loc[1] + space
+    elif boxes[index][0] == ',':
+        x = loc[0] + int((grid_size[0] - box_image.width)*0.5)#space
+        y = loc[1] + grid_size[1] - box_image.height - space
     else:
-    """
-    x = loc[0] + int((grid_size[0] - box_image.width)*0.5)#space
-    y = loc[1] + int((grid_size[1] - box_image.height)*0.5)#grid_size[1] - (space + box_image.height)
+        x = loc[0] + int((grid_size[0] - box_image.width)*0.5)#space
+        y = loc[1] + int((grid_size[1] - box_image.height)*0.5)#grid_size[1] - (space + box_image.height)
+
     # //remove noise
     '''
     if self.isNoisable(boxes[index][0]):
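Note: the rewritten branch replaces a quoted-out paste block with per-character placement inside each grid cell: every glyph is centred horizontally, quote characters are pinned near the top of the cell, commas near the bottom, and everything else is centred vertically. The placement math, pulled out as a standalone sketch (grid_size, space and the box dimensions are illustrative):

def glyph_origin(char, loc, grid_size, box_w, box_h, space=2):
    x = loc[0] + int((grid_size[0] - box_w) * 0.5)       # centred horizontally
    if char in ('"', "'"):
        y = loc[1] + space                               # quotes sit at the cell top
    elif char == ',':
        y = loc[1] + grid_size[1] - box_h - space        # commas sit at the cell bottom
    else:
        y = loc[1] + int((grid_size[1] - box_h) * 0.5)   # others centred vertically
    return x, y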