개정판 ba0bca12
Revised how the flow mark is detected
DTI_PID/DTI_PID/Shapes/QEngineeringInstrumentItem.py | ||
---|---|---|
109 | 109 |
if rect.contains(attr.center()): |
110 | 110 |
self.attrs.append(attr) |
111 | 111 |
|
112 |
sorted(self.attrs, key=lambda attr: attr.loc[1]) # sort by y coordinate |
|
112 |
self.attrs = sorted(self.attrs, key=lambda attr: attr.loc[1]) # sort by y coordinate
|
|
113 | 113 |
attrs = [attr.text() for attr in self.attrs] # will be used in eval function's parameter |
114 | 114 |
|
115 | 115 |
docData = AppDocData.instance() |
DTI_PID/DTI_PID/Shapes/QEngineeringLineItem.py | ||
---|---|---|
311 | 311 |
''' |
312 | 312 |
def addFlowArrow(self): |
313 | 313 |
from QEngineeringFlowArrowItem import QEngineeringFlowArrowItem |
314 |
#from AppDocData import AppDocData
|
|
315 |
#import numpy as np
|
|
316 |
#import cv2
|
|
317 |
#import math
|
|
318 |
#import sys
|
|
314 |
from AppDocData import AppDocData |
|
315 |
import numpy as np |
|
316 |
import cv2 |
|
317 |
import math |
|
318 |
import sys |
|
319 | 319 |
|
320 |
#try: |
|
321 |
|
|
322 |
# rect = self.boundingRect() |
|
323 |
# adjustRect = None |
|
324 |
# adjustValue = 10 |
|
325 |
# if self.isVertical(): |
|
326 |
# adjustRect = rect.adjusted(-adjustValue, 0, adjustValue, 0) |
|
327 |
# else: |
|
328 |
# adjustRect = rect.adjusted(0, -adjustValue, 0, adjustValue) |
|
329 |
# img = np.array(AppDocData.instance().getCurrentPidSource().getPyImageOnRect(adjustRect)) |
|
330 |
# imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) |
|
331 |
# _ret, thresh = cv2.threshold(imgGray, 127, 255, cv2.THRESH_BINARY) |
|
332 |
# img2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
|
333 |
# cnt = contours |
|
334 |
# cv2.drawContours(img, cnt, 0, (0, 0, 255), 3) |
|
335 |
# cv2.imshow('img', img) |
|
336 |
# cv2.waitKey(0) |
|
337 |
# cv2.destroyAllWindows() |
|
338 |
|
|
339 |
# edges = cv2.Canny(imgGray, 0, 10, apertureSize = 3) |
|
340 |
# #lines = cv2.HoughLines(edges, 1, 90*np.pi/180, 10) |
|
341 |
# #lines = cv2.HoughLinesP(edges, 1, np.pi/180, 10, maxLineGap = 0) |
|
320 |
try: |
|
321 |
rect = self.boundingRect() |
|
322 |
adjustRect = None |
|
323 |
adjustValue = 10 |
|
324 |
if self.isVertical(): |
|
325 |
adjustRect = rect.adjusted(-adjustValue, 0, adjustValue, 0) |
|
326 |
else: |
|
327 |
adjustRect = rect.adjusted(0, -adjustValue, 0, adjustValue) |
|
328 |
img = np.array(AppDocData.instance().getCurrentPidSource().getPyImageOnRect(adjustRect)) |
|
329 |
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) |
|
330 |
|
|
331 |
edges = cv2.Canny(imgGray, 50, 150, apertureSize=3) |
|
332 |
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 10, minLineLength=10, maxLineGap=5) |
|
342 | 333 |
|
343 | 334 |
# ####### HoughLinesP |
344 |
# #if lines is not None: |
|
345 |
# # for line in lines: |
|
346 |
# # for x1, y1, x2, y2 in line: |
|
347 |
# # if math.fabs(y2-y1) <= 1: |
|
348 |
# # y1 = y2 |
|
349 |
# # if math.fabs(x2-x1) <= 1: |
|
350 |
# # x1 = x2 |
|
351 |
# # #if (self.isVertical() and y1 == y2) or (self.isHorizontal() and x1 == x2): |
|
352 |
# # # cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) |
|
353 |
# # cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) |
|
354 |
|
|
355 |
# #if lines is not None: |
|
356 |
# # for i in range(len(lines)): |
|
357 |
# # for rho, theta in lines[i]: |
|
358 |
# # a = np.cos(theta) |
|
359 |
# # b = np.sin(theta) |
|
360 |
# # x0 = a*rho |
|
361 |
# # y0 = b*rho |
|
362 |
# # x1 = int(x0 + 1000 * (-b)) |
|
363 |
# # y1 = int(y0 + 1000 * (a)) |
|
364 |
# # x2 = int(x0 - 1000 * (-b)) |
|
365 |
# # y2 = int(y0 - 1000 * (a)) |
|
366 |
|
|
367 |
# # if math.fabs(y2-y1) <= 1: |
|
368 |
# # y1 = y2 |
|
369 |
# # if math.fabs(x2-x1) <= 1: |
|
370 |
# # x1 = x2 |
|
371 |
# # #degree = np.rad2deg(np.arctan2(y2 - y1, x2 - x1)) % 90 |
|
372 |
# # #if (degree >= 10 and degree <= 45) or (degree >= 135 and degree <= 170): |
|
373 |
# # # cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) |
|
374 |
# # if (self.isVertical() and y1 == y2) or (self.isHorizontal() and x1 == x2): |
|
375 |
# # cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) |
|
376 |
# # #cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) |
|
377 |
# #cv2.imshow('img', img) |
|
378 |
# #cv2.waitKey(0) |
|
379 |
# #cv2.destroyAllWindows() |
|
380 |
#except Exception as ex: |
|
381 |
# print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno)) |
|
335 |
if lines is not None: |
|
336 |
for line in lines: |
|
337 |
for x1, y1, x2, y2 in line: |
|
338 |
dx = math.fabs(x2 - x1) |
|
339 |
dy = math.fabs(y2 - y1) |
|
340 |
length = math.sqrt(dx*dx + dy*dy) |
|
341 |
dx /= length |
|
342 |
dy /= length |
|
343 |
if (self.isVertical() and (dx < 0.001 or math.fabs(dx - 1) < 0.001)) or (self.isHorizontal() and (dx < 0.001 or math.fabs(dx - 1) < 0.001)): continue |
|
344 |
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) |
|
345 |
|
|
346 |
cv2.imshow('img', img) |
|
347 |
cv2.waitKey(0) |
|
348 |
cv2.destroyAllWindows() |
|
349 |
except Exception as ex: |
|
350 |
print('error occured({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, sys.exc_info()[-1].tb_lineno)) |
|
382 | 351 |
|
383 | 352 |
startPt = self.startPoint() |
384 | 353 |
endPt = self.endPoint() |
Export: Unified diff