개정판 a9e478a9
issue #663: line type visual detection test
Change-Id: Ia16e4630c1a17e6776b86afd15f81939461f1d58
DTI_PID/DTI_PID/RecognitionDialog.py | ||
---|---|---|
1317 | 1317 |
|
1318 | 1318 |
# change line type using visual pattern |
1319 | 1319 |
try: |
1320 |
#Worker.changeVisualLineType(app_doc_data.lines)
|
|
1321 |
pass |
|
1320 |
Worker.changeVisualLineType(app_doc_data.lines, worker)
|
|
1321 |
#pass
|
|
1322 | 1322 |
|
1323 | 1323 |
except Exception as ex: |
1324 | 1324 |
message = f"error occurred({repr(ex)}) in {sys.exc_info()[-1].tb_frame.f_code.co_filename}:" \ |
... | ... | |
1352 | 1352 |
pass |
1353 | 1353 |
|
1354 | 1354 |
@staticmethod |
1355 |
def changeVisualLineType(lines): |
|
1355 |
def changeVisualLineType(lines, worker):
|
|
1356 | 1356 |
from LineDetector import LineDetector |
1357 | 1357 |
|
1358 |
app_doc_data = AppDocData.instance() |
|
1359 |
image = app_doc_data.activeDrawing.image_origin |
|
1360 |
imgNot = np.ones(image.shape, np.uint8) * 255 |
|
1361 |
image = cv2.bitwise_xor(image, imgNot) |
|
1362 |
|
|
1363 |
#electric = [137, [1,1,1,1,1,1,1,1,1], sys.maxsize] |
|
1364 |
#software = [187, [0.948,1.081,0.932,1.081,0.932,1.068,0.932,1.081,0.929], sys.maxsize] |
|
1365 |
#line_patterns = {'Electric':electric, 'Software':software } |
|
1366 |
line_patterns = {} |
|
1367 |
line_shapes = {} |
|
1368 |
|
|
1369 |
line_names = app_doc_data.getSymbolListByType('type', 'Line') |
|
1370 |
if len(line_names) != 0: |
|
1371 |
for line_name in line_names: |
|
1372 |
line = line_name |
|
1373 |
line_path = line.getPath() |
|
1374 |
if not os.path.isfile(line_path): |
|
1375 |
continue |
|
1358 |
try: |
|
1359 |
app_doc_data = AppDocData.instance() |
|
1360 |
image = app_doc_data.activeDrawing.image_origin |
|
1361 |
imgNot = np.ones(image.shape, np.uint8) * 255 |
|
1362 |
image = cv2.bitwise_xor(image, imgNot) |
|
1363 |
|
|
1364 |
#electric = [137, [1,1,1,1,1,1,1,1,1], sys.maxsize] |
|
1365 |
#software = [187, [0.948,1.081,0.932,1.081,0.932,1.068,0.932,1.081,0.929], sys.maxsize] |
|
1366 |
#line_patterns = {'Electric':electric, 'Software':software } |
|
1367 |
line_patterns = {} |
|
1368 |
line_shapes = {} |
|
1369 |
|
|
1370 |
line_names = app_doc_data.getSymbolListByType('type', 'Line') |
|
1371 |
if len(line_names) != 0: |
|
1372 |
for line_name in line_names: |
|
1373 |
line = line_name |
|
1374 |
line_path = line.getPath() |
|
1375 |
if not os.path.isfile(line_path): |
|
1376 |
continue |
|
1377 |
|
|
1378 |
line_img = cv2.cvtColor(cv2.imread(line_path), cv2.COLOR_BGR2GRAY) |
|
1379 |
imgNot = np.ones(line_img.shape, np.uint8) * 255 |
|
1380 |
line_img = cv2.bitwise_xor(line_img, imgNot) |
|
1381 |
|
|
1382 |
contours, _ = cv2.findContours(line_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
|
1383 |
|
|
1384 |
if len(contours) < 3: |
|
1385 |
max_x, max_y, min_x, min_y = 0, 0, sys.maxsize, sys.maxsize |
|
1386 |
for rect in [cv2.boundingRect(contour) for contour in contours]: |
|
1387 |
if rect[0] < min_x: |
|
1388 |
min_x = rect[0] |
|
1389 |
if rect[0] + rect[2] > max_x: |
|
1390 |
max_x = rect[0] + rect[2] |
|
1391 |
if rect[1] < min_y: |
|
1392 |
min_y = rect[1] |
|
1393 |
if rect[1] + rect[3] > max_y: |
|
1394 |
max_y = rect[1] + rect[3] |
|
1395 |
|
|
1396 |
line_shapes[line.getBaseSymbol()] = [line_img[min_y:max_y, min_x:max_x], sys.maxsize] |
|
1397 |
continue |
|
1398 |
|
|
1399 |
i = 1 if line_img.shape[0] > line_img.shape[1] else 0 |
|
1400 |
boundingBoxes = [cv2.boundingRect(contour) for contour in contours] |
|
1401 |
(contours, boundingBoxes) = zip(*sorted(zip(contours, boundingBoxes), key=lambda b:b[1][i], reverse=False)) |
|
1402 |
|
|
1403 |
avg_area = sum([cv2.contourArea(contour) for contour in contours]) / len(contours) |
|
1404 |
ratio_area = [cv2.contourArea(contour) / avg_area for contour in contours] |
|
1405 |
|
|
1406 |
line_patterns[line.getBaseSymbol()] = [avg_area, ratio_area, sys.maxsize] |
|
1407 |
else: |
|
1408 |
return |
|
1376 | 1409 |
|
1377 |
line_img = cv2.cvtColor(cv2.imread(line_path), cv2.COLOR_BGR2GRAY) |
|
1378 |
imgNot = np.ones(line_img.shape, np.uint8) * 255 |
|
1379 |
line_img = cv2.bitwise_xor(line_img, imgNot) |
|
1410 |
lines_found = [] |
|
1411 |
lines_shape = [] |
|
1380 | 1412 |
|
1381 |
contours, _ = cv2.findContours(line_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
|
1413 |
# determine line type : broken |
|
1414 |
for line in lines: |
|
1415 |
rect = line.boundingRect() |
|
1416 |
image_line = image[round(rect.y() - 3):round(rect.y() + rect.height() + 3), round(rect.x() - 3):round(rect.x() + rect.width() + 3)] |
|
1417 |
contours, _ = cv2.findContours(image_line, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
|
1382 | 1418 |
|
1419 |
# skip piping line |
|
1383 | 1420 |
if len(contours) < 3: |
1384 | 1421 |
max_x, max_y, min_x, min_y = 0, 0, sys.maxsize, sys.maxsize |
1385 |
for rect in [cv2.boundingRect(contours)]:
|
|
1422 |
for rect in [cv2.boundingRect(contour) for contour in contours]:
|
|
1386 | 1423 |
if rect[0] < min_x: |
1387 | 1424 |
min_x = rect[0] |
1388 | 1425 |
if rect[0] + rect[2] > max_x: |
... | ... | |
1392 | 1429 |
if rect[1] + rect[3] > max_y: |
1393 | 1430 |
max_y = rect[1] + rect[3] |
1394 | 1431 |
|
1395 |
line_shapes[line.getBaseSymbol()] = [line_img[min_y:max_y, min_x:max_x], sys.maxsize]
|
|
1432 |
lines_shape.append([line, image_line[min_y:max_y, min_x:max_x]])
|
|
1396 | 1433 |
continue |
1397 | 1434 |
|
1398 |
i = 1 if line_img.shape[0] > line_img.shape[1] else 0 |
|
1435 |
vertical = LineDetector.is_vertical([line.start_point()[0], line.start_point()[1], line.end_point()[0], line.end_point()[1]]) |
|
1436 |
|
|
1437 |
i = 1 if vertical else 0 |
|
1399 | 1438 |
boundingBoxes = [cv2.boundingRect(contour) for contour in contours] |
1400 | 1439 |
(contours, boundingBoxes) = zip(*sorted(zip(contours, boundingBoxes), key=lambda b:b[1][i], reverse=False)) |
1401 | 1440 |
|
1402 | 1441 |
avg_area = sum([cv2.contourArea(contour) for contour in contours]) / len(contours) |
1403 | 1442 |
ratio_area = [cv2.contourArea(contour) / avg_area for contour in contours] |
1404 | 1443 |
|
1405 |
line_patterns[line.getBaseSymbol()] = [avg_area, ratio_area, sys.maxsize] |
|
1406 |
else: |
|
1407 |
return |
|
1408 |
|
|
1409 |
lines_found = [] |
|
1410 |
lines_shape = [] |
|
1411 |
|
|
1412 |
# determine line type : broken |
|
1413 |
for line in lines: |
|
1414 |
rect = line.boundingRect() |
|
1415 |
image_line = image[round(rect.y()):round(rect.y() + rect.height()), round(rect.x()):round(rect.x() + rect.width())] |
|
1416 |
contours, _ = cv2.findContours(image_line, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) |
|
1417 |
|
|
1418 |
# skip piping line |
|
1419 |
if len(contours) < 3: |
|
1420 |
max_x, max_y, min_x, min_y = 0, 0, sys.maxsize, sys.maxsize |
|
1421 |
for rect in [cv2.boundingRect(contours)]: |
|
1422 |
if rect[0] < min_x: |
|
1423 |
min_x = rect[0] |
|
1424 |
if rect[0] + rect[2] > max_x: |
|
1425 |
max_x = rect[0] + rect[2] |
|
1426 |
if rect[1] < min_y: |
|
1427 |
min_y = rect[1] |
|
1428 |
if rect[1] + rect[3] > max_y: |
|
1429 |
max_y = rect[1] + rect[3] |
|
1430 |
|
|
1431 |
lines_shape.append([line, image_line[min_y:max_y, min_x:max_x]]) |
|
1432 |
continue |
|
1433 |
|
|
1434 |
vertical = LineDetector.is_vertical([line.start_point()[0], line.start_point()[1], line.end_point()[0], line.end_point()[1]]) |
|
1435 |
|
|
1436 |
i = 1 if vertical else 0 |
|
1437 |
boundingBoxes = [cv2.boundingRect(contour) for contour in contours] |
|
1438 |
(contours, boundingBoxes) = zip(*sorted(zip(contours, boundingBoxes), key=lambda b:b[1][i], reverse=False)) |
|
1439 |
|
|
1440 |
avg_area = sum([cv2.contourArea(contour) for contour in contours]) / len(contours) |
|
1441 |
ratio_area = [cv2.contourArea(contour) / avg_area for contour in contours] |
|
1442 |
|
|
1443 |
for line_type, line_pattern in line_patterns.items(): |
|
1444 |
line_pattern[2] = sys.maxsize |
|
1445 |
ratio_area_cal = [ratio * avg_area / line_pattern[0] for ratio in ratio_area] |
|
1446 |
long_line = ratio_area_cal if len(ratio_area_cal) > len(line_pattern[1]) else line_pattern[1] |
|
1447 |
short_line = line_pattern[1] if len(ratio_area_cal) > len(line_pattern[1]) else ratio_area_cal |
|
1448 |
|
|
1449 |
min_error = sys.maxsize |
|
1450 |
for offset in range(len(long_line) - len(short_line) + 1): |
|
1451 |
error = 0 |
|
1452 |
for index in range(len(short_line)): |
|
1453 |
error += abs(short_line[index] - long_line[index + offset]) |
|
1454 |
error = error / len(short_line) |
|
1455 |
if error < min_error: |
|
1456 |
min_error = error |
|
1457 |
|
|
1458 |
line_pattern[2] = min_error |
|
1459 |
|
|
1460 |
line_type_found = sorted([(line_type, line_pattern[2]) for line_type, line_pattern in line_patterns.items()], key=lambda error:error[1])[0] |
|
1461 |
if line_type_found[1] < 0.4: |
|
1462 |
lines_found.append([line, line_type_found]) |
|
1463 |
|
|
1464 |
''' |
|
1465 |
# feature matching not work |
|
1466 |
orb = cv2.ORB_create() |
|
1467 |
kp1, des1 = orb.detectAndCompute(image_line, None) |
|
1468 |
kp2, des2 = orb.detectAndCompute(image_line, None) |
|
1469 |
|
|
1470 |
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) |
|
1471 |
matches = bf.match(des1, des2) |
|
1472 |
matches = sorted(matches, key=lambda x: x.distance) |
|
1473 |
|
|
1474 |
good = [] |
|
1475 |
for m, n in matches: |
|
1476 |
if m.distance < 0.75 * n.distance: |
|
1477 |
good.append([m]) |
|
1478 |
|
|
1479 |
sift = cv2.xfeatures2d.SIFT_create() |
|
1480 |
kp1, des1 = sift.detectAndCompute(image_line, None) |
|
1481 |
kp2, des2 = sift.detectAndCompute(image_line, None) |
|
1482 |
|
|
1483 |
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True) |
|
1484 |
matches = bf.match(des1, des2) |
|
1485 |
matches = sorted(matches, key=lambda x: x.distance) |
|
1486 |
|
|
1487 |
good = [] |
|
1488 |
for m, n in matches: |
|
1489 |
if m.distance < 0.75 * n.distance: |
|
1490 |
good.append([m]) |
|
1491 |
''' |
|
1492 |
# determine line type : solid |
|
1493 |
for line, image_line in lines_shape: |
|
1494 |
for line_type, line_shape in line_shapes.items(): |
|
1495 |
shape = line_shape[0].copy() |
|
1496 |
line_shape[1] = sys.maxsize |
|
1497 |
|
|
1498 |
big = shape if max(shape.shape) > max(image_line.shape) else image_line |
|
1499 |
small = image_line if max(shape.shape) > max(image_line.shape) else shape |
|
1500 |
|
|
1501 |
if big.shape[0] == max(max(big.shape), max(small.shape)): |
|
1502 |
mask = np.ones([big.shape[0], max(big.shape[1], max(small.shape))], np.uint8) * 255 |
|
1503 |
else: |
|
1504 |
mask = np.ones([max(big.shape[0], max(small.shape)), big.shape[1]], np.uint8) * 255 |
|
1505 |
|
|
1506 |
mask[0:big.shape[0], 0:big.shape[1]] = big |
|
1444 |
for line_type, line_pattern in line_patterns.items(): |
|
1445 |
line_pattern[2] = sys.maxsize |
|
1446 |
ratio_area_cal = [ratio * avg_area / line_pattern[0] for ratio in ratio_area] |
|
1447 |
long_line = ratio_area_cal if len(ratio_area_cal) > len(line_pattern[1]) else line_pattern[1] |
|
1448 |
short_line = line_pattern[1] if len(ratio_area_cal) > len(line_pattern[1]) else ratio_area_cal |
|
1449 |
|
|
1450 |
min_error = sys.maxsize |
|
1451 |
for offset in range(len(long_line) - len(short_line) + 1): |
|
1452 |
error = 0 |
|
1453 |
for index in range(len(short_line)): |
|
1454 |
error += abs(short_line[index] - long_line[index + offset]) |
|
1455 |
error = error / len(short_line) |
|
1456 |
if error < min_error: |
|
1457 |
min_error = error |
|
1458 |
|
|
1459 |
line_pattern[2] = min_error |
|
1460 |
|
|
1461 |
line_type_found = sorted([(line_type, line_pattern[2]) for line_type, line_pattern in line_patterns.items()], key=lambda error:error[1])[0] |
|
1462 |
if line_type_found[1] < 0.4: |
|
1463 |
lines_found.append([line, line_type_found]) |
|
1464 |
|
|
1465 |
''' |
|
1466 |
# feature matching not work |
|
1467 |
orb = cv2.ORB_create() |
|
1468 |
kp1, des1 = orb.detectAndCompute(image_line, None) |
|
1469 |
kp2, des2 = orb.detectAndCompute(image_line, None) |
|
1470 |
|
|
1471 |
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) |
|
1472 |
matches = bf.match(des1, des2) |
|
1473 |
matches = sorted(matches, key=lambda x: x.distance) |
|
1474 |
|
|
1475 |
good = [] |
|
1476 |
for m, n in matches: |
|
1477 |
if m.distance < 0.75 * n.distance: |
|
1478 |
good.append([m]) |
|
1479 |
|
|
1480 |
sift = cv2.xfeatures2d.SIFT_create() |
|
1481 |
kp1, des1 = sift.detectAndCompute(image_line, None) |
|
1482 |
kp2, des2 = sift.detectAndCompute(image_line, None) |
|
1483 |
|
|
1484 |
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True) |
|
1485 |
matches = bf.match(des1, des2) |
|
1486 |
matches = sorted(matches, key=lambda x: x.distance) |
|
1487 |
|
|
1488 |
good = [] |
|
1489 |
for m, n in matches: |
|
1490 |
if m.distance < 0.75 * n.distance: |
|
1491 |
good.append([m]) |
|
1492 |
''' |
|
1493 |
# determine line type : solid |
|
1494 |
for line, image_line in lines_shape: |
|
1495 |
for line_type, line_shape in line_shapes.items(): |
|
1496 |
shape = line_shape[0].copy() |
|
1497 |
line_shape[1] = sys.maxsize |
|
1498 |
|
|
1499 |
big = shape if max(shape.shape) > max(image_line.shape) else image_line |
|
1500 |
small = image_line if max(shape.shape) > max(image_line.shape) else shape |
|
1501 |
|
|
1502 |
if big.shape[0] == max(max(big.shape), max(small.shape)): |
|
1503 |
mask = np.zeros([big.shape[0], max(big.shape[1], max(small.shape))], np.uint8) |
|
1504 |
else: |
|
1505 |
mask = np.zeros([max(big.shape[0], max(small.shape)), big.shape[1]], np.uint8) |
|
1506 |
|
|
1507 |
mask[0:big.shape[0], 0:big.shape[1]] = big |
|
1508 |
big = mask |
|
1509 |
|
|
1510 |
searchedInfos = [] |
|
1511 |
steps = [False, True] |
|
1512 |
for flipped in steps: |
|
1513 |
symGray = small.copy() |
|
1514 |
if flipped: |
|
1515 |
symGray = cv2.flip(symGray, 1) |
|
1516 |
|
|
1517 |
symbolRotatedAngle = 0 |
|
1518 |
for rc in range(4): |
|
1519 |
sw, sh = symGray.shape[::-1] |
|
1520 |
|
|
1521 |
r_w, r_h = big.shape[::-1] |
|
1522 |
if r_w < sw or r_h < sh: |
|
1523 |
symGray = cv2.rotate(symGray, cv2.ROTATE_90_CLOCKWISE) |
|
1524 |
symbolRotatedAngle = (symbolRotatedAngle + 90) % 360 |
|
1525 |
continue |
|
1507 | 1526 |
|
1508 |
searchedInfos = [] |
|
1509 |
steps = [False, True] |
|
1510 |
for flipped in steps: |
|
1511 |
if flipped: |
|
1512 |
symGray = small |
|
1513 |
symGray = cv2.flip(symGray, 1) |
|
1527 |
tmRes = cv2.matchTemplate(big, symGray, cv2.TM_CCOEFF_NORMED) |
|
1528 |
_, max_val, __, max_loc = cv2.minMaxLoc(tmRes) |
|
1514 | 1529 |
|
1515 |
symbolRotatedAngle = 0 |
|
1516 |
for rc in range(4): |
|
1517 |
sw, sh = symGray.shape[::-1] |
|
1530 |
if max_val > 0.6: |
|
1531 |
searchedInfos.append(1 - max_val) |
|
1518 | 1532 |
|
1519 |
r_w, r_h = big.shape[::-1] |
|
1520 |
if r_w < sw or r_h < sh: |
|
1521 | 1533 |
symGray = cv2.rotate(symGray, cv2.ROTATE_90_CLOCKWISE) |
1522 | 1534 |
symbolRotatedAngle = (symbolRotatedAngle + 90) % 360 |
1523 |
continue |
|
1524 |
|
|
1525 |
tmRes = cv2.matchTemplate(big, symGray, cv2.TM_CCOEFF_NORMED) |
|
1526 |
_, max_val, __, max_loc = cv2.minMaxLoc(tmRes) |
|
1527 | 1535 |
|
1528 |
if max_val > 0.6:
|
|
1529 |
searchedInfos.append(1 - max_val)
|
|
1536 |
if searchedInfos:
|
|
1537 |
line_shape[1] = sorted(searchedInfos)[0]
|
|
1530 | 1538 |
|
1531 |
symGray = cv2.rotate(symGray, cv2.ROTATE_90_CLOCKWISE) |
|
1532 |
symbolRotatedAngle = (symbolRotatedAngle + 90) % 360 |
|
1539 |
line_type_founds = sorted([(line_type, line_shape[1]) for line_type, line_shape in line_shapes.items()], key=lambda error: error[1]) |
|
1540 |
if line_type_founds and line_type_founds[0][1] < 0.4: |
|
1541 |
if line_type_founds[0] == 'Secondary' or line_type_founds[0] == 'Primary' and len(line_type_founds) > 1: |
|
1542 |
if line_type_founds[1][1] < 0.4: |
|
1543 |
lines_found.append([line, line_type_founds[1]]) |
|
1544 |
else: |
|
1545 |
lines_found.append([line, line_type_founds[0]]) |
|
1546 |
else: |
|
1547 |
lines_found.append([line, line_type_founds[0]]) |
|
1533 | 1548 |
|
1534 |
if searchedInfos: |
|
1535 |
line_shape[1] = sorted(searchedInfos)[0] |
|
1549 |
line_runs = [] |
|
1550 |
for line_found in lines_found: |
|
1551 |
inserted = False |
|
1552 |
for line_run in line_runs: |
|
1553 |
if line_found[0] in line_run: |
|
1554 |
inserted = True |
|
1555 |
break |
|
1536 | 1556 |
|
1537 |
line_type_found = sorted([(line_type, line_shape[1]) for line_type, line_shape in line_shapes.items()], key=lambda error: error[1])[0] |
|
1538 |
if line_type_found[1] < 0.4: |
|
1539 |
lines_found.append([line, line_type_found]) |
|
1557 |
if inserted: |
|
1558 |
continue |
|
1559 |
else: |
|
1560 |
run = [line_found[0]] |
|
1561 |
Worker.find_connected_line(run, line_found[0]) |
|
1562 |
line_runs.append(run) |
|
1540 | 1563 |
|
1541 |
line_runs = [] |
|
1542 |
for line_found in lines_found: |
|
1543 |
inserted = False |
|
1544 | 1564 |
for line_run in line_runs: |
1545 |
if line_found[0] in line_run: |
|
1546 |
inserted = True |
|
1547 |
break |
|
1548 |
|
|
1549 |
if inserted: |
|
1550 |
continue |
|
1551 |
else: |
|
1552 |
run = [line_found[0]] |
|
1553 |
Worker.find_connected_line(run, line_found[0]) |
|
1554 |
line_runs.append(run) |
|
1555 |
|
|
1556 |
for line_run in line_runs: |
|
1557 |
_lines_found = [] |
|
1558 |
for _line in line_run: |
|
1559 |
_lines = [line_found[0] for line_found in lines_found] |
|
1560 |
if _line in _lines: |
|
1561 |
index = _lines.index(_line) |
|
1562 |
_lines_found.append(lines_found[index]) |
|
1563 |
_line_found = sorted(_lines_found, key=lambda param:param[1][1])[0] |
|
1564 |
Worker.changeConnectedLineType(_line_found[0], _line_found[1][0]) |
|
1565 |
_lines_found = [] |
|
1566 |
for _line in line_run: |
|
1567 |
_lines = [line_found[0] for line_found in lines_found] |
|
1568 |
if _line in _lines: |
|
1569 |
index = _lines.index(_line) |
|
1570 |
_lines_found.append(lines_found[index]) |
|
1571 |
_line_found = sorted(_lines_found, key=lambda param:param[1][1])[0] |
|
1572 |
Worker.changeConnectedLineType(_line_found[0], _line_found[1][0]) |
|
1573 |
except Exception as ex: |
|
1574 |
message = 'error occurred({}) in {}:{}'.format(ex, sys.exc_info()[-1].tb_frame.f_code.co_filename, |
|
1575 |
sys.exc_info()[-1].tb_lineno) |
|
1576 |
worker.displayLog.emit(MessageType.Error, message) |
|
1577 |
return None |
|
1565 | 1578 |
|
1566 | 1579 |
@staticmethod |
1567 | 1580 |
def find_connected_line(lines, line): |
내보내기 Unified diff