Revision 13759969

ID: 13759969d2415b44e01fff797ec4e355535f503a
Parent: acb80620
Child: 824f1a2b

Added by 함의성 about 5 years ago

issue #1366: add minimum import file

Change-Id: Ie8bb209519359596fdae8583f6d4707646ef1aba

View differences:

DTI_PID/DTI_PID/AppWebService.py
@@ -127,3 +127,11 @@
                                                            sys.exc_info()[-1].tb_lineno)
             App.mainWnd().addMessage.emit(MessageType.Error, message)
             return []
+
+    '''
+    def request(self, url):
+        response = urllib.request.urlopen(url)
+        byte_data = response.read()
+        text_data = byte_data.decode('utf-8')
+        return text_data
+    '''
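
The added block stays commented out; if enabled, it would fetch a URL and return the body decoded as UTF-8. A minimal standalone sketch of the same pattern (stdlib only; the with-statement is an addition here so the connection is closed promptly):

    import urllib.request

    def request(url):
        # open the URL, read the raw bytes, and decode them as UTF-8 text
        with urllib.request.urlopen(url) as response:
            return response.read().decode('utf-8')
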
DTI_PID/DTI_PID/RecognitionDialog.py
@@ -903,13 +903,15 @@
                         listWidget.addItem('Generating Data...')
 
                         for item in searchedSymbolList:
-                            path = os.path.join(project.getTempPath(), 'Tile', item.getBaseSymbol())
+                            path = os.path.join(project.getTempPath(), 'Tile', item.getName())
                             if not os.path.exists(path): os.makedirs(path)
 
                             _img = app_doc_data.imgSrc[round(item.getSp()[1]):round(item.getSp()[1] + item.getHeight()),
                                    round(item.getSp()[0]):round(item.getSp()[0] + item.getWidth())]
                             cv2.imwrite(os.path.join(path, str(uuid.uuid4()) + '.png'), _img)
-                        worker.updateBatchProgress.emit(len(srcList), 4)
+
+                        app_doc_data.activeDrawing.image = None
+                        worker.updateBatchProgress.emit(len(drawings), 4)
                         continue
 
                     #cv2.imwrite('c:\\temp\\before-imgSrc.png', area.img)
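
This hunk keys each tile folder by the symbol name instead of the base symbol, releases the drawing image once tiling is done, and reports progress against drawings rather than srcList. The crop itself follows the OpenCV/NumPy convention that images are indexed [row, column], i.e. [y, x], which is why the y-range comes first in the slice. A sketch with hypothetical names (save_symbol_tile is illustrative, not project code):

    import os
    import uuid
    import cv2

    def save_symbol_tile(img_src, sp, width, height, out_dir):
        """Crop the box at top-left sp=(x, y) and save it as a PNG tile."""
        os.makedirs(out_dir, exist_ok=True)
        tile = img_src[round(sp[1]):round(sp[1] + height),  # rows: y .. y+h
                       round(sp[0]):round(sp[0] + width)]   # cols: x .. x+w
        cv2.imwrite(os.path.join(out_dir, str(uuid.uuid4()) + '.png'), tile)
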
@@ -1457,7 +1459,7 @@
             symbolName = targetSymbol.getName()
             symbolType = targetSymbol.getType()
             symbolPath = targetSymbol.getPath()
-            symbolThreshold = targetSymbol.getThreshold()  # if not forTraining else targetSymbol.getThreshold() / 3 * 2
+            symbolThreshold = targetSymbol.getThreshold() if not forTraining else targetSymbol.getThreshold() * 0.85
             symbolMinMatchCount = targetSymbol.getMinMatchCount()
             isDetectOnOrigin = targetSymbol.getIsDetectOnOrigin()
             symbolRotateCount = targetSymbol.getRotationCount()
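
This re-enables the training branch that was previously only a comment, and softens the relaxation: in training mode the match threshold drops to 85% of its configured value rather than 2/3, so more candidates are collected without flooding the results. A synthetic sketch of how the threshold feeds template matching (toy arrays, not project data):

    import cv2
    import numpy as np

    roi = np.zeros((64, 64), np.uint8)
    cv2.rectangle(roi, (20, 20), (35, 35), 255, 2)   # a hollow square "symbol"
    sym = roi[18:38, 18:38].copy()                   # template cut around it
    res = cv2.matchTemplate(roi, sym, cv2.TM_CCOEFF_NORMED)
    ys, xs = np.where(res >= 0.85)                   # e.g. a relaxed training threshold
    print(list(zip(xs, ys)))                         # includes (18, 18), the true location
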
@@ -1578,6 +1580,9 @@
                                                              hasInstrumentLabel=hasInstrumentLabel, text_area=text_area)
                                     threadLock.release()
 
+                    if forTraining:
+                        sw, sh = sw + 20, sh + 20
+
                     # Template Matching
                     tmRes = cv2.matchTemplate(roiItem, symGray, cv2.TM_CCOEFF_NORMED)
                     loc = np.where(tmRes >= symbolThreshold)
@@ -1596,7 +1601,9 @@
 
                         searchedItemSp = (roiItemSp[0] + pt[0] + round(offsetDrawingArea[0]),
                                           roiItemSp[1] + pt[1] + round(offsetDrawingArea[1]))
-                        # print(searchedItemSp)
+
+                        if forTraining:
+                            searchedItemSp = [searchedItemSp[0] - 10, searchedItemSp[1] - 10]
 
                         overlapArea = 0
                         symbolIndex = -1
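
Taken together with the size hunk above, this pads every detected box by 10 pixels on each side before it becomes training data: the size grows by 20 while the top-left corner moves back by 10. With illustrative values:

    sw, sh = 40, 30                  # matched template size
    sp = (100, 200)                  # matched top-left corner
    sw, sh = sw + 20, sh + 20        # size hunk (new lines 1583-1584)
    sp = (sp[0] - 10, sp[1] - 10)    # position hunk (new lines 1605-1606)
    print(sp, sw, sh)                # (90, 190) 60 50 -> a 10 px margin all around
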
DTI_PID/WebServer/symbol_recognition/src/utils.py (new file)
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch
from torch.autograd import Variable
from torch.utils.data.dataloader import default_collate


def custom_collate_fn(batch):
    # Stack the images with default_collate, but keep the per-image label
    # lists as plain Python lists, since they differ in length between images.
    items = list(zip(*batch))
    items[0] = default_collate(items[0])
    items[1] = list(items[1])
    return items
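
A usage sketch with a toy batch (hypothetical data; assumes custom_collate_fn above is in scope). default_collate alone would fail here because the label lists have different lengths:

    import torch

    batch = [
        (torch.zeros(3, 448, 448), [[0, 10, 10, 20, 20]]),                      # one box
        (torch.zeros(3, 448, 448), [[1, 5, 5, 15, 15], [2, 30, 30, 40, 40]]),   # two boxes
    ]
    images, labels = custom_collate_fn(batch)
    print(images.shape, [len(l) for l in labels])   # torch.Size([2, 3, 448, 448]) [1, 2]
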
def post_processing(logits, image_size, gt_classes, anchors, conf_threshold, nms_threshold):
    num_anchors = len(anchors)
    anchors = torch.Tensor(anchors)
    if isinstance(logits, Variable):
        logits = logits.data

    if logits.dim() == 3:
        logits.unsqueeze_(0)

    batch = logits.size(0)
    h = logits.size(2)
    w = logits.size(3)

    # Compute xc,yc, w,h, box_score on Tensor
    lin_x = torch.linspace(0, w - 1, w).repeat(h, 1).view(h * w)
    lin_y = torch.linspace(0, h - 1, h).repeat(w, 1).t().contiguous().view(h * w)
    anchor_w = anchors[:, 0].contiguous().view(1, num_anchors, 1)
    anchor_h = anchors[:, 1].contiguous().view(1, num_anchors, 1)
    if torch.cuda.is_available():
        lin_x = lin_x.cuda()
        lin_y = lin_y.cuda()
        anchor_w = anchor_w.cuda()
        anchor_h = anchor_h.cuda()

    logits = logits.view(batch, num_anchors, -1, h * w)
    logits[:, :, 0, :].sigmoid_().add_(lin_x).div_(w)
    logits[:, :, 1, :].sigmoid_().add_(lin_y).div_(h)
    logits[:, :, 2, :].exp_().mul_(anchor_w).div_(w)
    logits[:, :, 3, :].exp_().mul_(anchor_h).div_(h)
    logits[:, :, 4, :].sigmoid_()

    with torch.no_grad():
        cls_scores = torch.nn.functional.softmax(logits[:, :, 5:, :], 2)
    cls_max, cls_max_idx = torch.max(cls_scores, 2)
    cls_max_idx = cls_max_idx.float()
    cls_max.mul_(logits[:, :, 4, :])

    score_thresh = cls_max > conf_threshold
    score_thresh_flat = score_thresh.view(-1)

    if score_thresh.sum() == 0:
        predicted_boxes = []
        for i in range(batch):
            predicted_boxes.append(torch.Tensor([]))
    else:
        coords = logits.transpose(2, 3)[..., 0:4]
        coords = coords[score_thresh[..., None].expand_as(coords)].view(-1, 4)
        scores = cls_max[score_thresh]
        idx = cls_max_idx[score_thresh]
        detections = torch.cat([coords, scores[:, None], idx[:, None]], dim=1)

        max_det_per_batch = num_anchors * h * w
        slices = [slice(max_det_per_batch * i, max_det_per_batch * (i + 1)) for i in range(batch)]
        det_per_batch = torch.IntTensor([score_thresh_flat[s].int().sum() for s in slices])
        split_idx = torch.cumsum(det_per_batch, dim=0)

        # Group detections per image of batch
        predicted_boxes = []
        start = 0
        for end in split_idx:
            predicted_boxes.append(detections[start: end])
            start = end

    selected_boxes = []
    for boxes in predicted_boxes:
        if boxes.numel() == 0:
            return boxes

        a = boxes[:, :2]
        b = boxes[:, 2:4]
        bboxes = torch.cat([a - b / 2, a + b / 2], 1)
        scores = boxes[:, 4]

        # Sort coordinates by descending score
        scores, order = scores.sort(0, descending=True)
        x1, y1, x2, y2 = bboxes[order].split(1, 1)

        # Compute dx and dy between each pair of boxes (these mat contain every pair twice...)
        dx = (x2.min(x2.t()) - x1.max(x1.t())).clamp(min=0)
        dy = (y2.min(y2.t()) - y1.max(y1.t())).clamp(min=0)

        # Compute iou
        intersections = dx * dy
        areas = (x2 - x1) * (y2 - y1)
        unions = (areas + areas.t()) - intersections
        ious = intersections / unions

        # Filter based on iou (and class)
        conflicting = (ious > nms_threshold).triu(1)

        keep = conflicting.sum(0).byte()
        keep = keep.cpu()
        conflicting = conflicting.cpu()

        keep_len = len(keep) - 1
        for i in range(1, keep_len):
            if keep[i] > 0:
                keep -= conflicting[i]
        if torch.cuda.is_available():
            keep = keep.cuda()

        keep = (keep == 0)
        selected_boxes.append(boxes[order][keep[:, None].expand_as(boxes)].view(-1, 6).contiguous())

    final_boxes = []
    for boxes in selected_boxes:
        if boxes.dim() == 0:
            final_boxes.append([])
        else:
            boxes[:, 0:3:2] *= image_size
            boxes[:, 0] -= boxes[:, 2] / 2
            boxes[:, 1:4:2] *= image_size
            boxes[:, 1] -= boxes[:, 3] / 2

            final_boxes.append([[box[0].item(), box[1].item(), box[2].item(), box[3].item(), box[4].item(),
                                 gt_classes[int(box[5].item())]] for box in boxes])
    return final_boxes
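
The NMS step above relies on a pairwise-IoU trick: broadcasting a column of coordinates against its transpose yields the full matrix of overlaps in one shot. A self-contained sketch on hand-made (x1, y1, x2, y2) boxes with illustrative values:

    import torch

    bboxes = torch.tensor([[0., 0., 10., 10.],
                           [1., 1., 11., 11.],
                           [20., 20., 30., 30.]])
    x1, y1, x2, y2 = bboxes.split(1, 1)
    dx = (x2.min(x2.t()) - x1.max(x1.t())).clamp(min=0)   # pairwise overlap width
    dy = (y2.min(y2.t()) - y1.max(y1.t())).clamp(min=0)   # pairwise overlap height
    inter = dx * dy
    areas = (x2 - x1) * (y2 - y1)
    ious = inter / (areas + areas.t() - inter)
    print(ious)   # boxes 0 and 1 overlap (IoU ~ 0.68); box 2 overlaps nothing
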
DTI_PID/WebServer/symbol_recognition/src/yolo_doftech.py (new file)
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch.nn as nn
import torch


class YoloD(nn.Module):
    def __init__(self, pre_model, num_classes,
                 anchors=[(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053),
                          (11.2364, 10.0071)]):
        super(YoloD, self).__init__()

        self.num_classes = num_classes
        self.anchors = anchors

        self.stage1_conv1 = pre_model.stage1_conv1
        self.stage1_conv2 = pre_model.stage1_conv2
        self.stage1_conv3 = pre_model.stage1_conv3
        self.stage1_conv4 = pre_model.stage1_conv4
        self.stage1_conv5 = pre_model.stage1_conv5
        self.stage1_conv6 = pre_model.stage1_conv6
        self.stage1_conv7 = pre_model.stage1_conv7
        self.stage1_conv8 = pre_model.stage1_conv8
        self.stage1_conv9 = pre_model.stage1_conv9
        self.stage1_conv10 = pre_model.stage1_conv10
        self.stage1_conv11 = pre_model.stage1_conv11
        self.stage1_conv12 = pre_model.stage1_conv12
        self.stage1_conv13 = pre_model.stage1_conv13

        self.stage2_a_maxpl = pre_model.stage2_a_maxpl
        self.stage2_a_conv1 = pre_model.stage2_a_conv1
        self.stage2_a_conv2 = pre_model.stage2_a_conv2
        self.stage2_a_conv3 = pre_model.stage2_a_conv3
        self.stage2_a_conv4 = pre_model.stage2_a_conv4
        self.stage2_a_conv5 = pre_model.stage2_a_conv5
        self.stage2_a_conv6 = pre_model.stage2_a_conv6
        self.stage2_a_conv7 = pre_model.stage2_a_conv7

        self.stage2_b_conv = pre_model.stage2_b_conv

        self.stage3_conv1 = pre_model.stage3_conv1

        self.stage3_conv2 = nn.Conv2d(1024, len(self.anchors) * (5 + num_classes), 1, 1, 0, bias=False)

    def forward(self, input):
        output = self.stage1_conv1(input)
        output = self.stage1_conv2(output)
        output = self.stage1_conv3(output)
        output = self.stage1_conv4(output)
        output = self.stage1_conv5(output)
        output = self.stage1_conv6(output)
        output = self.stage1_conv7(output)
        output = self.stage1_conv8(output)
        output = self.stage1_conv9(output)
        output = self.stage1_conv10(output)
        output = self.stage1_conv11(output)
        output = self.stage1_conv12(output)
        output = self.stage1_conv13(output)

        residual = output

        output_1 = self.stage2_a_maxpl(output)
        output_1 = self.stage2_a_conv1(output_1)
        output_1 = self.stage2_a_conv2(output_1)
        output_1 = self.stage2_a_conv3(output_1)
        output_1 = self.stage2_a_conv4(output_1)
        output_1 = self.stage2_a_conv5(output_1)
        output_1 = self.stage2_a_conv6(output_1)
        output_1 = self.stage2_a_conv7(output_1)

        output_2 = self.stage2_b_conv(residual)
        batch_size, num_channel, height, width = output_2.data.size()
        output_2 = output_2.view(batch_size, int(num_channel / 4), height, 2, width, 2).contiguous()
        output_2 = output_2.permute(0, 3, 5, 1, 2, 4).contiguous()
        output_2 = output_2.view(batch_size, -1, int(height / 2), int(width / 2))

        output = torch.cat((output_1, output_2), 1)
        output = self.stage3_conv1(output)
        output = self.stage3_conv2(output)
        return output


if __name__ == '__main__':
    pre_path = '../trained_models/only_params_trained_yolo_voc'
    pre_model = torch.load(pre_path)
    print(pre_model.keys())
    yolo_model = YoloD(pre_model, 35)
    img = torch.zeros([2,3,448,448])
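
YoloD is a transfer-learning wrapper: every layer except the final 1x1 head is taken from an already-trained Yolo, so only stage3_conv2 starts from scratch for the new class count. A sketch (the import path and backbone construction are assumptions; the __main__ above instead loads a saved parameter file):

    import torch
    from src.yolo_net import Yolo   # assumed import path

    backbone = Yolo(num_classes=20)             # pretrained weights would be loaded here
    model = YoloD(backbone, num_classes=35)     # reuse backbone, fresh detection head
    out = model(torch.zeros(1, 3, 448, 448))
    print(out.shape)   # torch.Size([1, 200, 14, 14]) = 5 anchors * (5 + 35) channels
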
DTI_PID/WebServer/symbol_recognition/src/yolo_net.py (new file)
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import torch.nn as nn
import torch


class Yolo(nn.Module):
    def __init__(self, num_classes,
                 anchors=[(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053),
                          (11.2364, 10.0071)]):
        super(Yolo, self).__init__()
        self.num_classes = num_classes
        self.anchors = anchors

        self.stage1_conv1 = nn.Sequential(nn.Conv2d(3, 32, 3, 1, 1, bias=False), nn.BatchNorm2d(32),
                                          nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
        self.stage1_conv2 = nn.Sequential(nn.Conv2d(32, 64, 3, 1, 1, bias=False), nn.BatchNorm2d(64),
                                          nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
        self.stage1_conv3 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128),
                                          nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv4 = nn.Sequential(nn.Conv2d(128, 64, 1, 1, 0, bias=False), nn.BatchNorm2d(64),
                                          nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv5 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128),
                                          nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
        self.stage1_conv6 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256),
                                          nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv7 = nn.Sequential(nn.Conv2d(256, 128, 1, 1, 0, bias=False), nn.BatchNorm2d(128),
                                          nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv8 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256),
                                          nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
        self.stage1_conv9 = nn.Sequential(nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512),
                                          nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv10 = nn.Sequential(nn.Conv2d(512, 256, 1, 1, 0, bias=False), nn.BatchNorm2d(256),
                                           nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv11 = nn.Sequential(nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512),
                                           nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv12 = nn.Sequential(nn.Conv2d(512, 256, 1, 1, 0, bias=False), nn.BatchNorm2d(256),
                                           nn.LeakyReLU(0.1, inplace=True))
        self.stage1_conv13 = nn.Sequential(nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512),
                                           nn.LeakyReLU(0.1, inplace=True))

        self.stage2_a_maxpl = nn.MaxPool2d(2, 2)
        self.stage2_a_conv1 = nn.Sequential(nn.Conv2d(512, 1024, 3, 1, 1, bias=False),
                                            nn.BatchNorm2d(1024), nn.LeakyReLU(0.1, inplace=True))
        self.stage2_a_conv2 = nn.Sequential(nn.Conv2d(1024, 512, 1, 1, 0, bias=False), nn.BatchNorm2d(512),
                                            nn.LeakyReLU(0.1, inplace=True))
        self.stage2_a_conv3 = nn.Sequential(nn.Conv2d(512, 1024, 3, 1, 1, bias=False), nn.BatchNorm2d(1024),
                                            nn.LeakyReLU(0.1, inplace=True))
        self.stage2_a_conv4 = nn.Sequential(nn.Conv2d(1024, 512, 1, 1, 0, bias=False), nn.BatchNorm2d(512),
                                            nn.LeakyReLU(0.1, inplace=True))
        self.stage2_a_conv5 = nn.Sequential(nn.Conv2d(512, 1024, 3, 1, 1, bias=False), nn.BatchNorm2d(1024),
                                            nn.LeakyReLU(0.1, inplace=True))
        self.stage2_a_conv6 = nn.Sequential(nn.Conv2d(1024, 1024, 3, 1, 1, bias=False), nn.BatchNorm2d(1024),
                                            nn.LeakyReLU(0.1, inplace=True))
        self.stage2_a_conv7 = nn.Sequential(nn.Conv2d(1024, 1024, 3, 1, 1, bias=False), nn.BatchNorm2d(1024),
                                            nn.LeakyReLU(0.1, inplace=True))

        self.stage2_b_conv = nn.Sequential(nn.Conv2d(512, 64, 1, 1, 0, bias=False), nn.BatchNorm2d(64),
                                           nn.LeakyReLU(0.1, inplace=True))

        self.stage3_conv1 = nn.Sequential(nn.Conv2d(256 + 1024, 1024, 3, 1, 1, bias=False), nn.BatchNorm2d(1024),
                                          nn.LeakyReLU(0.1, inplace=True))

        self.stage3_conv2 = nn.Conv2d(1024, len(self.anchors) * (5 + num_classes), 1, 1, 0, bias=False)

    def forward(self, input):
        output = self.stage1_conv1(input)
        output = self.stage1_conv2(output)
        output = self.stage1_conv3(output)
        output = self.stage1_conv4(output)
        output = self.stage1_conv5(output)
        output = self.stage1_conv6(output)
        output = self.stage1_conv7(output)
        output = self.stage1_conv8(output)
        output = self.stage1_conv9(output)
        output = self.stage1_conv10(output)
        output = self.stage1_conv11(output)
        output = self.stage1_conv12(output)
        output = self.stage1_conv13(output)

        residual = output

        output_1 = self.stage2_a_maxpl(output)
        output_1 = self.stage2_a_conv1(output_1)
        output_1 = self.stage2_a_conv2(output_1)
        output_1 = self.stage2_a_conv3(output_1)
        output_1 = self.stage2_a_conv4(output_1)
        output_1 = self.stage2_a_conv5(output_1)
        output_1 = self.stage2_a_conv6(output_1)
        output_1 = self.stage2_a_conv7(output_1)

        output_2 = self.stage2_b_conv(residual)
        batch_size, num_channel, height, width = output_2.data.size()
        output_2 = output_2.view(batch_size, int(num_channel / 4), height, 2, width, 2).contiguous()
        output_2 = output_2.permute(0, 3, 5, 1, 2, 4).contiguous()
        output_2 = output_2.view(batch_size, -1, int(height / 2), int(width / 2))

        output = torch.cat((output_1, output_2), 1)
        output = self.stage3_conv1(output)
        output = self.stage3_conv2(output)

        return output


if __name__ == "__main__":
    net = Yolo(20)
    print(net.stage1_conv1[0])
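
For a 448x448 input, the five 2x2 max-pools downsample by a factor of 32, so the network predicts on a 14x14 grid with len(anchors) * (5 + num_classes) channels per cell (4 box offsets + 1 objectness score + class scores). A quick shape check (a sketch; assumes the Yolo class above is in scope):

    import torch

    net = Yolo(20)
    out = net(torch.zeros(2, 3, 448, 448))
    print(out.shape)   # torch.Size([2, 125, 14, 14]) -> 5 anchors * (5 + 20)
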
