
LipNet Model Annotations

澹台浩广
2023-12-01

An annotated PyTorch implementation of the LipNet model. The shape comments trace an input of [1, 3, 75, 64, 128] (batch, channels, frames, height, width) through the network.

import math

import torch
import torch.nn as nn
from torch.nn import init


class LipNet(torch.nn.Module):
    def __init__(self, dropout_p=0.5):
        super(LipNet, self).__init__()
        # in_channels, out_channels, kernel_size, stride, padding
        #                            D,H,W        D,H,W   D,H,W   (one extra temporal/depth dimension compared to 2D)
        # Given an input of shape [batch, channels, depth, H, W] = [1, 3, 75, 64, 128]:
        # ----> [1, 32, 75, 32, 64]
        self.conv1 = nn.Conv3d(3, 32, (3, 5, 5), (1, 2, 2), (1, 2, 2))
        #                           kernel_size, stride
        # output size per dim: floor((in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1
        # (MaxPool3d defaults: padding = 0, dilation = 1)
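        # e.g. conv1 height: floor((64 + 2*2 - 1*(5-1) - 1) / 2) + 1 = 32
        #      pool1 height: floor((32 + 0 - 1*(2-1) - 1) / 2) + 1 = 16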
        # ----> [1, 32, 75, 16, 32]
        self.pool1 = nn.MaxPool3d((1, 2, 2), (1, 2, 2))
        # ----> [1, 64, 75, 16, 32]
        self.conv2 = nn.Conv3d(32, 64, (3, 5, 5), (1, 1, 1), (1, 2, 2))
        # ----> [1, 64, 75, 8, 16]
        self.pool2 = nn.MaxPool3d((1, 2, 2), (1, 2, 2))
        # ----> [1, 96, 75, 8, 16]
        self.conv3 = nn.Conv3d(64, 96, (3, 3, 3), (1, 1, 1), (1, 1, 1))
        # ----> [1, 96, 75, 4, 8]
        self.pool3 = nn.MaxPool3d((1, 2, 2), (1, 2, 2))
        # input_size, hidden_size, num_layers, bidirectional
        # nn.GRU ---> (seq_len, batch_size, num_directions * hidden_size) = (75, 1, 2 * 256)
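        # 96 * 4 * 8 = 3072 features per frame once the conv output is flattened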
        self.gru1 = nn.GRU(96*4*8, 256, 1, bidirectional=True)
        # ----> (75, 1, 2 * 256)
        self.gru2 = nn.GRU(512, 256, 1, bidirectional=True)
        # ----> (75, 1, 28)
        self.FC = nn.Linear(512, 27 + 1)  # 27 characters + 1 CTC blank
        self.dropout_p = dropout_p

        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(self.dropout_p)        
        self.dropout3d = nn.Dropout3d(self.dropout_p)  
        self._init()
    
    def _init(self):
        
        init.kaiming_normal_(self.conv1.weight, nonlinearity='relu')
        init.constant_(self.conv1.bias, 0)
        
        init.kaiming_normal_(self.conv2.weight, nonlinearity='relu')
        init.constant_(self.conv2.bias, 0)
        
        init.kaiming_normal_(self.conv3.weight, nonlinearity='relu')
        init.constant_(self.conv3.bias, 0)        
        
        init.kaiming_normal_(self.FC.weight, nonlinearity='sigmoid')
        init.constant_(self.FC.bias, 0)
        
        for m in (self.gru1, self.gru2):
            stdv = math.sqrt(2 / (96 * 3 * 6 + 256))
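            # PyTorch stacks the GRU's reset/update/new gate weights along dim 0
            # in blocks of hidden_size (256), so each gate block is initialized separately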
            for i in range(0, 256 * 3, 256):
                init.uniform_(m.weight_ih_l0[i: i + 256],
                            -math.sqrt(3) * stdv, math.sqrt(3) * stdv)
                init.orthogonal_(m.weight_hh_l0[i: i + 256])
                init.constant_(m.bias_ih_l0[i: i + 256], 0)
                init.uniform_(m.weight_ih_l0_reverse[i: i + 256],
                            -math.sqrt(3) * stdv, math.sqrt(3) * stdv)
                init.orthogonal_(m.weight_hh_l0_reverse[i: i + 256])
                init.constant_(m.bias_ih_l0_reverse[i: i + 256], 0)

    def forward(self, x):
        
        x = self.conv1(x)
        x = self.relu(x)
        x = self.dropout3d(x)
        x = self.pool1(x)
        
        x = self.conv2(x)
        x = self.relu(x)
        x = self.dropout3d(x)        
        x = self.pool2(x)
        
        x = self.conv3(x)
        x = self.relu(x)
        x = self.dropout3d(x)        
        x = self.pool3(x)
        
        # (B, C, T, H, W) -> (T, B, C, H, W)
        # permute does not create a new tensor: the transposed view shares memory with the original
        # contiguous() behaves like a deep copy, allocating new memory for the permuted layout
        x = x.permute(2, 0, 1, 3, 4).contiguous()
        # (T, B, C, H, W) -> (T, B, C*H*W)
        x = x.view(x.size(0), x.size(1), -1)


        # flatten_parameters() compacts the RNN weights into contiguous memory (needed for cuDNN)
        self.gru1.flatten_parameters()
        self.gru2.flatten_parameters()

        x, h = self.gru1(x)        
        x = self.dropout(x)
        x, h = self.gru2(x)
        # x.shape = [75, 1, 512]
        x = self.dropout(x)
                
        x = self.FC(x)
        x = x.permute(1, 0, 2).contiguous()  # (T, B, 28) -> (B, T, 28)
        return x
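
As a quick sanity check of the shape annotations above, here is a minimal sketch (assuming the imports and class definition shown earlier) that pushes a dummy [1, 3, 75, 64, 128] clip through the model:

model = LipNet()
model.eval()
# dummy clip: batch=1, 3 channels, 75 frames, 64x128 mouth crops
dummy = torch.randn(1, 3, 75, 64, 128)
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # torch.Size([1, 75, 28])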
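
The 28-way output is sized for CTC decoding. Below is a minimal sketch of how the output could feed nn.CTCLoss; the target sequence, its length, and the blank index 0 are illustrative assumptions, not part of the original code:

# CTCLoss expects log-probabilities shaped (T, B, C)
out = LipNet()(torch.randn(1, 3, 75, 64, 128))              # (1, 75, 28)
log_probs = out.permute(1, 0, 2).log_softmax(2)             # (75, 1, 28)
targets = torch.randint(1, 28, (1, 20), dtype=torch.long)   # hypothetical label sequence
input_lengths = torch.full((1,), 75, dtype=torch.long)
target_lengths = torch.full((1,), 20, dtype=torch.long)
loss = nn.CTCLoss(blank=0)(log_probs, targets, input_lengths, target_lengths)  # blank=0 is an assumption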