With this approach a mask is added during training; at inference the output is processed according to the same mask and pasted straight back into the original frame through it.
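A minimal sketch of that mask-and-paste idea (function and variable names here are illustrative, not from the repo): the mouth region is zeroed out during training so the network must synthesize it from audio, and at inference the generated pixels are composited back through the same mask.

import torch

def apply_mouth_mask(face, mask):
    # training: zero out the masked (mouth) region of the input face
    return face * (1 - mask)

def paste_back(original, generated, mask):
    # inference: keep original pixels outside the mask,
    # take generated pixels inside it
    return original * (1 - mask) + generated * mask

# toy usage on a 96x96 face crop, masking the lower half
face = torch.rand(1, 3, 96, 96)
mask = torch.zeros(1, 1, 96, 96)
mask[:, :, 48:, :] = 1.0          # lower half = mouth region
masked_input = apply_mouth_mask(face, mask)
composited = paste_back(face, torch.rand(1, 3, 96, 96), mask)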
conv.py
import torch
from torch import nn
from torch.nn import functional as F
class DepthwiseSeparableConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(DepthwiseSeparableConv2d, self).__init__()
        self.depthwise = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels),  # depthwise convolution: groups=in_channels applies one filter per channel
            nn.BatchNorm2d(in_channels),
            nn.ReLU6(inplace=True))
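        # The pointwise stage and forward() are missing above; restored below.
        # By definition a depthwise-separable conv follows the depthwise stage
        # with a 1x1 pointwise conv that mixes channels and maps
        # in_channels -> out_channels (BN + ReLU6 mirror the depthwise stage).
        self.pointwise = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU6(inplace=True))

    def forward(self, x):
        return self.pointwise(self.depthwise(x))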
class Conv2d(nn.Module):
    def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, depth_wise=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
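        # Body truncated in the original post; a hedged reconstruction follows.
        # It mirrors the upstream Wav2Lip Conv2d, and assumes depth_wise simply
        # swaps in the DepthwiseSeparableConv2d block defined above.
        if depth_wise:
            self.conv_block = DepthwiseSeparableConv2d(cin, cout, kernel_size, stride, padding)
        else:
            self.conv_block = nn.Sequential(
                nn.Conv2d(cin, cout, kernel_size, stride, padding),
                nn.BatchNorm2d(cout))
        self.act = nn.ReLU()
        self.residual = residual

    def forward(self, x):
        out = self.conv_block(x)
        if self.residual:
            out = out + x  # residual add; requires matching shapes
        return self.act(out)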
class nonorm_Conv2d(nn.Module):
    def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv_block = nn.Sequential(
            nn.Conv2d(cin, cout, kernel_size, stride, padding),
        )
        self.act = nn.LeakyReLU(0.01, inplace=True)
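    # forward() is missing above; this matches the upstream Wav2Lip definition
    def forward(self, x):
        out = self.conv_block(x)
        return self.act(out)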
class Conv2dTranspose(nn.Module):
    def __init__(self, cin, cout, kernel_size, stride, padding, output_padding=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conv_block = nn.Sequential(
            nn.ConvTranspose2d(cin, cout, kernel_size, stride, padding, output_padding),
            nn.BatchNorm2d(cout)
        )
        self.act = nn.ReLU()
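    # forward() is missing above; this matches the upstream Wav2Lip definition
    def forward(self, x):
        out = self.conv_block(x)
        return self.act(out)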
wav2lip.py
import torch
from torch import nn
from torch.nn import functional as F
import math
from models.conv import Conv2dTranspose, Conv2d, nonorm_Conv2d
class Wav2Lip(nn.Module):
    def __init__(self):
        super(Wav2Lip, self).__init__()
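        # The generator body is truncated in the original post. What follows is
        # a minimal structural sketch, not the full upstream network: Wav2Lip is
        # a U-Net style generator whose face-encoder skip features are fused
        # with an audio embedding that acts as the bottleneck. All channel
        # sizes and block counts below are illustrative assumptions.
        self.face_encoder_blocks = nn.ModuleList([
            nn.Sequential(Conv2d(6, 16, kernel_size=7, stride=1, padding=3)),   # input: masked face + reference frame (6 channels)
            nn.Sequential(Conv2d(16, 32, kernel_size=3, stride=2, padding=1)),  # downsample
        ])
        self.audio_encoder = nn.Sequential(                                     # mel-spectrogram branch
            Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
            Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
        )
        self.face_decoder_blocks = nn.ModuleList([
            nn.Sequential(Conv2d(32, 32, kernel_size=3, stride=1, padding=1)),
            nn.Sequential(Conv2dTranspose(64, 16, kernel_size=3, stride=2, padding=1, output_padding=1)),
        ])
        self.output_block = nn.Sequential(
            nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid())                                                       # RGB output in [0, 1]

    def forward(self, audio_sequences, face_sequences):
        # encode the face, keeping each feature map for a skip connection
        feats = []
        x = face_sequences
        for f in self.face_encoder_blocks:
            x = f(x)
            feats.append(x)
        # the audio embedding becomes the decoder's starting feature; in the
        # real model both streams are first reduced to a 1x1 bottleneck, so the
        # spatial sizes here only line up under this sketch's assumptions
        x = self.audio_encoder(audio_sequences)
        for f in self.face_decoder_blocks:
            x = f(x)
            x = torch.cat((x, feats.pop()), dim=1)  # U-Net skip connection
        return self.output_block(x)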
class Wav2Lip_disc_qual(nn.Module):
    def __init__(self):
        super(Wav2Lip_disc_qual, self).__init__()
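        # The discriminator body is truncated in the original post. A minimal
        # sketch follows: as in the upstream repo, the quality discriminator
        # looks only at the lower half of the face (the mouth region) through a
        # stack of un-normalized convs and predicts real/fake. Channel sizes
        # and block counts are illustrative assumptions.
        self.face_encoder_blocks = nn.ModuleList([
            nn.Sequential(nonorm_Conv2d(3, 32, kernel_size=7, stride=1, padding=3)),
            nn.Sequential(nonorm_Conv2d(32, 64, kernel_size=5, stride=2, padding=2)),
        ])
        self.binary_pred = nn.Sequential(
            nn.Conv2d(64, 1, kernel_size=1, stride=1, padding=0), nn.Sigmoid())

    def get_lower_half(self, face_sequences):
        # lip-sync quality depends only on the mouth region
        return face_sequences[:, :, face_sequences.size(2) // 2:]

    def forward(self, face_sequences):
        x = self.get_lower_half(face_sequences)
        for f in self.face_encoder_blocks:
            x = f(x)
        # the upstream stack downsamples to 1x1 before binary_pred; here the
        # flattened probability map plays the same role in this simplified sketch
        return self.binary_pred(x).view(len(x), -1)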