almost 0.1
model/GAN/CycleGAN.py (new file, 62 lines)
@@ -0,0 +1,62 @@
import torch.nn as nn

from model.normalization import select_norm_layer
from model.registry import MODEL
from .base import ResidualBlock


@MODEL.register_module("CyCle-Generator")
class Generator(nn.Module):
    def __init__(self, in_channels, out_channels, base_channels=64, num_blocks=9, padding_mode='reflect',
                 norm_type="IN"):
        super(Generator, self).__init__()
        assert num_blocks >= 0, f'Number of residual blocks must be non-negative, but got {num_blocks}.'
        norm_layer = select_norm_layer(norm_type)
        # InstanceNorm has no affine parameters by default, so the convs keep their bias.
        use_bias = norm_type == "IN"

        self.start_conv = nn.Sequential(
            nn.Conv2d(in_channels, base_channels, kernel_size=7, stride=1, padding_mode=padding_mode, padding=3,
                      bias=use_bias),
            norm_layer(num_features=base_channels),
            nn.ReLU(inplace=True)
        )

        # down sampling
        submodules = []
        num_down_sampling = 2
        for i in range(num_down_sampling):
            multiple = 2 ** i
            submodules += [
                nn.Conv2d(in_channels=base_channels * multiple, out_channels=base_channels * multiple * 2,
                          kernel_size=3, stride=2, padding=1, bias=use_bias),
                norm_layer(num_features=base_channels * multiple * 2),
                nn.ReLU(inplace=True)
            ]
        self.encoder = nn.Sequential(*submodules)

        # Each down-sampling stage doubles the channel count, so the residual
        # blocks run at base_channels * 2 ** num_down_sampling channels
        # (the previous `num_down_sampling ** 2` only coincided for 2 stages).
        res_block_channels = 2 ** num_down_sampling * base_channels
        self.resnet_middle = nn.Sequential(
            *[ResidualBlock(res_block_channels, padding_mode, norm_type) for _ in range(num_blocks)])

        # up sampling
        submodules = []
        for i in range(num_down_sampling):
            multiple = 2 ** (num_down_sampling - i)
            submodules += [
                nn.ConvTranspose2d(base_channels * multiple, base_channels * multiple // 2, kernel_size=3, stride=2,
                                   padding=1, output_padding=1, bias=use_bias),
                norm_layer(num_features=base_channels * multiple // 2),
                nn.ReLU(inplace=True),
            ]
        self.decoder = nn.Sequential(*submodules)

        self.end_conv = nn.Sequential(
            nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=3, padding_mode=padding_mode),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.encoder(self.start_conv(x))
        x = self.resnet_middle(x)
        return self.end_conv(self.decoder(x))
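A quick smoke test for the new generator (a sketch, not part of the commit; assumes the repository root is importable):

import torch

from model.GAN.CycleGAN import Generator  # path as in this diff

net = Generator(in_channels=3, out_channels=3, base_channels=64, num_blocks=9)
x = torch.randn(1, 3, 256, 256)
y = net(x)
# The 7x7 stem and two stride-2 convs are exactly undone by the two
# stride-2 transposed convs and the 7x7 head, and Tanh bounds the output.
assert y.shape == x.shape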
@@ -45,7 +45,6 @@ class Generator(nn.Module):
        # Down-Sampling Bottleneck
        mult = 2 ** n_down_sampling
        for i in range(num_blocks):
            # TODO: change ResnetBlock to ResidualBlock, check use_bias param
            down_encoder += [ResidualBlock(base_channels * mult, use_bias=False)]
        self.down_encoder = nn.Sequential(*down_encoder)
@@ -1,13 +1,68 @@
import math

import torch
import torch.nn as nn

from model.normalization import select_norm_layer
from model import MODEL


class GANImageBuffer(object):
    """This class implements an image buffer that stores previously
    generated images.

    This buffer allows us to update the discriminator using a history of
    generated images rather than the ones produced by the latest generator,
    to reduce model oscillation.

    Args:
        buffer_size (int): The size of the image buffer. If buffer_size = 0,
            no buffer will be created.
        buffer_ratio (float): The probability of returning an image
            previously stored in the buffer.
    """

    def __init__(self, buffer_size, buffer_ratio=0.5):
        self.buffer_size = buffer_size
        # create an empty buffer
        if self.buffer_size > 0:
            self.img_num = 0
            self.image_buffer = []
        self.buffer_ratio = buffer_ratio

    def query(self, images):
        """Query current image batch using a history of generated images.

        Args:
            images (Tensor): Current image batch without history information.
        """
        if self.buffer_size == 0:  # if the buffer size is 0, do nothing
            return images
        return_images = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            # if the buffer is not full, keep inserting current images
            if self.img_num < self.buffer_size:
                self.img_num = self.img_num + 1
                self.image_buffer.append(image)
                return_images.append(image)
            else:
                use_buffer = torch.rand(1) < self.buffer_ratio
                # with probability self.buffer_ratio, return a previously
                # stored image and insert the current image into the buffer
                if use_buffer:
                    random_id = torch.randint(0, self.buffer_size, (1,)).item()
                    image_tmp = self.image_buffer[random_id].clone()
                    self.image_buffer[random_id] = image
                    return_images.append(image_tmp)
                # with probability (1 - self.buffer_ratio), return the
                # current image
                else:
                    return_images.append(image)
        # collect all the images and return
        return_images = torch.cat(return_images, 0)
        return return_images
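How the buffer slots into discriminator training, as a sketch (the 50-image pool size follows the CycleGAN paper; import path assumed from this file):

import torch

from model.GAN.base import GANImageBuffer  # assumed module location

buffer = GANImageBuffer(buffer_size=50)
fake = torch.randn(4, 3, 128, 128)  # stand-in for a generator batch
mixed = buffer.query(fake)
# Once the pool is full, about buffer_ratio of the returned images come
# from earlier iterations, which damps discriminator oscillation.
assert mixed.shape == fake.shape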

# based on the SPADE / pix2pixHD discriminator
@MODEL.register_module("pix2pixHD-PatchDiscriminator")
@MODEL.register_module("PatchDiscriminator")
class PatchDiscriminator(nn.Module):
    def __init__(self, in_channels, base_channels, num_conv=4, use_spectral=False, norm_type="IN",
                 need_intermediate_feature=False):
@@ -1,182 +0,0 @@
import torch
import torch.nn as nn
from model.registry import MODEL
from model.normalization import select_norm_layer


class GANImageBuffer(object):
    """This class implements an image buffer that stores previously
    generated images.

    This buffer allows us to update the discriminator using a history of
    generated images rather than the ones produced by the latest generator
    to reduce model oscillation.

    Args:
        buffer_size (int): The size of image buffer. If buffer_size = 0,
            no buffer will be created.
        buffer_ratio (float): The chance / possibility to use the images
            previously stored in the buffer.
    """

    def __init__(self, buffer_size, buffer_ratio=0.5):
        self.buffer_size = buffer_size
        # create an empty buffer
        if self.buffer_size > 0:
            self.img_num = 0
            self.image_buffer = []
        self.buffer_ratio = buffer_ratio

    def query(self, images):
        """Query current image batch using a history of generated images.

        Args:
            images (Tensor): Current image batch without history information.
        """
        if self.buffer_size == 0:  # if the buffer size is 0, do nothing
            return images
        return_images = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            # if the buffer is not full, keep inserting current images
            if self.img_num < self.buffer_size:
                self.img_num = self.img_num + 1
                self.image_buffer.append(image)
                return_images.append(image)
            else:
                use_buffer = torch.rand(1) < self.buffer_ratio
                # by self.buffer_ratio, the buffer will return a previously
                # stored image, and insert the current image into the buffer
                if use_buffer:
                    random_id = torch.randint(0, self.buffer_size, (1,)).item()
                    image_tmp = self.image_buffer[random_id].clone()
                    self.image_buffer[random_id] = image
                    return_images.append(image_tmp)
                # by (1 - self.buffer_ratio), the buffer will return the
                # current image
                else:
                    return_images.append(image)
        # collect all the images and return
        return_images = torch.cat(return_images, 0)
        return return_images

class ResidualBlock(nn.Module):
    def __init__(self, num_channels, padding_mode='reflect', norm_type="IN", use_dropout=False, use_bias=None):
        super(ResidualBlock, self).__init__()

        if use_bias is None:
            # Only for IN, use bias since it does not have affine parameters.
            use_bias = norm_type == "IN"
        norm_layer = select_norm_layer(norm_type)
        models = [nn.Sequential(
            nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1, padding_mode=padding_mode, bias=use_bias),
            norm_layer(num_channels),
            nn.ReLU(inplace=True),
        )]
        if use_dropout:
            models.append(nn.Dropout(0.5))
        models.append(nn.Sequential(
            nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1, padding_mode=padding_mode, bias=use_bias),
            norm_layer(num_channels),
        ))
        self.block = nn.Sequential(*models)

    def forward(self, x):
        return x + self.block(x)

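Since forward returns x + self.block(x) and both 3x3 convs preserve spatial size, the block can be stacked at any depth without changing shapes; a minimal check (sketch):

import torch

block = ResidualBlock(num_channels=256, padding_mode='reflect', norm_type="IN")
x = torch.randn(1, 256, 64, 64)
assert block(x).shape == x.shape  # identity skip plus a 2-conv residual branch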
@MODEL.register_module()
class ResGenerator(nn.Module):
    def __init__(self, in_channels, out_channels, base_channels=64, num_blocks=9, padding_mode='reflect',
                 norm_type="IN"):
        super(ResGenerator, self).__init__()
        assert num_blocks >= 0, f'Number of residual blocks must be non-negative, but got {num_blocks}.'
        norm_layer = select_norm_layer(norm_type)
        use_bias = norm_type == "IN"

        self.start_conv = nn.Sequential(
            nn.Conv2d(in_channels, base_channels, kernel_size=7, stride=1, padding_mode=padding_mode, padding=3,
                      bias=use_bias),
            norm_layer(num_features=base_channels),
            nn.ReLU(inplace=True)
        )

        # down sampling
        submodules = []
        num_down_sampling = 2
        for i in range(num_down_sampling):
            multiple = 2 ** i
            submodules += [
                nn.Conv2d(in_channels=base_channels * multiple, out_channels=base_channels * multiple * 2,
                          kernel_size=3, stride=2, padding=1, bias=use_bias),
                norm_layer(num_features=base_channels * multiple * 2),
                nn.ReLU(inplace=True)
            ]
        self.encoder = nn.Sequential(*submodules)

        res_block_channels = num_down_sampling ** 2 * base_channels
        self.resnet_middle = nn.Sequential(
            *[ResidualBlock(res_block_channels, padding_mode, norm_type) for _ in range(num_blocks)])

        # up sampling
        submodules = []
        for i in range(num_down_sampling):
            multiple = 2 ** (num_down_sampling - i)
            submodules += [
                nn.ConvTranspose2d(base_channels * multiple, base_channels * multiple // 2, kernel_size=3, stride=2,
                                   padding=1, output_padding=1, bias=use_bias),
                norm_layer(num_features=base_channels * multiple // 2),
                nn.ReLU(inplace=True),
            ]
        self.decoder = nn.Sequential(*submodules)

        self.end_conv = nn.Sequential(
            nn.Conv2d(base_channels, out_channels, kernel_size=7, padding=3, padding_mode=padding_mode),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.encoder(self.start_conv(x))
        x = self.resnet_middle(x)
        return self.end_conv(self.decoder(x))

@MODEL.register_module()
class PatchDiscriminator(nn.Module):
    def __init__(self, in_channels, base_channels=64, num_conv=3, norm_type="IN"):
        super(PatchDiscriminator, self).__init__()
        assert num_conv >= 0, f'Number of conv blocks must be non-negative, but got {num_conv}.'
        norm_layer = select_norm_layer(norm_type)
        use_bias = norm_type == "IN"

        kernel_size = 4
        padding = 1
        sequence = [
            nn.Conv2d(in_channels, base_channels, kernel_size=kernel_size, stride=2, padding=padding),
            nn.LeakyReLU(0.2, inplace=True),
        ]

        # stacked intermediate layers,
        # gradually increasing the number of filters
        multiple_now = 1
        for n in range(1, num_conv):
            multiple_prev = multiple_now
            multiple_now = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(base_channels * multiple_prev, base_channels * multiple_now, kernel_size=kernel_size,
                          padding=padding, stride=2, bias=use_bias),
                norm_layer(base_channels * multiple_now),
                nn.LeakyReLU(0.2, inplace=True)
            ]
        multiple_prev = multiple_now
        multiple_now = min(2 ** num_conv, 8)
        sequence += [
            nn.Conv2d(base_channels * multiple_prev, base_channels * multiple_now, kernel_size, stride=1,
                      padding=padding, bias=use_bias),
            norm_layer(base_channels * multiple_now),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(base_channels * multiple_now, 1, kernel_size, stride=1, padding=padding)
        ]
        self.model = nn.Sequential(*sequence)

    def forward(self, x):
        return self.model(x)
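With the defaults (num_conv=3, 4x4 kernels), this deleted class is the standard 70x70 PatchGAN: its output is a map of per-patch real/fake logits rather than a single score. A sketch of the geometry (illustrative input size):

import torch

disc = PatchDiscriminator(in_channels=3, base_channels=64, num_conv=3)
scores = disc(torch.randn(1, 3, 256, 256))
# Three stride-2 convs then two stride-1 convs: 256 -> 128 -> 64 -> 32 -> 31 -> 30,
# so each of the 30x30 logits judges one overlapping ~70x70 patch.
assert scores.shape == (1, 1, 30, 30)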
@@ -1,7 +1,6 @@
from model.registry import MODEL
import model.GAN.residual_generator
import model.GAN.CycleGAN
import model.GAN.TAFG
import model.GAN.UGATIT
import model.fewshot
import model.GAN.wrapper
import model.GAN.base
model/fewshot.py (105 lines deleted)
@@ -1,105 +0,0 @@
import math

import torch.nn as nn

from .registry import MODEL


# --- gaussian initialize ---
def init_layer(l):
    # Initialization using fan-in
    if isinstance(l, nn.Conv2d):
        n = l.kernel_size[0] * l.kernel_size[1] * l.out_channels
        l.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
    elif isinstance(l, nn.BatchNorm2d):
        l.weight.data.fill_(1)
        l.bias.data.fill_(0)
    elif isinstance(l, nn.Linear):
        l.bias.data.fill_(0)
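Despite the "fan-in" comment, n is computed over out_channels, i.e. the layer's fan-out; the same distribution is available from torch's built-in initializer (sketch):

import torch.nn as nn

conv = nn.Conv2d(64, 128, kernel_size=3)
# Matches init_layer for this conv: weight ~ N(0, sqrt(2 / (3 * 3 * 128))).
nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu')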

class Flatten(nn.Module):
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        return x.view(x.size(0), -1)


class SimpleBlock(nn.Module):
    def __init__(self, in_channels, out_channels, half_res, leakyrelu=False):
        super(SimpleBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True) if not leakyrelu else nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.relu = nn.ReLU(inplace=True) if not leakyrelu else nn.LeakyReLU(0.2, inplace=True)
        if in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, 2 if half_res else 1, bias=False),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        o = self.block(x)
        return self.relu(o + self.shortcut(x))


class ResNet(nn.Module):
    def __init__(self, block, layers, dims, num_classes=None, classifier_type="linear", flatten=True, leakyrelu=False):
        super().__init__()
        assert len(layers) == 4, 'Can have only four stages'
        self.inplanes = 64

        self.start = nn.Sequential(
            nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(self.inplanes),
            nn.ReLU(inplace=True) if not leakyrelu else nn.LeakyReLU(0.2, inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )

        trunk = []
        in_channels = self.inplanes
        for i in range(4):
            for j in range(layers[i]):
                half_res = i >= 1 and j == 0
                trunk.append(block(in_channels, dims[i], half_res, leakyrelu))
                in_channels = dims[i]
        if flatten:
            trunk.append(nn.AvgPool2d(7))
            trunk.append(Flatten())
        if num_classes is not None:
            if classifier_type == "linear":
                trunk.append(nn.Linear(in_channels, num_classes))
            elif classifier_type == "distlinear":
                pass
            else:
                raise ValueError(f"invalid classifier_type:{classifier_type}")
        self.trunk = nn.Sequential(*trunk)
        self.apply(init_layer)

    def forward(self, x):
        return self.trunk(self.start(x))


@MODEL.register_module()
def resnet10(num_classes=None, classifier_type="linear", flatten=True, leakyrelu=False):
    return ResNet(SimpleBlock, [1, 1, 1, 1], [64, 128, 256, 512], num_classes, classifier_type, flatten, leakyrelu)


@MODEL.register_module()
def resnet18(num_classes=None, classifier_type="linear", flatten=True, leakyrelu=False):
    return ResNet(SimpleBlock, [2, 2, 2, 2], [64, 128, 256, 512], num_classes, classifier_type, flatten, leakyrelu)


@MODEL.register_module()
def resnet34(num_classes=None, classifier_type="linear", flatten=True, leakyrelu=False):
    return ResNet(SimpleBlock, [3, 4, 6, 3], [64, 128, 256, 512], num_classes, classifier_type, flatten, leakyrelu)
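Before their removal, these factories built standard few-shot backbones; for instance (sketch, illustrative shapes):

import torch

backbone = resnet18(num_classes=None, flatten=True)  # 512-d feature extractor
feats = backbone(torch.randn(2, 3, 224, 224))
# 224 -> 112 (stem) -> 56 (maxpool) -> 28 -> 14 -> 7 (three half-res stages),
# then AvgPool2d(7) and Flatten leave a (batch, 512) embedding.
assert feats.shape == (2, 512)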