v2
This commit is contained in:
@@ -1,11 +1,13 @@
|
||||
from itertools import chain
|
||||
|
||||
import ignite.distributed as idist
|
||||
import torch
|
||||
|
||||
from engine.base.i2i import EngineKernel, run_kernel
|
||||
from engine.util.build import build_model
|
||||
from engine.util.container import GANImageBuffer, LossContainer
|
||||
from engine.util.loss import pixel_loss, gan_loss
|
||||
from engine.util.loss import pixel_loss, gan_loss, feature_match_loss
|
||||
from loss.I2I.edge_loss import EdgeLoss
|
||||
from loss.I2I.minimal_geometry_distortion_constraint_loss import MGCLoss
|
||||
from model.weight_init import generation_init_weights
|
||||
|
||||
@@ -17,7 +19,10 @@ class CycleGANEngineKernel(EngineKernel):
|
||||
self.gan_loss = gan_loss(config.loss.gan)
|
||||
self.cycle_loss = LossContainer(config.loss.cycle.weight, pixel_loss(config.loss.cycle.level))
|
||||
self.id_loss = LossContainer(config.loss.id.weight, pixel_loss(config.loss.id.level))
|
||||
self.mgc_loss = LossContainer(config.loss.mgc.weight, MGCLoss())
|
||||
self.mgc_loss = LossContainer(config.loss.mgc.weight, MGCLoss("opposite"))
|
||||
self.fm_loss = LossContainer(config.loss.fm.weight, feature_match_loss(1, "same"))
|
||||
self.edge_loss = LossContainer(config.loss.edge.weight, EdgeLoss(
|
||||
"HED", hed_pretrained_model_path=config.loss.edge.hed_pretrained_model_path).to(idist.device()))
|
||||
self.image_buffers = {k: GANImageBuffer(config.data.train.buffer_size or 50) for k in
|
||||
self.discriminators.keys()}
|
||||
|
||||
@@ -64,8 +69,12 @@ class CycleGANEngineKernel(EngineKernel):
|
||||
loss[f"cycle_{ph}"] = self.cycle_loss(generated["a2b2a" if ph == "a" else "b2a2b"], batch[ph])
|
||||
loss[f"id_{ph}"] = self.id_loss(generated[f"{ph}2{ph}"], batch[ph])
|
||||
loss[f"mgc_{ph}"] = self.mgc_loss(generated["a2b" if ph == "a" else "b2a"], batch[ph])
|
||||
loss[f"gan_{ph}"] = self.config.loss.gan.weight * self.gan_loss(
|
||||
self.discriminators[ph](generated["a2b" if ph == "b" else "b2a"]), True)
|
||||
prediction_fake = self.discriminators[ph](generated["a2b" if ph == "b" else "b2a"])
|
||||
loss[f"gan_{ph}"] = self.config.loss.gan.weight * self.gan_loss(prediction_fake, True)
|
||||
if self.fm_loss.weight > 0:
|
||||
prediction_real = self.discriminators[ph](batch[ph])
|
||||
loss[f"feature_match_{ph}"] = self.fm_loss(prediction_fake, prediction_real)
|
||||
loss[f"edge_{ph}"] = self.edge_loss(generated["a2b" if ph == "a" else "b2a"], batch[ph], gt_is_edge=False)
|
||||
return loss
|
||||
|
||||
def criterion_discriminators(self, batch, generated) -> dict:
|
||||
|
||||
@@ -101,9 +101,12 @@ class EngineKernel(object):
|
||||
|
||||
|
||||
def _remove_no_grad_loss(loss_dict):
|
||||
need_to_pop = []
|
||||
for k in loss_dict:
|
||||
if not isinstance(loss_dict[k], torch.Tensor):
|
||||
loss_dict.pop(k)
|
||||
need_to_pop.append(k)
|
||||
for k in need_to_pop:
|
||||
loss_dict.pop(k)
|
||||
return loss_dict
|
||||
|
||||
|
||||
|
||||
@@ -23,3 +23,19 @@ def mse_loss(x, target_flag):
|
||||
|
||||
def bce_loss(x, target_flag):
    """Binary cross-entropy (with logits) of *x* against a constant target.

    Args:
        x: raw (pre-sigmoid) prediction tensor.
        target_flag: truthy -> compare against all-ones, falsy -> all-zeros.

    Returns:
        Scalar mean BCE-with-logits loss.
    """
    target = torch.ones_like(x) if target_flag else torch.zeros_like(x)
    return F.binary_cross_entropy_with_logits(x, target)
|
||||
|
||||
|
||||
def feature_match_loss(level, weight_policy):
    """Build a multi-scale discriminator feature-matching loss.

    Args:
        level: forwarded to ``pixel_loss`` to pick the per-feature
            comparison criterion.
        weight_policy: ``"same"`` weights every feature layer by 1;
            ``"exponential_decline"`` weights layer ``i`` by ``2 ** i``.
            NOTE(review): ``2 ** i`` *grows* with ``i`` despite the name
            "decline" — confirm the intended weighting direction.

    Returns:
        ``fm_loss(generated_features, target_features)`` where each argument
        is a list (one entry per discriminator scale) of lists of feature
        tensors. The last entry of each scale is skipped — presumably the
        discriminator's final prediction rather than a feature map (verify
        against the discriminator's output format). Target features are
        detached, so no gradient flows into the target branch.
    """
    compare = pixel_loss(level)
    assert weight_policy in ["same", "exponential_decline"]

    def fm_loss(generated_features, target_features):
        n_scales = len(generated_features)
        total = 0
        for scale in range(n_scales):
            gen_feats = generated_features[scale]
            tgt_feats = target_features[scale]
            for layer in range(len(gen_feats) - 1):
                w = 1 if weight_policy == "same" else 2 ** layer
                total += w * compare(gen_feats[layer], tgt_feats[layer].detach()) / n_scales
        return total

    return fm_loss
|
||||
|
||||
Reference in New Issue
Block a user