Source code for ada.models.layers

import torch
from torch.autograd import Function


def weight_init_glorot_uniform(m):
    """Initialize a module's parameters by layer type: Glorot/Xavier uniform
    for linear layers, normal distributions for conv and batch-norm layers."""
    classname = m.__class__.__name__
    if classname.find("Linear") != -1:
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
    if classname.find("Conv") != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find("BatchNorm") != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
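
# A minimal usage sketch (not part of the original module): the initializer is
# typically applied recursively to every submodule via torch.nn.Module.apply.
# The model below is a placeholder chosen only for illustration.
#
#     model = torch.nn.Sequential(
#         torch.nn.Linear(16, 32),
#         torch.nn.BatchNorm1d(32),
#         torch.nn.ReLU(),
#         torch.nn.Linear(32, 2),
#     )
#     model.apply(weight_init_glorot_uniform)  # visits each submodule once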
class ReverseLayerF(Function):
    """Gradient reversal layer: acts as the identity in the forward pass and
    multiplies the incoming gradient by -alpha in the backward pass."""

    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Negate and scale the gradient; alpha itself receives no gradient.
        output = grad_output.neg() * ctx.alpha
        return output, None
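
# A rough usage sketch (assumption, not part of this module) of how a gradient
# reversal layer is commonly placed in front of a domain classifier for
# domain-adversarial training; the two Linear layers are placeholders.
#
#     feature_extractor = torch.nn.Linear(16, 8)   # placeholder feature extractor
#     domain_classifier = torch.nn.Linear(8, 2)    # placeholder domain classifier
#
#     x = torch.randn(4, 16)
#     features = feature_extractor(x)
#     # Forward pass is the identity; backward pass flips and scales gradients,
#     # so the feature extractor learns to confuse the domain classifier.
#     reversed_features = ReverseLayerF.apply(features, 1.0)
#     domain_logits = domain_classifier(reversed_features)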