Models.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """Small CNN for MNIST-style 1x28x28 input: three conv blocks followed by a
    linear projection to a low-dimensional embedding, classified by distance to
    the learnable class centers held in dce_loss."""

    def __init__(self, num_hidden_units=2, num_classes=10, s=2):
        super(Net, self).__init__()
        self.scale = s  # temperature applied to the negative distances before log-softmax

        # Block 1: 1 -> 32 channels
        self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
        self.prelu1_1 = nn.PReLU()
        self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2)
        self.prelu1_2 = nn.PReLU()
        # Block 2: 32 -> 64 channels
        self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        self.prelu2_1 = nn.PReLU()
        self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
        self.prelu2_2 = nn.PReLU()
        # Block 3: 64 -> 128 channels
        self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
        self.prelu3_1 = nn.PReLU()
        self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
        self.prelu3_2 = nn.PReLU()

        # Projection to the embedding space: after three 2x2 poolings a 28x28
        # input is reduced to a 3x3 map with 128 channels.
        self.preluip1 = nn.PReLU()
        self.ip1 = nn.Linear(128 * 3 * 3, num_hidden_units)
        self.dce = dce_loss(num_classes, num_hidden_units)
    def forward(self, x):
        # Block 1: 28x28 -> 14x14
        x = self.prelu1_1(self.conv1_1(x))
        x = self.prelu1_2(self.conv1_2(x))
        x = F.max_pool2d(x, 2)
        # Block 2: 14x14 -> 7x7
        x = self.prelu2_1(self.conv2_1(x))
        x = self.prelu2_2(self.conv2_2(x))
        x = F.max_pool2d(x, 2)
        # Block 3: 7x7 -> 3x3
        x = self.prelu3_1(self.conv3_1(x))
        x = self.prelu3_2(self.conv3_2(x))
        x = F.max_pool2d(x, 2)

        # Flatten and project to the embedding space.
        x = x.view(-1, 128 * 3 * 3)
        x1 = self.preluip1(self.ip1(x))

        # Score by (negative squared) distance to each class center, then turn
        # the scaled scores into log-probabilities.
        centers, x = self.dce(x1)
        output = F.log_softmax(self.scale * x, dim=1)
        return x1, centers, x, output

 
class dce_loss(torch.nn.Module):
    """Distance-based classification head: one learnable center per class,
    scoring a feature vector by its squared Euclidean distance to each center."""

    def __init__(self, n_classes, feat_dim, init_weight=True):
        super(dce_loss, self).__init__()
        self.n_classes = n_classes
        self.feat_dim = feat_dim
        # Centers stored column-wise as a (feat_dim, n_classes) parameter, so
        # they move with the module when it is sent to a device.
        self.centers = nn.Parameter(torch.randn(self.feat_dim, self.n_classes))
        if init_weight:
            self.__init_weight()

    def __init_weight(self):
        nn.init.kaiming_normal_(self.centers)
    def forward(self, x):
        # Pairwise squared distances via the expansion
        # ||x - c||^2 = ||x||^2 + ||c||^2 - 2 * x . c
        features_square = torch.sum(torch.pow(x, 2), 1, keepdim=True)            # (N, 1)
        centers_square = torch.sum(torch.pow(self.centers, 2), 0, keepdim=True)  # (1, C)
        features_into_centers = 2 * torch.matmul(x, self.centers)                # (N, C)
        dist = features_square + centers_square - features_into_centers
        # Negated so that a larger score means "closer to the center".
        return self.centers, -dist


def regularization(features, centers, labels):
    """Prototype loss: mean squared distance between each feature vector and
    the center of its ground-truth class."""
    distance = features - torch.t(centers)[labels]                 # (N, feat_dim)
    distance = torch.sum(torch.pow(distance, 2), 1, keepdim=True)  # (N, 1)
    distance = torch.sum(distance, 0, keepdim=True) / features.shape[0]
    return distance
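

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not taken from this repository). It
# assumes 28x28 single-channel MNIST-style input; the batch size, label values
# and the 0.001 weight on the regularization term are arbitrary choices made
# only to show how the pieces above fit together.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = Net(num_hidden_units=2, num_classes=10, s=2)

    images = torch.randn(8, 1, 28, 28)   # dummy batch of 8 "digits"
    labels = torch.randint(0, 10, (8,))  # dummy ground-truth labels

    features, centers, neg_dist, log_probs = model(images)
    # features: (8, 2), centers: (2, 10), neg_dist: (8, 10), log_probs: (8, 10)

    # Distance-based cross-entropy plus the prototype regularizer.
    loss = F.nll_loss(log_probs, labels) \
        + 0.001 * regularization(features, centers, labels).squeeze()
    loss.backward()
    print("loss:", float(loss))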