refinenet.py
"""
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from basenet.vgg16_bn import init_weights


class RefineNet(nn.Module):
    def __init__(self):
        super(RefineNet, self).__init__()

        # Fuse the score map with the base-network feature map; the
        # concatenated input has 34 channels (see forward()).
        self.last_conv = nn.Sequential(
            nn.Conv2d(34, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True)
        )

        # ASPP-style branches: parallel 3x3 convolutions with increasing
        # dilation rates (6, 12, 18, 24) gather context at multiple scales;
        # each branch reduces to a single-channel score map.
        self.aspp1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 1, kernel_size=1)
        )

        self.aspp2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, dilation=12, padding=12), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 1, kernel_size=1)
        )

        self.aspp3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, dilation=18, padding=18), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 1, kernel_size=1)
        )

        self.aspp4 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, dilation=24, padding=24), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),
            nn.Conv2d(128, 1, kernel_size=1)
        )

        init_weights(self.last_conv.modules())
        init_weights(self.aspp1.modules())
        init_weights(self.aspp2.modules())
        init_weights(self.aspp3.modules())
        init_weights(self.aspp4.modules())

    def forward(self, y, upconv4):
        # y: (N, H, W, 2) score map in NHWC layout; upconv4: (N, 32, H, W)
        # feature map in NCHW layout. Concatenating along the channel axis
        # yields the 34-channel input expected by last_conv.
        refine = torch.cat([y.permute(0, 3, 1, 2), upconv4], dim=1)
        refine = self.last_conv(refine)

        aspp1 = self.aspp1(refine)
        aspp2 = self.aspp2(refine)
        aspp3 = self.aspp3(refine)
        aspp4 = self.aspp4(refine)

        # out = torch.add([aspp1, aspp2, aspp3, aspp4], dim=1)
        out = aspp1 + aspp2 + aspp3 + aspp4
        return out.permute(0, 2, 3, 1)  # , refine.permute(0,2,3,1)
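
For quick sanity checking, here is a minimal, illustrative usage sketch (not part of the original file). The tensor shapes are assumptions read off forward() and the 34-channel first convolution: a 2-channel NHWC score map for y and a 32-channel NCHW feature map for upconv4. The import path refinenet is hypothetical and depends on the repository layout, and the basenet package must be importable because __init__ calls init_weights.

import torch

from refinenet import RefineNet  # hypothetical path; adjust to where this file lives

refiner = RefineNet().eval()

y = torch.randn(1, 368, 368, 2)          # NHWC score map (assumed: 2 score channels)
upconv4 = torch.randn(1, 32, 368, 368)   # NCHW base-network feature map (assumed: 32 channels)

with torch.no_grad():
    out = refiner(y, upconv4)

print(out.shape)  # torch.Size([1, 368, 368, 1]) -- refined single-channel score map, NHWC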