forked from dgriff777/rl_a3c_pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
model.py
executable file
·68 lines (58 loc) · 2.61 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class A3Clstm(torch.nn.Module):
    """Recurrent actor-critic network with a dueling head (A3C variant).

    A four-stage conv/max-pool trunk reduces the observation to a
    1024-dim feature vector (64 channels x 4 x 4 for an 80x80 input),
    which feeds an ``LSTMCell``; the hidden state drives three linear
    heads: a scalar state value (critic), per-action advantages, and
    per-action policy logits (actor).
    """

    def __init__(self, num_inputs, action_space):
        """
        Args:
            num_inputs: number of input image channels.
            action_space: discrete action space; only its ``n`` attribute
                (number of actions) is read.
        """
        super(A3Clstm, self).__init__()
        # Conv trunk: each stage is conv -> 2x2 max-pool, halving H and W.
        self.conv1 = nn.Conv2d(num_inputs, 32, 5, stride=1, padding=2)
        self.maxp1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 5, stride=1, padding=1)
        self.maxp2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(32, 64, 4, stride=1, padding=1)
        self.maxp3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.maxp4 = nn.MaxPool2d(2, 2)
        # 1024 = 64 channels * 4 * 4 spatial positions after the trunk
        # (matches an 80x80 input).
        self.lstm = nn.LSTMCell(1024, 512)
        num_outputs = action_space.n
        self.critic_linear = nn.Linear(512, 1)           # state value V(s)
        self.adv_linear = nn.Linear(512, num_outputs)    # advantages A(s, a)
        self.actor_linear = nn.Linear(512, num_outputs)  # policy logits
        # Learnable log of an entropy temperature, initialised to log(0.01).
        self.register_parameter(
            'log_alpha',
            nn.Parameter(torch.full((1, ), math.log(.01),
                                    dtype=torch.float32)))
        with torch.no_grad():
            # Entropy of a near-deterministic reference distribution:
            # one action gets prob 0.9 + 0.1/n, the others 0.1/n each.
            # NOTE(review): not referenced inside this class — presumably
            # consumed by the training loop as an entropy target; confirm
            # against callers.
            x = torch.full((num_outputs, ),
                           0.1 / num_outputs,
                           dtype=torch.float32)
            x[0] += 0.9
            self.constraint = torch.distributions.categorical.Categorical(
                probs=x).entropy().item()
        # Lower bound for log_alpha: log(1e-3), i.e. alpha >= 1e-3.
        self.thres = torch.log(torch.tensor(1e-3, dtype=torch.float32)).item()

    @torch.no_grad()
    def keep_alpha(self):
        """Clamp ``log_alpha`` from below so alpha never drops under 1e-3."""
        self.log_alpha[0] = max(self.thres, self.log_alpha.item())

    def forward(self, inputs):
        """Run one recurrent actor-critic step.

        Args:
            inputs: tuple ``(obs, (hx, cx))`` where ``obs`` is a
                ``(batch, num_inputs, H, W)`` image batch (H = W = 80 so the
                flattened trunk output matches the 1024-dim LSTM input) and
                ``(hx, cx)`` are LSTM hidden/cell states of shape
                ``(batch, 512)``.

        Returns:
            ``(act, None, logit, (adv_value, q_value), (hx, cx))``:
            sampled action indices ``(batch, 1)``; a ``None`` placeholder
            kept for caller compatibility; policy logits
            ``(batch, n_actions)``; the centred advantages and the Q-value
            gathered for the sampled action ``(batch, 1)``; and the new
            LSTM state.
        """
        inputs, (hx, cx) = inputs
        x = F.relu(self.maxp1(self.conv1(inputs)))
        x = F.relu(self.maxp2(self.conv2(x)))
        x = F.relu(self.maxp3(self.conv3(x)))
        x = F.relu(self.maxp4(self.conv4(x)))
        x = x.view(x.size(0), -1)
        hx, cx = self.lstm(x, (hx, cx))
        x = hx
        logit = self.actor_linear(x)
        dist = torch.distributions.categorical.Categorical(logits=logit)
        adv_value = self.adv_linear(x)
        # Dueling-style normalisation: subtract the policy-weighted mean of
        # the advantages, plus a small (0.01-scaled) entropy term, so the
        # expected advantage under the current policy is anchored.
        adv_value = (adv_value
                     - (F.softmax(logit, -1) * adv_value).sum(-1, True)
                     - dist.entropy().unsqueeze(-1) * .01)
        v_value = self.critic_linear(x)
        q_value = adv_value + v_value
        act = dist.sample().unsqueeze(-1)
        # Keep only the Q-value of the sampled action.
        q_value = q_value.gather(-1, act)
        return act, None, logit, (adv_value, q_value), (hx, cx)