
Commit 2c407db ("edit")
1 parent d148f1f

File tree: 9 files changed, +533 -113 lines changed


Reinforcement_learning_TUT/10_A3C/A3C_RNN.py

+5-7
@@ -30,7 +30,7 @@
 GAMMA = 0.9
 ENTROPY_BETA = 0.01
 LR_A = 0.0001    # learning rate for actor
-LR_C = 0.0005    # learning rate for critic
+LR_C = 0.001    # learning rate for critic
 GLOBAL_RUNNING_R = []
 GLOBAL_EP = 0

@@ -60,8 +60,7 @@ def __init__(self, scope, globalAC=None):

                 td = tf.subtract(self.v_target, self.v, name='TD_error')
                 with tf.name_scope('c_loss'):
-                    self.c_losses = tf.square(td)  # shape (None, 1), use this to get sum of gradients over batch
-                    self.c_loss = tf.reduce_mean(self.c_losses)
+                    self.c_loss = tf.reduce_mean(tf.square(td))

                 with tf.name_scope('wrap_a_out'):
                     mu, sigma = mu * A_BOUND[1], sigma + 1e-4
@@ -73,16 +72,15 @@ def __init__(self, scope, globalAC=None):
                     exp_v = log_prob * td
                     entropy = normal_dist.entropy()  # encourage exploration
                     self.exp_v = ENTROPY_BETA * entropy + exp_v
-                    self.a_losses = -self.exp_v  # shape (None, 1), use this to get sum of gradients over batch
-                    self.a_loss = tf.reduce_mean(self.a_losses)
+                    self.a_loss = tf.reduce_mean(-self.exp_v)

                 with tf.name_scope('choose_a'):  # use local params to choose action
                     self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
                 with tf.name_scope('local_grad'):
                     self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                     self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
-                    self.a_grads = tf.gradients(self.a_losses, self.a_params)    # use losses will give accumulated sum of gradients
-                    self.c_grads = tf.gradients(self.c_losses, self.c_params)
+                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
+                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

             with tf.name_scope('sync'):
                 with tf.name_scope('pull'):
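
The change above swaps gradients of the per-sample loss tensors for gradients of the scalar mean loss. A minimal sketch (not from this commit, assuming TensorFlow 1.x as the tutorials use) of why both give the same update direction: tf.gradients sums the per-element gradients of a non-scalar tensor, so the mean-loss gradient is that batch sum scaled by 1/batch_size.

# Illustrative sketch, not part of the commit.
import tensorflow as tf

x = tf.Variable([1.0, 2.0, 3.0])
per_sample_loss = tf.square(x)                  # shape (3,): one loss per sample
mean_loss = tf.reduce_mean(per_sample_loss)     # scalar

g_sum = tf.gradients(per_sample_loss, x)[0]     # 2 * x          -> [2., 4., 6.]
g_mean = tf.gradients(mean_loss, x)[0]          # (2 * x) / 3    -> [0.67, 1.33, 2.]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([g_sum, g_mean]))            # same direction, only a constant 1/N apart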

Reinforcement_learning_TUT/10_A3C/A3C_continuous_action.py

+4-6
@@ -60,8 +60,7 @@ def __init__(self, scope, globalAC=None):

                 td = tf.subtract(self.v_target, self.v, name='TD_error')
                 with tf.name_scope('c_loss'):
-                    self.c_losses = tf.square(td)  # shape (None, 1), use this to get sum of gradients over batch
-                    self.c_loss = tf.reduce_mean(self.c_losses)
+                    self.c_loss = tf.reduce_mean(tf.square(td))

                 with tf.name_scope('wrap_a_out'):
                     mu, sigma = mu * A_BOUND[1], sigma + 1e-4
@@ -73,16 +72,15 @@ def __init__(self, scope, globalAC=None):
                     exp_v = log_prob * td
                     entropy = normal_dist.entropy()  # encourage exploration
                     self.exp_v = ENTROPY_BETA * entropy + exp_v
-                    self.a_losses = -self.exp_v  # shape (None, 1), use this to get sum of gradients over batch
-                    self.a_loss = tf.reduce_mean(self.a_losses)
+                    self.a_loss = tf.reduce_mean(-self.exp_v)

                 with tf.name_scope('choose_a'):  # use local params to choose action
                     self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0], A_BOUND[1])
                 with tf.name_scope('local_grad'):
                     self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                     self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
-                    self.a_grads = tf.gradients(self.a_losses, self.a_params)    # use losses will give accumulated sum of gradients
-                    self.c_grads = tf.gradients(self.c_losses, self.c_params)
+                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
+                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

             with tf.name_scope('sync'):
                 with tf.name_scope('pull'):
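
In the actor loss above, normal_dist.log_prob(self.a_his) and normal_dist.entropy() have simple closed forms. A small numpy sketch (not from this commit; mu, sigma, a and td are made-up numbers) of what those two terms compute for a scalar Gaussian policy:

# Illustrative sketch, not part of the commit.
import numpy as np

def gaussian_log_prob(a, mu, sigma):
    # log N(a | mu, sigma^2)
    return -0.5 * np.log(2 * np.pi * sigma ** 2) - (a - mu) ** 2 / (2 * sigma ** 2)

def gaussian_entropy(sigma):
    # H[N(mu, sigma^2)] = 0.5 * log(2 * pi * e * sigma^2): grows with sigma,
    # so the ENTROPY_BETA * entropy bonus rewards keeping some exploration noise.
    return 0.5 * np.log(2 * np.pi * np.e * sigma ** 2)

mu, sigma, a, td = 0.5, 0.3, 0.2, 1.5
objective = gaussian_log_prob(a, mu, sigma) * td + 0.01 * gaussian_entropy(sigma)
print(-objective)   # the a_loss in the diff is the batch mean of this negated objective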

Reinforcement_learning_TUT/10_A3C/A3C_discrete_action.py

+4-6
@@ -59,22 +59,20 @@ def __init__(self, scope, globalAC=None):

                 td = tf.subtract(self.v_target, self.v, name='TD_error')
                 with tf.name_scope('c_loss'):
-                    self.c_losses = tf.square(td)  # shape (None, 1), use this to get sum of gradients over batch
-                    self.c_loss = tf.reduce_mean(self.c_losses)
+                    self.c_loss = tf.reduce_mean(tf.square(td))

                 with tf.name_scope('a_loss'):
                     log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32), axis=1, keep_dims=True)
                     exp_v = log_prob * td
                     entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True)  # encourage exploration
                     self.exp_v = ENTROPY_BETA * entropy + exp_v
-                    self.a_losses = -self.exp_v  # shape (None, 1), use this to get sum of gradients over batch
-                    self.a_loss = tf.reduce_mean(self.a_losses)
+                    self.a_loss = tf.reduce_mean(-self.exp_v)

                 with tf.name_scope('local_grad'):
                     self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                     self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
-                    self.a_grads = tf.gradients(self.a_losses, self.a_params)    # use losses will give accumulated sum of gradients
-                    self.c_grads = tf.gradients(self.c_losses, self.c_params)
+                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
+                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

             with tf.name_scope('sync'):
                 with tf.name_scope('pull'):
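
In the discrete-action loss above, the one-hot mask picks out the log-probability of the action that was actually taken, and the entropy term favors flatter action distributions. A numpy sketch (not from this commit; the probabilities and td value are made up):

# Illustrative sketch, not part of the commit.
import numpy as np

a_prob = np.array([[0.2, 0.5, 0.3]])                   # policy output for one state, N_A = 3
a_his = np.array([1])                                  # action actually taken

one_hot = np.eye(3)[a_his]                             # [[0., 1., 0.]]
log_prob = np.sum(np.log(a_prob) * one_hot, axis=1)    # log pi(a=1|s) = log(0.5)
entropy = -np.sum(a_prob * np.log(a_prob), axis=1)     # larger for flatter distributions

td = 1.5                                               # advantage estimate (v_target - v)
a_loss = -(log_prob * td + 0.01 * entropy)             # mirrors ENTROPY_BETA * entropy + exp_v, negated
print(log_prob, entropy, a_loss)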
(new file — path not shown in this view)

@@ -0,0 +1,214 @@
"""
Environment is a Robot Arm. The arm tries to get to the blue point.
The environment returns geographic (distance) information for the arm to learn from.

The further the arm is from the blue point, the less the reward; touching the blue point gives r += 1; staying at the blue point for a while gives r = +10.

You can train this RL agent with LOAD = False; after training, the model will be stored in a local folder.
Use LOAD = True to reload the trained model for playing.

You can customize this script in a way you want.

View more on [莫烦Python] : https://morvanzhou.github.io/tutorials/


Requirement:
pyglet >= 1.2.4
numpy >= 1.12.1
tensorflow >= 1.0.1
"""

import multiprocessing
import threading
import tensorflow as tf
import numpy as np
from arm_env import ArmEnv


# np.random.seed(1)
# tf.set_random_seed(1)

MAX_GLOBAL_EP = 2000
MAX_EP_STEP = 300
UPDATE_GLOBAL_ITER = 5
N_WORKERS = multiprocessing.cpu_count()
LR_A = 1e-4    # learning rate for actor
LR_C = 2e-4    # learning rate for critic
GAMMA = 0.9    # reward discount
MODE = ['easy', 'hard']
n_model = 1
GLOBAL_NET_SCOPE = 'Global_Net'
ENTROPY_BETA = 0.01
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0

env = ArmEnv(mode=MODE[n_model])
N_S = env.state_dim
N_A = env.action_dim
A_BOUND = env.action_bound
del env


class ACNet(object):
    def __init__(self, scope, globalAC=None):

        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self._build_net()
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:   # local net, calculate losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

                mu, sigma, self.v = self._build_net()

                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))

                with tf.name_scope('wrap_a_out'):
                    self.test = sigma[0]
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-5

                normal_dist = tf.contrib.distributions.Normal(mu, sigma)

                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)

                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND)
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)

            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self):
        w_init = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 400, tf.nn.relu6, kernel_initializer=w_init, name='la')
            l_a = tf.layers.dense(l_a, 300, tf.nn.relu6, kernel_initializer=w_init, name='la2')
            mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 400, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            l_c = tf.layers.dense(l_c, 200, tf.nn.relu6, kernel_initializer=w_init, name='lc2')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v')  # state value
        return mu, sigma, v

    def update_global(self, feed_dict):  # run by a local
        _, _, t = SESS.run([self.update_a_op, self.update_c_op, self.test], feed_dict)  # local grads applied to global net
        return t

    def pull_global(self):  # run by a local
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s):  # run by a local
        s = s[np.newaxis, :]
        return SESS.run(self.A, {self.s: s})[0]


class Worker(object):
    def __init__(self, name, globalAC):
        self.env = ArmEnv(mode=MODE[n_model])
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            for ep_t in range(MAX_EP_STEP):
                if self.name == 'W_0':
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done = self.env.step(a)
                if ep_t == MAX_EP_STEP - 1: done = True
                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)

                if total_step % UPDATE_GLOBAL_ITER == 0 or done:   # update global and assign to local net
                    if done:
                        v_s_ = 0   # terminal
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    buffer_v_target = []
                    for r in buffer_r[::-1]:    # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()

                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                    }
                    test = self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()

                s = s_
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        "| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
                        '| Var:', test,
                    )
                    GLOBAL_EP += 1
                    break

if __name__ == "__main__":
    SESS = tf.Session()

    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create workers
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i   # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())

    worker_threads = []
    for worker in workers:
        job = lambda: worker.work()
        t = threading.Thread(target=job)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)
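
In Worker.work above, the reversed loop over buffer_r turns the collected rewards into n-step discounted value targets, bootstrapped from the critic's estimate of the state reached after the last step. A standalone sketch (not from this commit; the rewards and bootstrap value are made up):

# Illustrative sketch, not part of the commit.
GAMMA = 0.9
buffer_r = [1.0, 0.0, -0.5, 2.0]    # rewards collected since the last update
v_s_ = 3.0                          # critic's value estimate for the state after the last step

buffer_v_target = []
for r in buffer_r[::-1]:            # walk backwards from the newest reward
    v_s_ = r + GAMMA * v_s_
    buffer_v_target.append(v_s_)
buffer_v_target.reverse()

# buffer_v_target[0] == 1.0 + 0.9*0.0 + 0.9**2*(-0.5) + 0.9**3*2.0 + 0.9**4*3.0
print(buffer_v_target)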

Reinforcement_learning_TUT/experiments/Robot_arm/DDPG.py

+1
@@ -9,6 +9,7 @@

 You can customize this script in a way you want.

+View more on [莫烦Python] : https://morvanzhou.github.io/tutorials/

 Requirement:
 pyglet >= 1.2.4

Reinforcement_learning_TUT/experiments/Robot_arm/arm_env.py

+6-3
@@ -1,7 +1,10 @@
 """
 Environment for Robot Arm.
 You can customize this script in a way you want.
-
+
+View more on [莫烦Python] : https://morvanzhou.github.io/tutorials/
+
+
 Requirement:
 pyglet >= 1.2.4
 numpy >= 1.12.1
@@ -101,13 +104,13 @@ def _r_func(self, distance):
         t = 50
         abs_distance = np.sqrt(np.sum(np.square(distance)))
         r = -abs_distance/200
-        if abs_distance < self.viewer.point_l and (not self.get_point):
+        if abs_distance < self.point_l and (not self.get_point):
             r += 1.
             self.grab_counter += 1
             if self.grab_counter > t:
                 r += 10.
                 self.get_point = True
-        elif abs_distance > self.viewer.point_l:
+        elif abs_distance > self.point_l:
             self.grab_counter = 0
             self.get_point = False
         return r
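
The arm_env.py change above makes _r_func read point_l from the environment itself rather than from the viewer, presumably so the reward does not depend on the render window having been created. A standalone sketch (not from this commit; point_l and the distance vector are made-up numbers) of the reward shaping that function implements:

# Illustrative sketch, not part of the commit.
import numpy as np

def r_func(distance, point_l, grab_counter, get_point, t=50):
    abs_distance = np.sqrt(np.sum(np.square(distance)))
    r = -abs_distance / 200                        # small penalty growing with distance
    if abs_distance < point_l and not get_point:   # finger is inside the blue point
        r += 1.
        grab_counter += 1
        if grab_counter > t:                       # stayed on the point long enough
            r += 10.
            get_point = True
    elif abs_distance > point_l:                   # drifted away again
        grab_counter = 0
        get_point = False
    return r, grab_counter, get_point

print(r_func(distance=np.array([5., 5.]), point_l=15, grab_counter=0, get_point=False))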
