我试图创建一个强化学习代理,可以购买、出售或持有股票头寸。我的问题是,即使在超过2000集之后,代理仍然无法学会何时买入、卖出或持有。这是第2100集的一张图片,详细说明了我的意思:代理不会采取任何行动,除非是随机的。 代理使用回放内存(replay memory)进行学习,我已经反复检查,以确保没有错误。代理代码如下:

import numpy as np
import tensorflow as tf
import random
from collections import deque
from agent import Agent
class Agent(Agent):
    """DQN-style trading agent that learns to buy, sell or hold a stock
    position from a replay memory of (state, action, reward, next_state,
    done) transitions.

    Built on the TensorFlow 1.x graph/session API (tf.contrib layers,
    tf.Session); not compatible with TF 2.x eager mode.
    """

    def __init__(self, state_size=7, window_size=1, action_size=3,
                 batch_size=32, gamma=.95, epsilon=.95, epsilon_decay=.95,
                 epsilon_min=.01, learning_rate=.001, is_eval=False,
                 model_name="", stock_name="", episode=1):
        """
        state_size: Size of the state coming from the environment
        window_size: Time steps per state (unused by the dense network)
        action_size: How many decisions the algo will make in the end
        batch_size: Number of transitions trained on per replay() call
        gamma: Decay rate to discount future reward
        epsilon: Rate of randomly decided action
        epsilon_decay: Rate of decrease in epsilon.
            NOTE(review): decay is applied on *every* replay() call, so .95
            drives epsilon to epsilon_min within ~60 updates and the agent
            stops exploring almost immediately; a value much closer to 1.0
            (e.g. .995 or .9995) is usually needed.
        epsilon_min: The lowest epsilon can get (limit to the randomness)
        learning_rate: Progress of neural net in each iteration
        is_eval: If True, restore a saved checkpoint instead of training
        model_name / stock_name: Used to build checkpoint/summary paths
        episode: Which saved episode checkpoint to restore in eval mode
        """
        self.state_size = state_size
        self.window_size = window_size
        self.action_size = action_size
        self.batch_size = batch_size
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.learning_rate = learning_rate
        self.is_eval = is_eval
        self.model_name = model_name
        self.stock_name = stock_name
        self.q_values = []
        self.layers = [150, 150, 150]

        tf.reset_default_graph()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Bounded replay buffer: the original unbounded deque grows without
        # limit and keeps arbitrarily stale transitions forever.
        self.memory = deque(maxlen=10000)

        if self.is_eval:
            # Evaluation mode: rebuild the graph and restore the latest
            # checkpoint saved for this stock/episode.
            model_name = stock_name + "-" + str(episode)
            self._model_init()
            self.saver = tf.train.Saver()
            self.saver.restore(
                self.sess,
                tf.train.latest_checkpoint("models/{}/{}".format(stock_name, model_name)))
        else:
            # Training mode: fresh variables plus a TensorBoard writer.
            # NOTE(review): self.writer only exists in this branch, so
            # replay() must not be called when is_eval is True.
            self._model_init()
            self.sess.run(self.init)
            self.saver = tf.train.Saver()
            path = "models/{}/6".format(self.stock_name)
            self.writer = tf.summary.FileWriter(path)
            self.writer.add_graph(self.sess.graph)

    def _model_init(self):
        """Build the Q-network graph: four 512-unit ReLU layers feeding a
        linear head with one Q-value per action, MSE loss against the target
        Q-values, RMSProp optimizer, and TensorBoard summaries."""
        with tf.device("/device:GPU:0"):
            with tf.name_scope("Inputs"):
                self.X_input = tf.placeholder(tf.float32, [None, self.state_size], name="Inputs")
                self.Y_input = tf.placeholder(tf.float32, [None, self.action_size], name="Actions")
                self.rewards = tf.placeholder(tf.float32, [None, ], name="Rewards")

            fc1 = tf.contrib.layers.fully_connected(self.X_input, 512, activation_fn=tf.nn.relu)
            fc2 = tf.contrib.layers.fully_connected(fc1, 512, activation_fn=tf.nn.relu)
            fc3 = tf.contrib.layers.fully_connected(fc2, 512, activation_fn=tf.nn.relu)
            fc4 = tf.contrib.layers.fully_connected(fc3, 512, activation_fn=tf.nn.relu)
            # Linear output head: raw Q-value estimates, no activation.
            self.logits = tf.contrib.layers.fully_connected(fc4, self.action_size, activation_fn=None)

            # Name kept for checkpoint compatibility; the loss is actually
            # mean squared error, not cross-entropy.
            with tf.name_scope("Cross_Entropy"):
                self.loss_op = tf.losses.mean_squared_error(self.Y_input, self.logits)
                self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
                self.train_op = self.optimizer.minimize(self.loss_op)

            tf.summary.scalar("Reward", tf.reduce_mean(self.rewards))
            tf.summary.scalar("MSE", self.loss_op)
            # Merge all of the summaries.
            self.summ = tf.summary.merge_all()
            self.init = tf.global_variables_initializer()

    def remember(self, state, action, reward, next_state, done):
        """Store one (s, a, r, s', done) transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        """Epsilon-greedy action selection: a random action with probability
        epsilon during training, otherwise the argmax of the predicted
        Q-values for `state`."""
        if np.random.rand() <= self.epsilon and not self.is_eval:
            prediction = random.randrange(self.action_size)
            if prediction == 1 or prediction == 2:
                print("Random")
            return prediction
        act_values = self.sess.run(
            self.logits,
            feed_dict={self.X_input: state.reshape((1, self.state_size))})
        return np.argmax(act_values[0])

    def replay(self, time, episode):
        """Sample a mini-batch from the replay memory and do one gradient step.

        Fixes vs. the original implementation:
        * samples `batch_size` transitions uniformly at random — the original
          took the *last* batch_size - 1 transitions, which left one all-zero
          row in x/y on every update and trained on highly correlated samples;
        * the TD target is a local variable, so a terminal transition no
          longer reuses the stale `self.target` from the previous iteration.
        """
        print("Replaying")
        # Uniform random sampling decorrelates consecutive transitions;
        # requires len(self.memory) >= batch_size (the caller only invokes
        # replay once the memory exceeds the batch size).
        mini_batch = random.sample(self.memory, self.batch_size)

        mean_reward = []
        x = np.zeros((self.batch_size, self.state_size))
        y = np.zeros((self.batch_size, self.action_size))
        for i, (state, action, reward, next_state, done) in enumerate(mini_batch):
            # TD target: immediate reward, plus discounted best future
            # Q-value for non-terminal transitions.
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(
                    self.sess.run(
                        self.logits,
                        feed_dict={self.X_input: next_state.reshape((1, self.state_size))})[0])
            current_q = self.sess.run(
                self.logits,
                feed_dict={self.X_input: state.reshape((1, self.state_size))})
            # Only the taken action's Q-value is pushed toward the target;
            # the other actions keep the network's own estimates.
            current_q[0][action] = target
            x[i] = state
            y[i] = current_q.reshape((self.action_size,))
            mean_reward.append(target)

        _, c, s = self.sess.run(
            [self.train_op, self.loss_op, self.summ],
            feed_dict={self.X_input: x, self.Y_input: y, self.rewards: mean_reward})
        self.writer.add_summary(s, global_step=(episode + 1) / (time + 1))

        # Decay exploration once per training step. NOTE(review): with the
        # default epsilon_decay=.95 this collapses epsilon almost instantly;
        # see the class docstring — prefer a decay much closer to 1.0.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
一旦回放内存大于批大小,它就会运行 replay 函数。代码可能看起来有点混乱,因为我已经花了好几天的时间试图弄清楚这一点。这是来自 TensorBoard 的 MSE 的截图。 正如你在第200集看到的,MSE 逐渐衰减到 0 或几乎为 0。我被难住了!我不知道发生了什么事。请帮我弄清楚。完整代码(包括 train 和 eval 文件)发布在这里以供查看。我的主代理是 agent 文件夹中的第一个文件。谢谢!
正如在问题的评论中所讨论的,这似乎是一个高学习率衰退的问题。你知道吗
从本质上讲,每一集你的学习率乘以某个因子,这意味着你在n集/期之后的学习率将等于
lr = initial_lr * decay_factor^n。在你的示例中,衰减因子设置为 0.95,这意味着仅经过几次迭代之后,学习率就已经显著下降。随后,更新将只进行微小的修正,而无法"学习"非常重要的变化了。
这就引出了一个问题:为什么衰变是有意义的?一般来说,我们希望达到一个局部最优(可能非常狭窄)。为了做到这一点,我们试图“相对接近”这样一个最小值,然后只做较小的增量,使我们达到这个最佳值。如果我们继续保持原来的学习速度,那可能是我们每次都只是跳过最优解,永远达不到目标。 从视觉上看,问题可以用以下图形概括:
另一种方法除了衰减是简单地减少一定数量的学习率,一旦算法没有达到任何重要的更新了。这就避免了单纯通过多个情节来降低学习率的问题。你知道吗
在您的例子中,更高的衰减值(即较慢的衰减)似乎已经有很大帮助。你知道吗
强化学习中的Q值并不代表“奖励”,而是“回报”,即当前奖励和未来奖励的总和。当你的模型进入所有零动作的“死胡同”时,根据你的设置,奖励将为零。然后经过一段时间,你的回放将充满'零的行动给零的奖励'的记忆,所以无论你如何更新你的模型,它不能走出这个死胡同。你知道吗
正如@dennlinger所说,你可以增加你的epsilon,让你的模型有一些新的记忆来更新,你也可以使用优先的经验回放来训练“有用的”经验。你知道吗
不过,我建议你先看看环境本身。你的模型输出为零,因为没有更好的选择,是真的吗?正如你所说,你在交易股票,你确定有足够的信息,导致战略,将导致你的回报大于零?我认为在你对它进行任何调整之前,你需要先考虑一下这个问题。例如,如果股票是在一个纯粹的随机50/50的机会上下波动,那么你永远不会找到一个策略,使平均回报大于零。你知道吗
强化学习代理可能已经找到了最好的一个,尽管这不是你想要的。你知道吗
相关问题 更多 >
编程相关推荐