How to reduce overfitting in a CNN stock prediction model

Posted 2024-04-29 15:49:04


I am trying to predict stock movement with a 1D CNN model. How can I reduce the overfitting against my validation set? Any ideas?

The model takes historical open, close, high, low and volume prices as input, so each input is a window_size × 5 matrix. The output looks at the next day's low price relative to the last day in the window: the sample is labeled "up" if it rises and "down" if it falls. I built the model following this paper.
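
To make the setup concrete, the windowing and labeling could be sketched like this (build_dataset, the column order and the LOW index are my own illustration, not taken from the linked code):

import numpy as np

def build_dataset(prices, window=30):
    """prices: array of shape (days, 5), columns assumed to be
    [open, close, high, low, volume]."""
    X, y = [], []
    LOW = 3  # index of the 'low' column (assumption)
    for i in range(len(prices) - window):
        X.append(prices[i:i + window])
        # Label 'up' if the next day's low rises above the window's last low
        up = prices[i + window, LOW] > prices[i + window - 1, LOW]
        y.append([1, 0] if up else [0, 1])
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)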

In that paper, the author uses 6 conv layers, with 32, 64 and 128 filters in the first three layers and 256 filters in each of the last three. I have tried:

  • Adjusting the window size (7, 14, 21, 30)
  • Adjusting the kernel size in the conv layers
  • Adjusting the number of filters in the conv layers
  • Loss function: softmax cross-entropy
  • Adding dropout layers (see the sketch after this list)
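
For the dropout item, inserting dropout between conv blocks would follow this pattern, mirroring Layer-1 of the code further down (a sketch: the 0.3 rate is illustrative, and is_training_ is an extra tf.bool placeholder so dropout can be switched off during validation):

is_training_ = tf.placeholder(tf.bool, name='is_training')

conv = tf.layers.conv1d(inputs=inputs_, filters=32, kernel_size=3, strides=1, padding='valid', activation=tf.nn.relu)
conv = tf.layers.batch_normalization(conv)
conv = tf.layers.dropout(conv, rate=0.3, training=is_training_)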

Here is the dataset I have been using: csv file

Here is a link to the full code

Here is the model, using TensorFlow:

import numpy as np
import tensorflow as tf

# get_batches, X_tr, y_tr, X_vld and y_vld come from the full code linked above

WINDOW = 30
n_channels = 5   # open, close, high, low, volume
n_classes = 2    # up / down
batch_size = 100 # Batch size
learning_rate = 0.0001
epochs = 10000

graph = tf.Graph()

# Construct placeholders
with graph.as_default():
    inputs_ = tf.placeholder(tf.float32, [None, WINDOW, n_channels], name = 'inputs')
    labels_ = tf.placeholder(tf.float32, [None, n_classes], name = 'labels')
    keep_prob_ = tf.placeholder(tf.float32, name = 'keep')
    learning_rate_ = tf.placeholder(tf.float32, name = 'learning_rate')

with graph.as_default():
    # Layer-1
    conv = tf.layers.conv1d(inputs=inputs_, filters=32, kernel_size=3, strides=1, padding='valid', activation=tf.nn.relu)
    conv = tf.layers.batch_normalization(conv)

    # Layer-2
    conv = tf.layers.conv1d(inputs=conv, filters=64, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv)

    # Layer-3
    conv = tf.layers.conv1d(inputs=conv, filters=128, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv)

    # Layer-4
    conv = tf.layers.conv1d(inputs=conv, filters=256, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv)

    # Layer-5
    conv = tf.layers.conv1d(inputs=conv, filters=256, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv)

    # Layer-6
    conv = tf.layers.conv1d(inputs=conv, filters=256, kernel_size=3, strides=1, padding='valid')
    conv = tf.nn.leaky_relu(conv, alpha=0.01)
    conv = tf.layers.batch_normalization(conv)

with graph.as_default():
    # Flatten and add dropout

    flat = tf.layers.flatten(conv)
    flat = tf.nn.dropout(flat, rate=1 - keep_prob_)
    dense = tf.layers.dense(inputs=flat, units=256)
    dense = tf.nn.dropout(dense, rate=1 - keep_prob_)
    dense = tf.layers.dense(inputs=dense, units=256)
    dense = tf.nn.dropout(dense, rate=1 - keep_prob_)

    # Predictions (built on `dense`, not `flat`, so the fully connected
    # layers above actually feed the output)
    logits = tf.layers.dense(dense, n_classes)

    # Cost function and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels_))
    optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost)
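
    # Note: tf.layers.batch_normalization registers its moving-average
    # updates in tf.GraphKeys.UPDATE_OPS, and those ops are not run
    # automatically. A common TF1.x pattern is to wrap the optimizer:
    #   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    #   with tf.control_dependencies(update_ops):
    #       optimizer = tf.train.AdamOptimizer(learning_rate_).minimize(cost)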

    # Accuracy
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(labels_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

validation_acc = []
validation_loss = []

train_acc = []
train_loss = []

with graph.as_default():
  saver = tf.train.Saver()

with tf.Session(graph=graph) as sess:
  sess.run(tf.global_variables_initializer())
  iteration = 1

  for e in range(epochs):

      # Loop over batches
      for x,y in get_batches(X_tr, y_tr, batch_size):
          # Feed dictionary
          feed = {inputs_ : x, labels_ : y, keep_prob_ : 0.2, learning_rate_ : learning_rate}

          # Loss
          loss, _ , acc = sess.run([cost, optimizer, accuracy], feed_dict = feed)
          train_acc.append(acc)
          train_loss.append(loss)

          # Print training metrics every 50 iterations
          if (iteration % 50 == 0):
              print("Epoch: {}/{}".format(e, epochs),
                    "Iteration: {:d}".format(iteration),
                    "Train loss: {:6f}".format(loss),
                    "Train acc: {:.6f}".format(acc))

          # Compute validation metrics every 100 iterations
          if (iteration % 100 == 0):
              val_acc_ = []
              val_loss_ = []

              for x_v, y_v in get_batches(X_vld, y_vld, batch_size):
                  # Feed
                  feed = {inputs_ : x_v, labels_ : y_v, keep_prob_ : 1.0}  

                  # Loss
                  loss_v, acc_v = sess.run([cost, accuracy], feed_dict = feed)                    
                  val_acc_.append(acc_v)
                  val_loss_.append(loss_v)
              # Print info
              print("Epoch: {}/{}".format(e, epochs),
                    "Iteration: {:d}".format(iteration),
                    "Validation loss: {:6f}".format(np.mean(val_loss_)),
                    "Validation acc: {:.6f}".format(np.mean(val_acc_)))

              # Store
              validation_acc.append(np.mean(val_acc_))
              validation_loss.append(np.mean(val_loss_))

          # Iterate 
          iteration += 1

  saver.save(sess,"/checkpoints-cnn/har.ckpt")
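
For reference, get_batches is defined in the full code linked above; a minimal sequential batcher consistent with how it is called here might look like this (a sketch, not necessarily the exact implementation):

def get_batches(X, y, batch_size=100):
    """Yield consecutive (x, y) mini-batches, dropping any remainder."""
    n_batches = len(X) // batch_size
    X, y = X[:n_batches * batch_size], y[:n_batches * batch_size]
    for b in range(0, len(X), batch_size):
        yield X[b:b + batch_size], y[b:b + batch_size]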

Here are my results:

[Images: training/validation loss and accuracy curves]
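
One thing not on the list above that I could still try is early stopping on the stored validation loss, i.e. breaking out of the epoch loop once it stops improving for a while. A minimal sketch (the patience value is illustrative):

patience = 10  # validation checks without improvement to tolerate
best_val_loss = float('inf')
checks_without_improvement = 0

def should_stop(val_loss):
    """Return True once val_loss has failed to improve `patience` times."""
    global best_val_loss, checks_without_improvement
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        checks_without_improvement = 0
    else:
        checks_without_improvement += 1
    return checks_without_improvement >= patience

Calling should_stop(np.mean(val_loss_)) right after the validation block and breaking out of both loops when it returns True would cap training well before the full 10000 epochs.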

