使用Python TensorFlow和Keras的预测模型,预测时出错
我正在尝试根据训练数据进行预测。
我使用了10组训练数据,包括正确的输入和输出,训练结果的准确率是80%。
在训练完模型后,我用一个输入值进行测试,但结果完全不匹配。
注意:我用上面这些输入和输出数字训练了模型,但用下面这个新输入进行测试时,没有得到预期的结果。
输入:61005363596
结果:35210192
有什么解决办法吗?
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
# Training inputs: 15 account-number-like digit strings.
input_data_strings = [
    "61002738295", "61001799553", "61002559170", "61001872384",
    "61002034257", "61001608325", "61001979114", "61001523797",
    "61002991589", "61001764730", "61000101645", "61002509001",
    "61001273013", "61000070543", "61001387706",
]
def to_decimal(Input_value):
    """Convert a digit string to a float.

    The original expression ``Input_value[0] + Input_value[1:]`` just
    re-concatenated the unchanged string before converting, so the
    slicing is dropped: ``float`` on the whole string is equivalent.

    Args:
        Input_value: string of digits, e.g. "61002738295".

    Returns:
        The numeric value of the full string as a float.
    """
    return float(Input_value)
# Convert every input string to a float, then z-score normalize both the
# inputs and the target codes so the network trains on unit-scale data.
input_decimals = np.array(list(map(to_decimal, input_data_strings)))

# Entry results (regression targets paired with the inputs above).
code = np.array([
    31211436, 21812418, 68923185, 91714338, 41860727,
    55489164, 12635158, 24520971, 23118193, 27233139,
    18433122, 50231115, 37236137, 52134193, 13135168,
])

mean_input = np.mean(input_decimals)
std_input = np.std(input_decimals)
input_normalized = (input_decimals - mean_input) / std_input

mean_code = np.mean(code)
std_code = np.std(code)
code_normalized = (code - mean_code) / std_code
# Hyperparameters.
batch_size = 15               # full batch: all 15 samples per gradient step
hidden_layer_size = 512       # width of the first hidden layer
loss_function = 'mean_squared_error'
optimizer = keras.optimizers.Adam(learning_rate=0.001)
epochs = 8000
# Model: a funnel of fully-connected layers, 512 -> 256 -> ... -> 8 -> 1,
# taking a single normalized scalar and regressing a single scalar.
_hidden_widths = (256, 128, 64, 32, 16, 8)
model = keras.Sequential(
    [keras.layers.Dense(units=hidden_layer_size, activation="relu", input_shape=[1])]
    + [keras.layers.Dense(units=w, activation="relu") for w in _hidden_widths]
    + [keras.layers.Dense(units=1, activation="linear")]
)
model.compile(
    optimizer=optimizer,
    loss=loss_function,
)
# Model training: fit on the 15 normalized (input, code) pairs.
# NOTE(review): 8000 epochs over 15 samples will memorize the training set;
# if the input->code mapping is arbitrary (not a learnable function), the
# network cannot extrapolate to unseen inputs -- likely the reported problem.
print("Starting training for the model...")
historial = model.fit(input_normalized, code_normalized,
epochs=epochs, batch_size=batch_size, verbose=False)
print("Trained model")
# Display of loss during training
plt.xlabel("# Epoch")
plt.ylabel("Loss Magnitude")
plt.plot(historial.history["loss"])
plt.show()
# Function to check predictions with the prediction data.
def verify_prediction(modelo, Inputs, mean, std):
    """Run the model on a list of input strings and denormalize the outputs.

    Args:
        modelo: trained Keras model mapping a normalized input to a
            normalized code.
        Inputs: iterable of digit strings accepted by ``to_decimal``.
        mean, std: normalization statistics for the *inputs*.

    Returns:
        A list with one (1, 1) prediction array per input, denormalized
        with the module-level ``mean_code`` / ``std_code``.
    """
    # Batch every input into a single predict() call instead of one call
    # per item -- identical results, far fewer model executions.
    numeric = np.array([to_decimal(s) for s in Inputs])
    normalized = (numeric - mean) / std
    denormalized = modelo.predict(normalized) * std_code + mean_code
    # Preserve the original return shape: a list of (1, 1) arrays.
    return [denormalized[i:i + 1] for i in range(len(denormalized))]
# Sanity check: predict on the training inputs themselves; after 8000
# epochs the model should reproduce the training targets closely.
training_prediction_result = verify_prediction(
    model, input_data_strings, mean_input, std_input
)
print("Prediction results for training Inputs:")
for input_str, predicted in zip(input_data_strings, training_prediction_result):
    print("Input:", input_str, "Result data:", predicted)
# Predict the code for a single new input string.
def predict_new_value(modelo, input_new, mean, std):
    """Normalize one input string, run the model, and denormalize the result.

    Uses the module-level ``std_code`` / ``mean_code`` to map the model
    output back to the original code scale.
    """
    normalized = (np.array([to_decimal(input_new)]) - mean) / std
    return modelo.predict(normalized) * std_code + mean_code
# New value to predict. Input: 61005363596, expected result: 35210192.
# NOTE(review): this expected value cannot be recovered by regressing on 15
# unrelated (input, code) pairs -- the model can only interpolate its
# memorized training targets, which explains the mismatch reported above.
input_new_strings = [
    "61005363596",
]

# The manual normalization that used to live here (input_decimal_new /
# input_normalized_new) was dead code: predict_new_value() already
# normalizes internally, so those unused locals are removed.
prediction_result_new = predict_new_value(model, input_new_strings[0], mean_input, std_input)
print("\nPrediction results for the new Input:")
print("Input new:", input_new_strings[0], "Result:", prediction_result_new)
0 个回答
暂无回答