How can I compress a CSV file so it can be used with a neural network?

Posted 2024-04-28 12:25:58


I have Python code for a neural network. I want to train the network to recognize whether an image is a technical drawing. For this I have a training set of about 45 files and a test set of about 2000 files. I resized all the files to 28x28 grayscale images and converted them into one train CSV file and one test CSV file. Then I adjusted the number of output nodes and changed the paths to the two CSV files. The problem is that the CSV files are far too large: test.csv alone is over 14 GB. As far as I can tell, testing the neural network with a test file that size is impossible, because my computer only has 8 GB of RAM. Is there a way to compress the CSV files without losing information?
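One standard lossless option is to gzip the CSV and then read it back as a stream rather than loading it whole. A minimal sketch using only the Python standard library (the path matches the code below; the .gz output name is my own choice):

import gzip
import shutil

# One-off conversion: compress test.csv losslessly with gzip.
# Rows of pixel values contain long runs of repeated digits, so they
# compress well, and gzip.open() can still read the result line by line.
with open("C:/Users/Anwender/Documents/Uni/KI/Python/test.csv", "rb") as src:
    with gzip.open("C:/Users/Anwender/Documents/Uni/KI/Python/test.csv.gz", "wb") as dst:
        shutil.copyfileobj(src, dst)

Since gzip.open(..., "rt") behaves like an ordinary text file object, the reading loops below need almost no changes to work on the compressed file directly.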

Here is the code for the neural network:

import numpy as np
import scipy.special
from tqdm import tqdm

class neuralNetwork:

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        self.inode = inputnodes
        self.hnode = hiddennodes
        self.onode = outputnodes
        # weights drawn from a normal distribution, with standard deviation
        # scaled by the inverse square root of the receiving layer's size
        self.wih = np.random.normal(0.0, pow(self.hnode, -0.5), (self.hnode, self.inode))
        self.who = np.random.normal(0.0, pow(self.onode, -0.5), (self.onode, self.hnode))
        self.lr = learningrate
        # sigmoid activation function
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T
        # forward pass
        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # backpropagate the errors
        output_errors = targets - final_outputs
        hidden_errors = np.dot(self.who.T, output_errors)
        # gradient-descent weight updates (sigmoid derivative = out * (1 - out))
        self.who += self.lr * np.dot(output_errors * final_outputs * (1.0 - final_outputs),
                                     hidden_outputs.T)
        self.wih += self.lr * np.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
                                     inputs.T)

    def test(self, inputs_list):
        # forward pass only: query the network for its outputs
        inputs = np.array(inputs_list, ndmin=2).T
        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs

# network dimensions and training settings
input_nodes = 784        # 28 x 28 pixels
hidden_nodes = 500
output_nodes = 100000
learning_rate = 0.1
epochs = 5

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

with open("C:/Users/Anwender/Documents/Uni/KI/Python/train.csv", 'r') as training_data_file:
    training_data_list = training_data_file.readlines()

for e in range(epochs):
    for record in tqdm(training_data_list):
        all_values = record.split(',')
        # rescale pixel values from 0..255 into 0.01..1.00
        inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        # target vector: 0.01 everywhere except 0.99 at the correct label
        targets = np.zeros(output_nodes) + 0.01
        targets[int(float(all_values[0]))] = 0.99
        n.train(inputs, targets)

with open("C:/Users/Anwender/Documents/Uni/KI/Python/test.csv", 'r') as test_data_file:
    test_data_list = test_data_file.readlines()

scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
    outputs = n.test(inputs)
    # the index of the highest output value is the network's answer
    label = np.argmax(outputs)
    if label == correct_label:
        scorecard.append(1)
    else:
        scorecard.append(0)

scorecard_array = np.asarray(scorecard)
print("Accuracy = ", scorecard_array.sum() / scorecard_array.size)
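If compression alone is not enough, note that the real memory cost above comes from readlines() materialising the whole 14 GB file as a Python list. Here is a hedged sketch of a streaming variant of the test loop (my own adaptation, not part of the original code; it assumes the trained network n from above and the test.csv.gz file produced by the earlier compression sketch):

import gzip
import numpy as np

# Streaming evaluation: iterating over the file object yields one line at
# a time, so memory use stays flat no matter how large the file is.
scorecard = []
with gzip.open("C:/Users/Anwender/Documents/Uni/KI/Python/test.csv.gz", "rt") as f:
    for record in f:
        all_values = record.split(',')
        correct_label = int(all_values[0])
        inputs = (np.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        outputs = n.test(inputs)
        scorecard.append(1 if np.argmax(outputs) == correct_label else 0)

print("Accuracy = ", sum(scorecard) / len(scorecard))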


