Tensorflow会话未运行

2024-06-02 06:58:06 发布

您现在位置:Python中文网/ 问答频道 /正文

我只是一个深入学习领域的初学者,所以我的高级道歉问愚蠢的疑问。 我试图用tf.nn.rnn_cell.LSTMCell实现一个神经网络模型。我使用的是jupyter笔记本,我编写了以下代码:

def tokenize(text):
    l = []
    for word in text.strip().split(' '):
        if word not in s:
            l.append(vocabulary[word])
    while len(l)<m:
        l.append(0)
    return np.array(l,dtype=np.float32)

def tokenize_tags(text):
    l = np.zeros((1,dic_len),dtype=np.float32)
    for word in text.strip().split(' '):
        l[0,dic[word]] = 1.0
    return l

class Predictor:
    def __init__(self):
        print('Object creation started')
        self.x = tf.placeholder(dtype=tf.float32,shape=[None,m,1])
        self.y = tf.placeholder(dtype=tf.float32,shape=[None,tag_num])
        self.n_hidden = 24
        self.weights = {'out': tf.Variable(tf.random_normal([self.n_hidden,tag_num]))}
        self.biases = {'out': tf.Variable(tf.random_normal([tag_num]))}
        self.rnn1 = tf.nn.rnn_cell.LSTMCell(num_units=self.n_hidden)
        self.rnn2 = tf.nn.rnn_cell.LSTMCell(num_units=self.n_hidden)
        self.rnn3 = tf.nn.rnn_cell.MultiRNNCell([self.rnn1,self.rnn2])
        self.y_1 = self.predict(self.x)
        self.loss = tf.reduce_mean(tf.square(tf.nn.softmax_cross_entropy_with_logits(logits=self.y_1,labels=self.y)))
        self.opt = tf.train.AdamOptimizer(learning_rate=0.001).minimize(self.loss)
        print('Object creation over')
    def predict(self,arr):
        arr = tf.reshape(arr,[-1,m])
        arr = tf.split(arr,m,1)
        output,states = tf.nn.static_rnn(self.rnn3,arr,dtype=tf.float32)
        return tf.matmul(output[-1],self.weights['out'])+self.biases['out']
    def train(self):
        df1 = df.sample(n=1000,random_state=1)
        with tf.Session() as ss:
            ss.run(tf.global_variables_initializer())
            losses = []
            for i in range(1000):
                input_x = tokenize(str(df['text'][i]))
                input_y = tokenize_tags(str(df['Tags'][i]))
                _,cost = ss.run([self.opt,self.loss],feed_dict={self.x:input_x,self.y:input_y})
                if(i%100==0):
                    print("cost = ",cost)
                if(i%10==0):
                    losses.append(cost)
        plt.plot(losses)
        plt.show()

此代码包含一个函数,用于对字符串进行标记并将其更改为一系列整数,以及我的模型的类定义。 现在我尝试创建这个类的一个对象:

prd = Predictor()

这就是我的代码停止工作的地方。这个单元需要花费大量的时间来执行,同时,我的系统的性能正在以惊人的速度下降。所以每次我都要停止脚本的执行

我试图在构造函数的开头和结尾打印开始和结束消息。第一个正在打印,但最后一个没有打印

这张图似乎陷入了一个无限循环。我哪里出错了?我甚至尝试了一个稍微不同的类定义(忽略每行末尾的换行符,我从一个json对象获取它):

class Predictor:\n",
        def __init__(self):\n",
            print('Object creation started')\n",
            with tf.variable_scope(\"Nodes\",reuse=tf.AUTO_REUSE):\n",
                self.x = tf.placeholder(dtype=tf.float32,shape=[None,m,1])\n",
                self.y = tf.placeholder(dtype=tf.float32,shape=[None,tag_num])\n",
                self.n_hidden = 24\n",
                self.weights = {'out': tf.Variable(tf.random_normal([self.n_hidden,tag_num]))}\n",
                self.biases = {'out': tf.Variable(tf.random_normal([tag_num]))}\n",
                self.rnn1 = tf.nn.rnn_cell.LSTMCell(num_units=self.n_hidden)\n",
                self.rnn2 = tf.nn.rnn_cell.LSTMCell(num_units=self.n_hidden)\n",
                self.rnn3 = tf.nn.rnn_cell.MultiRNNCell([self.rnn1,self.rnn2])\n",
                #self.outputs,self.states = tf.nn.static_rnn(self.rnn3,self.x,dtype=tf.float32)\n",
            print('Object creation over')\n",
        def predict(self,arr):\n",
            arr = tf.reshape(arr,[-1,m])\n",
            arr = tf.split(arr,m,1)\n",
            output,states = tf.nn.static_rnn(self.rnn3,arr,dtype=tf.float32)\n",
            return tf.matmul(output[-1],self.weights['out'])+self.biases['out']\n",
        def train(self):\n",
            with tf.variable_scope(\"losses\",reuse=tf.AUTO_REUSE):\n",
                y_1 = self.predict(self.x)\n",
                loss = tf.reduce_mean(tf.square(tf.nn.softmax_cross_entropy_with_logits(logits=y_1,labels=self.y)))\n",
                opt = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)\n",
            with tf.variable_scope(\"saver\",reuse=tf.AUTO_REUSE):\n",
                saver = tf.train.Saver()\n",
            print(\"Starting the Computation\")\n",
            with tf.Session() as ss:\n",
                ss.run(tf.global_variables_initializer())\n",
                losses = []\n",
                for i in range(1000):\n",
                    print('Iteration: ',i)\n",
                    input_x = tokenize(str(df1['text'][i]))\n",
                    input_y = tokenize_tags(str(df1['Tags'][i]))\n",
                    _,cost = ss.run([opt,loss],feed_dict={self.x:input_x,self.y:input_y})\n",
                    if(i%100==0):\n",
                        print(\"cost = \",cost)\n",
                    if(i%10==0):\n",
                        losses.append(cost)\n",
                print('Training over')\n",
                saver.save(ss,'test_model')\n",
            plt.plot(losses)\n",
            plt.show()"

这一次,对象被创建了,但是它被train()函数击中了

有人能指出我哪里出错了吗

PS:参数m和tag_num分别为1753和17998


Tags: selfinputtfdefcellnnoutnum