QThread Tensorflow graph ValueError: Tensor must be from the same graph as Tensor

Posted 2024-04-24 14:07:35


I am getting a graph error in Tensorflow when using QThread. I am using the code from https://github.com/FingerRec/real_time_video_action_recognition.git to run human action recognition in TF. The code works fine on its own, but I run into a problem when I try to use threading.

I tried to separate the inference code from the main thread to speed things up.

Here is the code:

    """
    build c3d model
    :return:
    norm_score:
    sess:
    """
    #model_name = "pretrained_model/c3d_ucf101_finetune_whole_iter_20000_TF.model.mdlp"
    #model_name = "pretrained_model/conv3d_deepnetA_sport1m_iter_1900000_TF.model"
    model_name = "pretrained_model/sports1m_finetuning_ucf101.model"
    # model_name = "new_model2/c3d_ucf_model-499"
    # Get the sets of images and labels for training, validation, and
    with tf.variable_scope('var_name') as var_scope:
        weights = {
            'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
            'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
            'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04, 0.00),
            'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04, 0.00),
            'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04, 0.00),
            'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04, 0.00),
            'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04, 0.00),
            'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04, 0.00),
            'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
            'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
            'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.04, 0.005)
        }
        biases = {
            'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
            'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
            'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
            'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
            'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
            'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
            'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
            'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
            'bd1': _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
            'bd2': _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
            'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04, 0.0),
        }
    logits = []
    # tf.reset_default_graph()
    # build one inference tower per GPU (note: the input slice below always
    # takes index 0, regardless of gpu_index)
    for gpu_index in range(0, gpu_num):
        with tf.device('/gpu:%d' % gpu_index):
            logit = c3d_model.inference_c3d(
                images_placeholder[0 * FLAGS.batch_size:(0 + 1) * FLAGS.batch_size,:,:,:,:], 0.6,
                FLAGS.batch_size, weights, biases)
            logits.append(logit)
    logits = tf.concat(logits, 0)
    norm_score = tf.nn.softmax(logits)
    saver = tf.train.Saver()
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    init = tf.global_variables_initializer()
    sess.run(init)
    # Restore the pretrained weights into this session.
    saver.restore(sess, model_name)
    # saver = tf.train.import_meta_graph('pretrained_model/c3d_ucf_model-9.meta')
    # saver.restore(sess, 'pretrained_model/c3d_ucf_moel-9')
    return norm_score, sess
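
As I understand it, build_c3d_model() is meant to be called once; after that, inference is just repeated sess.run calls on the returned tensors, roughly like the usage sketch below (my own code, not from the repo; clip stands for a preprocessed (1, 16, 112, 112, 3) batch of frames):

# my own usage sketch: build the graph once, then reuse it for every clip
norm_score, sess = build_c3d_model()
scores = sess.run(norm_score, feed_dict={images_placeholder: clip})
top_class = scores[0].argmax()  # index of the most probable action class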

from PyQt5.QtCore import QThread, pyqtSignal  # PyQt5 assumed

class DetectionThread(QThread):
    signal = pyqtSignal(tuple)

    def __init__(self):
        super(DetectionThread, self).__init__()
        self._stopped = True
        # self.img = None
        self.camera_name = ''
        self.video_imgs = list()
        self.prediction_result = tuple()

    def __del__(self):
        self.wait()

    def stop(self):
        self._stopped = True

    # run method gets called when we start the thread
    def run(self):
        # self._stopped = False
        # ''' actual prediction code'''

        norm_score, sess = build_c3d_model() 
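
For context, the thread would be started from the GUI in the usual QThread way, something like the simplified sketch below (on_prediction is just a placeholder slot name):

# simplified sketch of the wiring in the main window (not the full code)
self.det_thread = DetectionThread()
self.det_thread.signal.connect(self.on_prediction)  # slot that receives the result tuple
self.det_thread.start()  # QThread.start() executes run() in a worker thread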

Here is the result I get:

ValueError: Tensor("var_name/wc1:0", shape=(3, 3, 3, 3, 64), dtype=float32_ref, device=/device:CPU:0) must be from the same graph as Tensor("strided_slice:0", shape=(1, 16, 112, 112, 3), dtype=float32).
Aborted (core dumped)
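
If I read the error right, the var_name/wc1 variable created inside run() and the strided_slice tensor that comes from slicing images_placeholder live on two different tf.Graph objects. The TF 1.x docs for tf.Graph.as_default say the default graph is a property of the current thread, so a graph made default in the main thread is not automatically the default inside QThread.run(). Is the fix to pass the main thread's graph into the thread and re-enter it there? An untested sketch of what I mean (the graph argument and self.graph are my own naming, not from the repo):

class DetectionThread(QThread):
    signal = pyqtSignal(tuple)

    def __init__(self, graph):
        super(DetectionThread, self).__init__()
        self._stopped = True
        self.graph = graph  # the tf.Graph that owns images_placeholder

    def run(self):
        # make the main thread's graph the default for this worker thread
        # before building and restoring the model
        with self.graph.as_default():
            norm_score, sess = build_c3d_model()

# in the main thread, my guess at the wiring would be:
# thread = DetectionThread(images_placeholder.graph)
# thread.start()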

I need help threading this code, thank you.


Tags: name, self, norm, model, tf, with, variable, sess