Writing training code for a two-stream CNN (TwoStream-IQA)

Posted 2024-05-16 11:29:51


I am writing training code for TwoStream-IQA, a two-stream convolutional neural network. The model predicts a quality score for image patches evaluated through the two network streams. For testing I used the dataset from the GitHub link provided above.

The training code is as follows:

import os
import time
import numpy as np
import argparse

import chainer

chainer.global_config.train=True

from chainer import cuda
from chainer import serializers
from chainer import optimizers
from chainer import iterators
from chainer import training 
from chainer.training import extensions
from PIL import Image
from sklearn.feature_extraction.image import extract_patches

from model import Model

parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('--model', '-m', default='', 
                    help='path to the trained model')
parser.add_argument('--gpu', '-g', default=0, type=int, help='GPU ID')

args = parser.parse_args()


model = Model()

cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
model.to_gpu()

## prepare training data 
test_label_path = 'data_list/test.txt'
test_img_path = 'data/live/'
test_Graimg_path = 'data/live_grad/'
save_model_path = '/models/nr_sana_2stream.model'

patches_per_img = 256
patchSize = 32

print('-------------Load data-------------')
final_train_set = []
with open(test_label_path, 'rt') as f:
    for l in f:
        line, la = l.strip().split()  # for debug

        tic = time.time()
        full_path = os.path.join(test_img_path, line)
        Grafull_path = os.path.join(test_Graimg_path, line)

        inputImage = Image.open(full_path)
        Graf = Image.open(Grafull_path)
        img = np.asarray(inputImage, dtype=np.float32)
        Gra = np.asarray(Graf, dtype=np.float32)
        img = img.transpose(2, 0, 1)
        Gra = Gra.transpose(2, 0, 1)

        img1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        img1[0, :, :, :] = img
        Gra1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        Gra1[0, :, :, :] = Gra

        patches = extract_patches(img, (3, patchSize, patchSize), patchSize)
        Grapatches = extract_patches(Gra, (3, patchSize, patchSize), patchSize)

        X = patches.reshape((-1, 3, patchSize, patchSize))
        GraX = Grapatches.reshape((-1, 3, patchSize, patchSize))

        temp_slice1 = [X[int(float(index))] for index in range(256)]
        temp_slice2 = [GraX[int(float(index))] for index in range(256)]
        ##############################################  
        for j in range(len(temp_slice1)):
            temp_slice1[j] = xp.array(temp_slice1[j].astype(np.float32))
            temp_slice2[j] = xp.array(temp_slice2[j].astype(np.float32))

            final_train_set.append((
                np.asarray((temp_slice1[j], temp_slice2[j])).astype(np.float32),
                int(la)
                ))      
        ##############################################  
print('--------------Done!----------------')

print('--------------Iterator!----------------')    
train_iter = iterators.SerialIterator(final_train_set, batch_size=4)
optimizer = optimizers.Adam()
optimizer.use_cleargrads()
optimizer.setup(model)

updater = training.StandardUpdater(train_iter, optimizer, device=0)

print('--------------Trainer!----------------') 
trainer = training.Trainer(updater, (50, 'epoch'), out='result')

trainer.extend(extensions.LogReport())

trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'elapsed_time']))

print('--------------Running trainer!----------------') 
trainer.run()

However, the code raises an error at the trainer.run() line:

[error traceback omitted]

Perhaps this is because my data arrangement is wrong, since inside the model the training input x_data is split as:

length = x_data.shape[0]
x1 = Variable(x_data[0:length:2])
x2 = Variable(x_data[1:length:2])

and y_data as:

t = xp.repeat(y_data[0:length:2], 1)
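
To make that slicing concrete, here is a toy example (the shapes and labels are made up purely for illustration, and NumPy stands in for xp; this is not the actual model code):

import numpy as np

# toy batch: 8 patches forming 4 pairs, interleaved along the first axis
x_data = np.zeros((8, 3, 32, 32), dtype=np.float32)
y_data = np.array([1, 1, 2, 2, 3, 3, 4, 4], dtype=np.int32)

length = x_data.shape[0]
x1 = x_data[0:length:2]               # even indices -> one stream, shape (4, 3, 32, 32)
x2 = x_data[1:length:2]               # odd indices  -> the other stream, shape (4, 3, 32, 32)
t = np.repeat(y_data[0:length:2], 1)  # one label per pair -> [1, 2, 3, 4]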

The variable final_train_set holds the preprocessed dataset as tuples of the form (NumPy array, 66), where each NumPy array has shape (2, 3, 32, 32) and carries the two kinds of patches, each of shape (3, 32, 32).

I used the dataset from the GitHub link provided above. I am a beginner, please help!


2 Answers

Disclaimer: I did not write any of this code.

I found a GitHub repository that uses OpenCV, SciPy, and a few other modules for image quality assessment. Here is the code:

# Python code for BRISQUE model
# Original paper title: No-Reference Image Quality Assessment in the Spatial Domain
# Link: http://ieeexplore.ieee.org/document/6272356/
import cv2
import numpy as np
from scipy import ndimage
import math

def get_gaussian_filter(shape=7, sigma=7.0/6):
    # shape and sigma were undefined in the original snippet; these defaults
    # follow the 7x7, sigma = 7/6 Gaussian window used in the BRISQUE paper
    [m,n] = [(ss - 1.0) / 2.0 for ss in (shape,shape)]
    [y,x] = np.ogrid[-m:m+1,-n:n+1]
    window = np.exp( -(x*x + y*y) / (2.0*sigma*sigma) )
    window[window < np.finfo(window.dtype).eps*window.max() ] = 0
    sum_window = window.sum()
    if sum_window != 0:
        window = np.divide(window, sum_window)
    return window

def lmom(X):
    (rows, cols)  = X.shape
    if cols == 1:
        X = X.reshape(1,rows)
    n = rows
    X.sort()    
    b = np.zeros(3)    
    b0 = X.mean()    
    for r in range(1,4):        
        Num = np.prod(np.tile(np.arange(r+1,n+1), (r,1))-np.tile(np.arange(1,r+1).reshape(r,1),(1,n-r)),0)        
        Num = Num.astype(np.float64)  # np.float alias removed in NumPy 1.24
        Den = np.prod(np.tile(n, (1, r)) - np.arange(1,r+1), 1)        
        b[r-1] = 1.0/n * sum(Num/Den * X[0,r:])
    L = np.zeros(4)
    L[0] = b0
    L[1] = 2*b[0] - b0
    L[2] = 6*b[1] - 6*b[0] + b0
    L[3] = 20*b[2] - 30*b[1] + 12*b[0] - b0
    return L

def compute_features(im):
    im = im.astype(np.float64)
    window = get_gaussian_filter()
    scalenum = 2
    feat = []
    for itr_scale in range(scalenum):
        mu = cv2.filter2D(im, cv2.CV_64F, window, borderType=cv2.BORDER_CONSTANT)
        mu_sq = mu * mu
        sigma = np.sqrt(abs(cv2.filter2D(im*im, cv2.CV_64F, window, borderType=cv2.BORDER_CONSTANT) - mu_sq))        
        structdis = (im-mu)/(sigma+1)
        structdis_col_vector = np.reshape(structdis.transpose(), (structdis.size,1))
        L = lmom(structdis.reshape(structdis.size,1))
        feat = np.append(feat,[L[1], L[3]])
        shifts = [[0,1], [1,0], [1,1], [-1,1]]
        for itr_shift in shifts:
            shifted_structdis = np.roll(structdis, itr_shift[0], axis=0)
            shifted_structdis = np.roll(shifted_structdis, itr_shift[1], axis=1)

            shifted_structdis_col_vector = np.reshape(shifted_structdis.T, (shifted_structdis.size,1))
            pair = structdis_col_vector * shifted_structdis_col_vector
            L = lmom(pair.reshape(pair.size,1))
            feat = np.append(feat, L)
        im = cv2.resize(im, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
    return feat


# note: scipy.ndimage.imread was removed in SciPy 1.2; on newer SciPy, something like
# cv2.imread('example.bmp', cv2.IMREAD_GRAYSCALE).astype(np.float64) can be used instead
im = ndimage.imread('example.bmp', flatten=True)
feat = compute_features(im)
print(feat)
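
As a rough illustration of how such features are typically used (the BRISQUE paper maps them to subjective scores with a support-vector regressor; everything below, including the random stand-in data, is an assumption for illustration and not part of the repository above):

from sklearn.svm import SVR

# dummy stand-ins for a real training set: random "images" and random scores,
# only to show the feature -> regressor pipeline end to end
train_images = [np.random.rand(64, 64) * 255 for _ in range(10)]
train_scores = np.random.rand(10) * 100

train_feats = np.array([compute_features(img) for img in train_images])
svr = SVR(kernel='rbf', C=1.0, gamma='scale')
svr.fit(train_feats, train_scores)

# predict a quality score for a new (here also random) image
test_feats = compute_features(np.random.rand(64, 64) * 255).reshape(1, -1)
predicted_quality = svr.predict(test_feats)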

In short, numpy.asarray is being called inappropriately here: numpy.asarray does not concatenate two cupy.ndarrays, whereas it does concatenate two numpy.ndarrays.

Your code boils down to this:

import numpy, cupy

final_train_set = []

N_PATCH_PER_IMAGE = 8

for i in range(10):
    label = 0

    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]

    for j in range(N_PATCH_PER_IMAGE):
        temp_slice_1[j] = cupy.array(temp_slice_1[j])
        temp_slice_2[j] = cupy.array(temp_slice_2[j])
        final_train_set.append(
            [
                # attempting to concatenate two cupy arrays by numpy.asarray 
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )

The bug:

[error traceback omitted]

Solution: comment out the two lines.

import numpy  # do not import cupy here

for i in range(10):
    label = 0

    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]

    for j in range(N_PATCH_PER_IMAGE):
        # temp_slice_1[j] = cupy.array(temp_slice_1[j]) <- comment out!
        # temp_slice_2[j] = cupy.array(temp_slice_2[j]) <- comment out!
        final_train_set.append(
            [
                # concatenate two numpy arrays: usually cupy should not be used in dataset
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )
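
Applied to the loop in the question, a minimal sketch of the same fix could look like this (the names patches_per_img, X, GraX and la are taken from the question's code; relying on StandardUpdater(device=0) to move each minibatch to the GPU is an assumption about the intended setup):

for j in range(patches_per_img):
    final_train_set.append((
        # keep the patch pair as a single NumPy array of shape (2, 3, 32, 32);
        # the updater's device argument transfers each batch to the GPU later
        np.stack((X[j], GraX[j])).astype(np.float32),
        np.int32(la),
    ))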

Footnotes

  1. In the code you showed, xp is not specified, so nobody can give you a definitive answer. If you cannot isolate the problem yourself, please post your entire code, including the model.

  2. I suspect you may also be unable to run the training code for another reason. In this code, all of the data is loaded into main memory while final_train_set is constructed. If the number of images is large, main memory will be exhausted and a MemoryError will be raised. (Conversely, if the number of images is small and memory is large enough, the error will not occur.) In that case, the references Chainer at a glance and Dataset Abstraction will help.
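
A minimal sketch of that Dataset Abstraction idea, assuming a list of (image path, gradient-image path, label) tuples; the class name and the random-crop strategy are illustrative, not taken from the linked repository:

import numpy as np
import chainer
from PIL import Image

class PatchPairDataset(chainer.dataset.DatasetMixin):
    def __init__(self, triplets, patch_size=32):
        self.triplets = triplets      # list of (img_path, grad_path, label)
        self.patch_size = patch_size

    def __len__(self):
        return len(self.triplets)

    def get_example(self, i):
        img_path, grad_path, label = self.triplets[i]
        # images are loaded lazily, one example at a time, so main memory
        # only ever holds the current minibatch
        img = np.asarray(Image.open(img_path), dtype=np.float32).transpose(2, 0, 1)
        gra = np.asarray(Image.open(grad_path), dtype=np.float32).transpose(2, 0, 1)
        ps = self.patch_size
        y = np.random.randint(0, img.shape[1] - ps + 1)
        x = np.random.randint(0, img.shape[2] - ps + 1)
        pair = np.stack((img[:, y:y + ps, x:x + ps],
                         gra[:, y:y + ps, x:x + ps])).astype(np.float32)
        return pair, np.int32(label)

# this dataset can then be fed to the same SerialIterator as before, e.g.
# train_iter = iterators.SerialIterator(PatchPairDataset(triplets), batch_size=4)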
