Taking a screenshot during a PsychoPy experiment
I am trying to capture a timed screenshot in my PsychoPy task. I have a fixation cross, followed by two faces (one on the left and one on the right of the screen), and then a dot (probe). I only want a screenshot of the 1-second window during which the two faces are on screen. The program contains 10 different face pairs and loops 3 times, so ideally this code would save 30 images to my computer. Here is the code I have so far:
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import socket
import time
# Store info about the experiment session
expName = 'DotProbe_EyeTracker_BSchool'
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Setup files for saving
if not os.path.isdir('data'):
    os.makedirs('data')  # if this fails (e.g. permissions) we will get an error
filename = 'data' + os.path.sep + '%s_%s' %(expInfo['participant'], expInfo['date'])
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=None,
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(size=(1366, 768), fullscr=True, screen=0, allowGUI=False,
    allowStencil=False, monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb')
myClock = core.Clock()
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0  # couldn't get a reliable measure so guess
# Initialize components for Routine "instructions"
instructionsClock = core.Clock()
text = visual.TextStim(win=win, ori=0, name='text',
    text='Respond to the probe once it appears. Either click "2" when the probe replaces the left face or click "3" when the probe replaces the right face.', font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)
# Initialize components for Routine "block1"
block1Clock = core.Clock()
fixation = visual.TextStim(win=win, ori=0, name='fixation',
    text='+', font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)
leftimage = visual.ImageStim(win=win, name='leftimage',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[1, 1.34],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-1.0)
rightimage = visual.ImageStim(win=win, name='rightimage',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[1, 1.34],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-2.0)
probe = visual.ImageStim(win=win, name='probe',
    image='sin', mask=None,
    ori=0, pos=[0,0], size=[0.5, 0.5],
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=False, depth=-3.0)
# Get and save a screenshot of everything in stimlist:
stimlist = [leftimage, rightimage]
t0 = myClock.getTime()
rect=(-1,1,1,-1)
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
# as a list of the edges: Left Top Right Bottom, in norm units.
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
3 Answers
I was using the PsychoPy Builder and came across many answers that rely on win.flip(), but none of them solved my problem. Here is a working solution I found that uses the PIL library:
from PIL import ImageGrab
import datetime
import os

os.makedirs("./data/" + expInfo['participant'], exist_ok=True)
output_image_name = "./data/" + expInfo['participant'] + "/" + str(datetime.datetime.now()).replace("-", "_").replace(" ", "_").replace(".", "_").replace(":", "_") + ".png"
im = ImageGrab.grab()  # captures the entire desktop, so call this while the stimuli are on screen
im.save(output_image_name, 'png')
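Since ImageGrab.grab() captures the whole desktop rather than PsychoPy's drawing buffer, it has to run while the faces are actually being shown, i.e. right after win.flip(). As a minimal sketch (not part of the original answer), the snippet above could be wrapped in a helper and called once per trial; win, expInfo, leftimage and rightimage are assumed to exist as in the question's script, and trial_idx is a hypothetical loop counter:

import datetime
import os
from PIL import ImageGrab

def save_desktop_screenshot(participant, trial_idx):
    # Grab the whole desktop and save it as data/<participant>/trialNN_<timestamp>.png
    out_dir = os.path.join("data", participant)
    os.makedirs(out_dir, exist_ok=True)
    stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
    path = os.path.join(out_dir, "trial%02d_%s.png" % (trial_idx, stamp))
    ImageGrab.grab().save(path, "PNG")
    return path

# Hypothetical use inside the trial loop, after the two faces have been flipped to the screen:
# leftimage.draw(); rightimage.draw(); win.flip()
# save_desktop_screenshot(expInfo['participant'], trial_idx)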
I don't have PsychoPy installed on my current computer, so I can't test this, but you should be able to get what you want with the Window.getMovieFrame() and Window.saveMovieFrames() methods, e.g.:
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# rect is the screen rectangle to grab, (-1,1,1,-1) is whole screen
# as a list of the edges: Left Top Right Bottom, in norm units.
# screenshot is currently on the 'back' buffer as we haven't flipped yet
win.getMovieFrame(buffer='back')
win.saveMovieFrames('stimuli.png')
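Because getMovieFrame(buffer='back') is called after drawing but before win.flip(), the saved image is exactly the frame that is about to appear. As a minimal sketch of how this could yield one file per face pair during the 1-second presentation, assuming win, leftimage and rightimage from the question, with rep_idx and pair_idx as hypothetical counters for the 3 repetitions and 10 pairs (30 images in total):

for rep_idx in range(3):
    for pair_idx in range(10):
        # ... assign leftimage.image / rightimage.image for this pair here ...
        leftimage.draw()
        rightimage.draw()
        win.getMovieFrame(buffer='back')  # grab the frame before it is flipped
        win.flip()
        win.saveMovieFrames('faces_rep%d_pair%02d.png' % (rep_idx, pair_idx))
        core.wait(1.0)  # keep the faces on screen for the 1-second window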
Use win.getMovieFrame and win.saveMovieFrames, as others have suggested; you do not need visual.BufferImageStim. Once you finish the script you will probably have a loop over trials/conditions anyway, so I suggest taking the screenshots during the actual experiment rather than "simulating" them beforehand. That way your screenshots are guaranteed to reflect what actually happens in the experiment, and if you make a mistake in your drawing code you will notice it :-) If the screenshots are only needed for documentation, simply remove or comment out those lines during the real experimental runs to improve performance.
# Loop through trials. You may organize them using ``data.TrialHandler`` or generate them yourself.
for trial in myTrialList:
    # Draw whatever you need, probably dependent on the condition. E.g.:
    if trial['condition'] == 'right':
        rightimage.draw()
    else:
        leftimage.draw()
    fixation.draw()

    # Show your stimulus
    win.flip()

    # Save screenshot. You may want to comment out these lines during production runs.
    win.getMovieFrame()  # defaults to front buffer, i.e. what is on screen now
    win.saveMovieFrames('screenshot_' + trial['condition'] + '.png')  # save with a descriptive and unique filename
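Because the same condition repeats across the 10 face pairs and 3 loops, a filename built only from the condition would overwrite earlier screenshots. One hedged variant, where trial_idx is a hypothetical counter added with enumerate so that each of the 30 screenshots gets its own file:

for trial_idx, trial in enumerate(myTrialList):
    # ... draw and flip as above ...
    win.getMovieFrame()
    win.saveMovieFrames('screenshot_%02d_%s.png' % (trial_idx, trial['condition']))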