我正在尝试在我的 PsychoPy 任务中定时截取屏幕截图。我的屏幕上有一个固定的注视十字,屏幕左右两侧各有一张人脸图片,随后会出现一个探测点。我只想在两张人脸同时出现在屏幕上的那 1 秒时间段内截取一张屏幕截图。例程中共有 10 对不同的人脸,并且整个例程循环 3 次;理想情况下,这段代码应当向我的电脑保存 30 张图像。到目前为止,我的代码如下:
from __future__ import division  # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import *  # things like STARTED, FINISHED
import numpy as np  # whole numpy lib is available, prepend 'np.'
# FIX: the original line ended with a stray ')' -- a SyntaxError.
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os  # handy system and path functions
import socket
import time
# --- Experiment session info -------------------------------------------------
# Ask for participant/session details in a dialog before anything else runs.
expName = 'DotProbe_EyeTracker_BSchool'
expInfo = {u'session': u'001', u'participant': u''}
dialog = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dialog.OK == False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # simple timestamp, keeps filenames unique
expInfo['expName'] = expName

# --- Output files ------------------------------------------------------------
# Data and log files go under ./data, named <participant>_<date>.
if not os.path.isdir('data'):
    os.makedirs('data')  # if this fails (e.g. permissions) we will get error
filename = os.path.join('data', '%s_%s' % (expInfo['participant'], expInfo['date']))
logFile = logging.LogFile(filename + '.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # console output only, not a file

# An ExperimentHandler isn't essential but helps with data saving.
thisExp = data.ExperimentHandler(
    name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=None,
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# Start Code - component code to be run before the window creation
# --- Window setup ------------------------------------------------------------
win = visual.Window(size=(1366, 768), fullscr=True, screen=0, allowGUI=False,
    allowStencil=False, monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb')
myClock = core.Clock()
# Store the monitor's frame rate if it can be measured successfully;
# otherwise assume a standard 60 Hz display.
expInfo['frameRate'] = win.getActualFrameRate()
# FIX: compare against None with identity ('is not None'), not '!='.
if expInfo['frameRate'] is not None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # couldn't get a reliable measure so guess
# Routine "instructions" -- static instruction text shown before the task.
instructionsClock = core.Clock()
text = visual.TextStim(
    win=win,
    ori=0,
    name='text',
    text='Respond to the probe once it appears. EIther click "2" when probe replaces left face or click "3" when probe replaces right face.',
    font='Arial',
    pos=[0, 0],
    height=0.1,
    wrapWidth=None,
    color='white',
    colorSpace='rgb',
    opacity=1,
    depth=0.0)
# Routine "block1" -- fixation cross, the two face images, and the probe.
block1Clock = core.Clock()
fixation = visual.TextStim(win=win, ori=0, name='fixation',
    text='+', font='Arial',
    pos=[0, 0], height=0.1, wrapWidth=None,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0)

def _image_stim(name, size, depth):
    """Build an ImageStim with the settings shared by every image here."""
    return visual.ImageStim(win=win, name=name,
        image='sin', mask=None,
        ori=0, pos=[0, 0], size=size,
        color=[1, 1, 1], colorSpace='rgb', opacity=1,
        texRes=128, interpolate=False, depth=depth)

leftimage = _image_stim('leftimage', [1, 1.34], -1.0)
rightimage = _image_stim('rightimage', [1, 1.34], -2.0)
probe = _image_stim('probe', [0.5, 0.5], -3.0)

# Get and save a "screen shot" of everything in stimlist.
# rect is the screen rectangle to grab; (-1, 1, 1, -1) is the whole screen,
# given as a list of the edges Left Top Right Bottom, in norm units.
stimlist = [leftimage, rightimage]
t0 = myClock.getTime()
rect = (-1, 1, 1, -1)
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
按照其他回答的建议,使用 win.getMovieFrame 和 win.saveMovieFrames 即可,你不需要 visual.BufferImageStim。当你完成脚本时,很可能会对条件列表执行循环。我建议在实验实际运行时截取屏幕截图,而不是事先"模拟"截图:这样可以确保截图准确反映实验过程中实际呈现的内容——万一你画错了什么东西,也能及时发现 :-)。当然,如果截图的目的纯粹是为了留档,请在运行正式实验时删除或注释掉这些行,以提高性能。发布于 2014-08-11 15:15:06
# Loop through trials. You may organize them using ``data.TrialHandler`` or
# generate them yourself.
for trial in myTrialList:
    # Draw whatever you need, probably dependent on the condition. E.g.:
    if trial['condition'] == 'right':
        rightimage.draw()
    else:
        leftimage.draw()
    fixation.draw()
    # Show your stimulus.
    win.flip()
    # Save screenshot. Consider commenting these lines out during production.
    win.getMovieFrame()  # defaults to front buffer, i.e. what's on screen now
    # FIX: saveMovieFrames() determines the image format from the file
    # extension; the original filename had none and would raise an error.
    # Save with a descriptive and unique filename.
    win.saveMovieFrames('screenshot' + trial['condition'] + '.png')
我现在的计算机上没有安装 PsychoPy,因此无法亲自测试,但使用 Window.getMovieFrame() 和 Window.saveMovieFrames() 应该能满足你的需求,例如:
# Render the stimuli into an off-screen buffer, then grab it before flipping.
screenshot = visual.BufferImageStim(win, stim=stimlist, rect=rect)
# rect is the screen rectangle to grab; (-1, 1, 1, -1) is the whole screen,
# given as a list of the edges: Left Top Right Bottom, in norm units.
# screenshot is currently on the 'back' buffer as we haven't flipped yet.
win.getMovieFrame(buffer='back')
# FIX: the original line had a scraped-in post timestamp fused directly onto
# the code, which is a SyntaxError.
win.saveMovieFrames('stimuli.png')
在使用PsychoPy构建器时,我在使用所提供的所有需要win.flip()的答案时遇到了问题。以下是适用于我并使用PIL的解决方案:
from PIL import ImageGrab
import datetime  # FIX: the original used datetime without importing it
import os

# One output folder per participant.
os.makedirs("./data/" + expInfo['participant'], exist_ok=True)
# Timestamped filename; replace characters that are unsafe in filenames.
output_image_name = ("./data/" + expInfo['participant'] + "/"
    + str(datetime.datetime.now())
        .replace("-", "_").replace(" ", "_").replace(".", "_").replace(":", "_")
    + ".png")
# Grab the whole screen and save it as PNG.
# FIX: the original last line had a scraped-in URL fused onto the code,
# which is a SyntaxError.
im = ImageGrab.grab()
im.save(output_image_name, 'png')
复制相似问题