├── .gitignore ├── Movietest.py ├── README.md ├── movie3_changed.py └── test_misc_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.DS_Store 2 | -------------------------------------------------------------------------------- /Movietest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This experiment was created using PsychoPy2 Experiment Builder (v1.81.02), November 29, 2014, at 08:30 5 | If you publish work using this script please cite the relevant PsychoPy publications 6 | Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13. 7 | Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008 8 | """ 9 | 10 | from __future__ import division # so that 1/3=0.333 instead of 1/3=0 11 | 12 | from psychopy import prefs 13 | prefs.general['audioLib'] = ['pygame'] 14 | 15 | from psychopy import visual, core, data, event, logging, gui, sound 16 | 17 | from psychopy.constants import * # things like STARTED, FINISHED 18 | import numpy as np # whole numpy lib is available, prepend 'np.' 
19 | from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray, unique 20 | from numpy.random import random, randint, normal, shuffle 21 | import pickle 22 | import os # handy system and path functions 23 | 24 | from test_misc_utils import show_intro, check_subject_info, after_train_text, goodbye_screen, get_final_score, train_subject 25 | 26 | # Ensure that relative paths start from the same directory as this script 27 | _thisDir = os.path.dirname(os.path.abspath(__file__)) 28 | os.chdir(_thisDir) 29 | 30 | # Store info about the experiment session 31 | expName = 'Movietest' 32 | expInfo = {'Subject_id':'', 'session':''} 33 | dlg = gui.DlgFromDict(dictionary=expInfo, title=expName) 34 | if dlg.OK == False: core.quit() # user pressed cancel 35 | expInfo['date'] = data.getDateStr() # add a simple timestamp 36 | expInfo['expName'] = expName 37 | 38 | stored_scores_dir = _thisDir + os.sep + 'data/' 39 | save_dest = -1 40 | save_dest, valid_subject, subject_id, session = check_subject_info(expInfo, stored_scores_dir) 41 | 42 | if not valid_subject: 43 | core.quit() 44 | 45 | # An ExperimentHandler isn't essential but helps with data saving 46 | thisExp = data.ExperimentHandler(name=expName, version='', 47 | extraInfo=expInfo, runtimeInfo=None, 48 | originPath=None, 49 | savePickle=True, saveWideText=True, 50 | dataFileName=save_dest) 51 | #save a log file for detail verbose info 52 | logFile = logging.LogFile(save_dest+'.log', level=logging.EXP) 53 | logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file 54 | 55 | endExpNow = False # flag for 'escape' or other condition => quit the exp 56 | #escape_key_list = ["escape"] 57 | escape_key_list = [] 58 | 59 | # Setup the Window 60 | win = visual.Window(size = (1920, 1080), fullscr=True, screen=0, allowGUI=True, allowStencil=False, 61 | monitor='testMonitor', color=[0, 0, 0], colorSpace='rgb', 62 | blendMode='avg', useFBO=False, 63 | ) 64 | 65 | 
#win_not_gray = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=True, allowStencil=False, 66 | # monitor='testMonitor', color=[-1, -1, -1], colorSpace='rgb', 67 | # blendMode='avg', useFBO=True, 68 | # ) 69 | # store frame rate of monitor if we can measure it successfully 70 | expInfo['frameRate']=win.getActualFrameRate() 71 | if expInfo['frameRate']!=None: 72 | frameDur = 1.0/round(expInfo['frameRate']) 73 | else: 74 | frameDur = 1.0/60.0 # couldn't get a reliable measure so guess 75 | 76 | rating_continuous = visual.RatingScale(win=win, name='rating', precision = '100', marker=u'slider', textColor = 'black', markerStart=50, 77 | showValue = False, tickHeight = 0.0, size=1.5, pos=[0.0, -0.92], labels=[''], scale='', low=1, high=100, stretch = 0.75, mouseOnly=True, 78 | markerColor='Gray') 79 | rating_final = visual.RatingScale(win=win, name='rating', precision = '100', marker=u'slider', textColor = 'black', showValue = False, minTime=0.01, 80 | tickHeight = 0.0, size=1.0, pos=[0.0, -0.0], low=1, high=100, labels=[u'Bad', u' Excellent'], mouseOnly=True, 81 | scale=u'Please rate your overall quality of experience.') 82 | 83 | rating_continuous_train = visual.RatingScale(win=win, name='rating', precision = '100', marker=u'slider', textColor = 'black', markerStart=50, 84 | showValue = False, tickHeight = 0.0, size=1.5, pos=[0.0, -0.92], labels=[''], scale='', low=1, high=100, stretch = 0.75, mouseOnly=True, 85 | markerColor='Gray') 86 | rating_final_train = visual.RatingScale(win=win, name='rating', precision = '100', marker=u'slider', textColor = 'black', showValue = False, minTime=0.01, 87 | tickHeight = 0.0, size=1.0, pos=[0.0, -0.0], low=1, high=100,labels=[u'Bad', u' Excellent'], mouseOnly=True, 88 | scale=u'Please rate your overall quality of experience.') 89 | 90 | main_dir = '/media/christos/Subjective_Study/LIVE_NFLX_Plus_Sources/' 91 | 92 | playlist_dir = main_dir + 'Playlists/' 93 | video_dir = main_dir + 'assets_mp4/' 94 | audio_dir = 
main_dir + 'assets_mp4/Audio/' 95 | train_dir = main_dir + 'TrainingVideos/' 96 | 97 | video_train_files = [train_dir + 'train_1.mp4', train_dir + 'train_2.mp4', train_dir + 'train_3.mp4'] 98 | audio_train_files = [train_dir + 'train_1_44100.wav', train_dir + 'train_2_44100.wav', train_dir + 'train_3_44100.wav'] 99 | 100 | playlist_file = playlist_dir + 'Subject_' + str(subject_id) + '_session_' + str(session) + '.txt' 101 | 102 | with open(playlist_file, 'r') as f: 103 | multimedia_files = f.readlines() 104 | 105 | # load the video and audio files separately, by changing the movie3.py 106 | video_files = [] 107 | audio_files = [] 108 | for multimedia_file in multimedia_files: 109 | 110 | video_file = multimedia_file.split(" ")[0] 111 | audio_file = multimedia_file.split(" ")[1].split("\n")[0] 112 | 113 | video_files.append(video_dir + video_file) 114 | audio_files.append(audio_dir + audio_file) 115 | 116 | show_intro(win, escape_key_list) 117 | 118 | run_those = 0 119 | run_those_2 = 0 120 | 121 | need_training = True 122 | if session > 0: 123 | need_training = False 124 | 125 | if need_training: 126 | train_subject(win, video_train_files, audio_train_files, rating_continuous_train, rating_final_train, escape_key_list) 127 | after_train_text(win, escape_key_list) 128 | 129 | do_test = True 130 | 131 | if do_test: 132 | 133 | # Initialize components for Routine "trial" 134 | trialClock = core.Clock() 135 | 136 | movie = visual.MovieStim3_changed(win=win, name='movie',units='pix', 137 | filename=video_files[0], noAudio=False, audio_filename=audio_files[0], 138 | ori=0, pos=[0, 0], opacity=1, 139 | size=[1920, 1080], 140 | depth=0.0, 141 | ) 142 | 143 | # Create some handy timers 144 | globalClock = core.Clock() # to track the time since experiment started 145 | 146 | for video_file, audio_file in zip(video_files, audio_files): 147 | 148 | #------Prepare to start Routine "trial"------- 149 | t = 0 150 | trialClock.reset() # clock 151 | frameN = -1 152 | 153 | # 
update component parameters for each repeat 154 | movie.setMovie(video_file, audio_file) 155 | 156 | rating_continuous.reset() 157 | 158 | # keep track of which components have finished 159 | trialComponents = [movie, rating_continuous] 160 | for thisComponent in trialComponents: 161 | if hasattr(thisComponent, 'status'): 162 | thisComponent.status = NOT_STARTED 163 | 164 | current_frame_time = [] 165 | current_frame_time_many = [] 166 | continuous_scores = [] 167 | 168 | #-------Start Routine "trial"------- 169 | continueRoutine = True 170 | while continueRoutine: #and routineTimer.getTime() > 0: 171 | # get current time 172 | t = trialClock.getTime() 173 | frameN = frameN + 1 # number of completed frames (so 0 is the first frame) 174 | # update/draw components on each frame 175 | 176 | # *movie* updates 177 | if t >= 0.0 and movie.status == NOT_STARTED: 178 | # keep track of start time/frame for later 179 | win.setColor([-1, -1, -1]) 180 | movie.tStart = t # underestimates by a little under one frame 181 | movie.frameNStart = frameN # exact frame index 182 | movie.play() 183 | elif movie.status == STARTED:# and t <= timeout:#(timeout-win.monitorFramePeriod*0.75): #most of one frame period left 184 | movie.draw() 185 | if t > 0 and movie.status != FINISHED: 186 | rating_continuous.draw() 187 | #if movie.status != FINISHED: 188 | run_those += 1 189 | continuous_scores.append(rating_continuous.getRating()) 190 | current_frame_time.append(movie.getCurrentFrameTime()) 191 | 192 | run_those_2 += 1 193 | current_frame_time_many.append(movie.getCurrentFrameTime()) 194 | 195 | continueRoutine = False # will revert to True if at least one component still running 196 | for thisComponent in trialComponents: 197 | if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: 198 | continueRoutine = True 199 | break # at least one component has not yet finished 200 | if movie.status == FINISHED: 201 | continueRoutine = False 202 | movie_audioStream = None 203 | 
win.setColor([0, 0, 0]) 204 | 205 | # check for quit (the Esc key) 206 | if endExpNow or event.getKeys(keyList=escape_key_list): 207 | core.quit() 208 | 209 | # refresh the screen 210 | if continueRoutine: # don't flip if this routine is over or we'll get a blank screen 211 | win.flip() 212 | 213 | #-------Ending Routine "trial"------- 214 | for thisComponent in trialComponents: 215 | if hasattr(thisComponent, "setAutoDraw"): 216 | thisComponent.setAutoDraw(False) 217 | 218 | final_score = get_final_score(win, rating_final, escape_key_list) 219 | 220 | Nrendered = len(current_frame_time) 221 | Nframes = len(unique(current_frame_time)) 222 | Nframes_many = len(unique(current_frame_time_many)) 223 | Nscores = len(continuous_scores) 224 | 225 | # store data for thisExp (ExperimentHandler) 226 | thisExp.addData('subject_id', subject_id) 227 | thisExp.addData('video', video_file) 228 | thisExp.addData('session', session) 229 | thisExp.addData('Nrendered', Nrendered) 230 | thisExp.addData('Nframes', Nframes) 231 | thisExp.addData('Nscores', Nscores) 232 | thisExp.addData('final_score', final_score) 233 | thisExp.addData('continuous_scores', continuous_scores) 234 | 235 | thisExp.nextEntry() 236 | 237 | goodbye_screen(win, escape_key_list) 238 | 239 | win.close() 240 | core.quit() 241 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ================================================================= 2 | 3 | -----------COPYRIGHT NOTICE STARTS WITH THIS LINE------------ 4 | 5 | Copyright (c) 2018 The University of Texas at Austin 6 | 7 | All rights reserved. 
Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this code (the source files) and its documentation for any purpose, provided that the copyright notice in its entirety appear in all copies of this code, and the original source of this code, Laboratory for Image and Video Engineering (LIVE, http://live.ece.utexas.edu) and Center for Perceptual Systems (CPS, http://www.cps.utexas.edu) at the University of Texas at Austin (UT Austin, http://www.utexas.edu), is acknowledged in any publication that reports research using this code. The research is to be cited in the bibliography as: 8 | 9 | Christos G. Bampis, Zhi Li, Ioannis Katsavounidis, Te-Yuan Huang, Chaitanya Ekanadham and Alan C. Bovik, Towards Perceptually Optimized End-to-end Adaptive Video Streaming, submitted to IEEE Transactions on Image Processing. 10 | 11 | IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT AUSTIN BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS DATABASE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF TEXAS AT AUSTIN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF TEXAS AT AUSTIN SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE DATABASE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF TEXAS AT AUSTIN HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 12 | 13 | -----------COPYRIGHT NOTICE ENDS WITH THIS LINE------------% 14 | 15 | Author : Christos Bampis 16 | 17 | The author is with the Laboratory for Image and Video Engineering (LIVE), Department of Electrical and Computer Engineering, The University of Texas at Austin, Austin, TX. 18 | 19 | Kindly report any suggestions or corrections to cbampis@gmail.com. 
20 | 21 | ================================================================= 22 | 23 | This is a demo software implementation for the Psychopy-based subjective interface used in the LIVE-NFLX-II experiment. To use the code, please first clone the latest Psychopy version from the repository and add movie3_changed.py to Psychopy/psychopy/psychopy/visual/ so that the video and audio files can be loaded separately; otherwise, Psychopy produced a small audio glitch at the end of each video file. 24 | 25 | The interface will allow you to extract continuous-time (per-frame) scores. 26 | -------------------------------------------------------------------------------- /movie3_changed.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | A stimulus class for playing movies (mp4, divx, avi etc...) in PsychoPy. 6 | Demo using the experimental movie3 stim to play a video file. Path of video 7 | needs to updated to point to a video you have. movie2 does /not/ require 8 | avbin to be installed. 9 | 10 | Movie3 does require: 11 | ~~~~~~~~~~~~~~~~~~~~~ 12 | 13 | moviepy (which requires imageio, Decorator). These can be installed 14 | (including dependencies) on a standard Python install using 15 | `pip install moviepy` 16 | imageio will download further compiled libs (ffmpeg) as needed 17 | 18 | Current known issues: 19 | ~~~~~~~~~~~~~~~~~~~~~~ 20 | 21 | volume control not implemented 22 | movie is long then audio will be huge and currently the whole thing gets 23 | loaded in one go. We should provide streaming audio from disk. 24 | 25 | """ 26 | from __future__ import division 27 | 28 | # Part of the PsychoPy library 29 | # Copyright (C) 2015 Jonathan Peirce 30 | # Distributed under the terms of the GNU General Public License (GPL). 
31 | # 32 | from builtins import str 33 | from past.utils import old_div 34 | reportNDroppedFrames = 10 35 | 36 | import os 37 | 38 | from psychopy import logging 39 | from psychopy.tools.arraytools import val2array 40 | from psychopy.tools.attributetools import logAttrib, setAttribute 41 | from psychopy.visual.basevisual import BaseVisualStim, ContainerMixin 42 | 43 | from moviepy.video.io.VideoFileClip import VideoFileClip 44 | 45 | import ctypes 46 | import numpy 47 | from psychopy.clock import Clock 48 | from psychopy.constants import FINISHED, NOT_STARTED, PAUSED, PLAYING, STOPPED 49 | 50 | import pyglet.gl as GL 51 | 52 | 53 | class MovieStim3_changed(BaseVisualStim, ContainerMixin): 54 | """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy 55 | that does not require avbin. Instead it requires the cv2 python package 56 | for OpenCV. The VLC media player also needs to be installed on the 57 | psychopy computer. 58 | 59 | **Example**:: 60 | 61 | See Movie2Stim.py for demo. 62 | """ 63 | 64 | def __init__(self, win, 65 | filename="", 66 | units='pix', 67 | size=None, 68 | pos=(0.0, 0.0), 69 | ori=0.0, 70 | flipVert=False, 71 | flipHoriz=False, 72 | color=(1.0, 1.0, 1.0), 73 | colorSpace='rgb', 74 | opacity=1.0, 75 | volume=1.0, 76 | name='', 77 | loop=False, 78 | autoLog=True, 79 | depth=0.0, 80 | noAudio=False, 81 | vframe_callback=None, 82 | fps=None, 83 | interpolate=True, 84 | audio_filename=None): 85 | """ 86 | :Parameters: 87 | 88 | filename : 89 | a string giving the relative or absolute path to the movie. 90 | flipVert : True or *False* 91 | If True then the movie will be top-bottom flipped 92 | flipHoriz : True or *False* 93 | If True then the movie will be right-left flipped 94 | volume : 95 | The nominal level is 100, and 0 is silence. 96 | loop : bool, optional 97 | Whether to start the movie over from the beginning if draw is 98 | called and the movie is done. 
99 | 100 | """ 101 | # what local vars are defined (these are the init params) for use 102 | # by __repr__ 103 | self._initParams = dir() 104 | self._initParams.remove('self') 105 | super(MovieStim3_christos, self).__init__(win, units=units, name=name, 106 | autoLog=False) 107 | 108 | retraceRate = win._monitorFrameRate 109 | if retraceRate is None: 110 | retraceRate = win.getActualFrameRate() 111 | if retraceRate is None: 112 | logging.warning("FrameRate could not be supplied by psychopy; " 113 | "defaulting to 60.0") 114 | retraceRate = 60.0 115 | self._retraceInterval = old_div(1.0, retraceRate) 116 | self.filename = filename 117 | self.audio_filename = audio_filename 118 | self.loop = loop 119 | self.flipVert = flipVert 120 | self.flipHoriz = flipHoriz 121 | self.pos = numpy.asarray(pos, float) 122 | self.depth = depth 123 | self.opacity = float(opacity) 124 | self.interpolate = interpolate 125 | self.noAudio = noAudio 126 | self._audioStream = None 127 | self.useTexSubImage2D = True 128 | 129 | if noAudio: # to avoid dependency problems in silent movies 130 | self.sound = None 131 | else: 132 | from psychopy import sound 133 | self.sound = sound 134 | 135 | self._videoClock = Clock() 136 | self.loadMovie(self.filename, self.audio_filename) 137 | self.setVolume(volume) 138 | self.nDroppedFrames = 0 139 | 140 | # size 141 | if size is None: 142 | self.size = numpy.array([self._mov.w, self._mov.h], 143 | float) 144 | else: 145 | self.size = val2array(size) 146 | self.ori = ori 147 | self._updateVertices() 148 | # set autoLog (now that params have been initialised) 149 | self.autoLog = autoLog 150 | if autoLog: 151 | logging.exp("Created %s = %s" % (self.name, str(self))) 152 | 153 | def reset(self): 154 | self._numpyFrame = None 155 | self._nextFrameT = None 156 | self._texID = None 157 | self.status = NOT_STARTED 158 | 159 | def setMovie(self, filename, audio_filename, log=True): 160 | """See `~MovieStim.loadMovie` (the functions are identical). 
161 | 162 | This form is provided for syntactic consistency with other visual 163 | stimuli. 164 | """ 165 | self.loadMovie(filename, audio_filename, log=log) 166 | 167 | def loadMovie(self, filename, audio_filename, log=True): 168 | """Load a movie from file 169 | 170 | :Parameters: 171 | 172 | filename: string 173 | The name of the file, including path if necessary 174 | 175 | After the file is loaded MovieStim.duration is updated with the movie 176 | duration (in seconds). 177 | """ 178 | self.reset() # set status and timestamps etc 179 | 180 | # Create Video Stream stuff 181 | if os.path.isfile(filename): 182 | self._mov = VideoFileClip(filename, audio=(1 - self.noAudio)) 183 | if (not self.noAudio) and (self._mov.audio is not None): 184 | sound = self.sound 185 | if audio_filename == None: 186 | try: 187 | jwe_tmp = self._mov.subclip(0, round(self._mov.duration)) 188 | self._audioStream = sound.Sound( 189 | self.jwe_tmp.audio.to_soundarray(), 190 | sampleRate=self._mov.audio.fps) 191 | except: 192 | # JWE added this as a patch for a moviepy oddity where the 193 | # duration is inflated in the saved file causes the 194 | # audioclip to be the wrong length, so round down and it 195 | # should work 196 | jwe_tmp = self._mov.subclip(0, round(self._mov.duration)) 197 | self._audioStream = sound.Sound( 198 | jwe_tmp.audio.to_soundarray(), 199 | sampleRate=self._mov.audio.fps) 200 | del(jwe_tmp) 201 | else: 202 | self._audioStream = sound.Sound(audio_filename, sampleRate=self._mov.audio.fps) 203 | #print(self._mov.audio.fps) 204 | else: # make sure we set to None (in case prev clip had audio) 205 | self._audioStream = None 206 | else: 207 | raise IOError("Movie file '%s' was not found" % filename) 208 | # mov has attributes: 209 | # size, duration, fps 210 | # mov.audio has attributes 211 | # duration, fps (aka sampleRate), to_soundarray() 212 | self._frameInterval = old_div(1.0, self._mov.fps) 213 | self.duration = self._mov.duration 214 | self.filename = filename 
215 | self._updateFrameTexture() 216 | logAttrib(self, log, 'movie', filename) 217 | 218 | def play(self, log=True): 219 | """Continue a paused movie from current position. 220 | """ 221 | status = self.status 222 | if status != PLAYING: 223 | if self._audioStream is not None: 224 | self._audioStream.play() 225 | if status == PAUSED: 226 | if self.getCurrentFrameTime() < 0: 227 | self._audioSeek(0) 228 | else: 229 | self._audioSeek(self.getCurrentFrameTime()) 230 | self.status = PLAYING 231 | self._videoClock.reset(-self.getCurrentFrameTime()) 232 | 233 | if log and self.autoLog: 234 | self.win.logOnFlip("Set %s playing" % (self.name), 235 | level=logging.EXP, obj=self) 236 | self._updateFrameTexture() 237 | 238 | def pause(self, log=True): 239 | """ 240 | Pause the current point in the movie (sound will stop, current frame 241 | will not advance). If play() is called again both will restart. 242 | """ 243 | if self.status == PLAYING: 244 | self.status = PAUSED 245 | if self._audioStream: 246 | self._audioStream.stop() 247 | if log and self.autoLog: 248 | self.win.logOnFlip("Set %s paused" % 249 | (self.name), level=logging.EXP, obj=self) 250 | return True 251 | if log and self.autoLog: 252 | self.win.logOnFlip("Failed Set %s paused" % 253 | (self.name), level=logging.EXP, obj=self) 254 | return False 255 | 256 | def stop(self, log=True): 257 | """Stop the current point in the movie (sound will stop, current frame 258 | will not advance). Once stopped the movie cannot be restarted - 259 | it must be loaded again. Use pause() if you may need to restart 260 | the movie. 
261 | """ 262 | if self.status != STOPPED: 263 | self.status = STOPPED 264 | self._unload() 265 | self.reset() 266 | if log and self.autoLog: 267 | self.win.logOnFlip("Set %s stopped" % (self.name), 268 | level=logging.EXP, obj=self) 269 | 270 | def setVolume(self, volume): 271 | pass # to do 272 | 273 | def setFlipHoriz(self, newVal=True, log=True): 274 | """If set to True then the movie will be flipped horizontally 275 | (left-to-right). Note that this is relative to the original, 276 | not relative to the current state. 277 | """ 278 | self.flipHoriz = newVal 279 | logAttrib(self, log, 'flipHoriz') 280 | self._needVertexUpdate = True 281 | 282 | def setFlipVert(self, newVal=True, log=True): 283 | """If set to True then the movie will be flipped vertically 284 | (top-to-bottom). Note that this is relative to the original, 285 | not relative to the current state. 286 | """ 287 | self.flipVert = newVal 288 | logAttrib(self, log, 'flipVert') 289 | self._needVertexUpdate = True 290 | 291 | def getFPS(self): 292 | """ 293 | Returns the movie frames per second playback speed. 294 | """ 295 | return self._mov.fps 296 | 297 | def getCurrentFrameTime(self): 298 | """Get the time that the movie file specified the current 299 | video frame as having. 
300 | """ 301 | return self._nextFrameT - self._frameInterval 302 | 303 | def _updateFrameTexture(self): 304 | if self._nextFrameT is None: 305 | # movie has no current position, need to reset the clock 306 | # to zero in order to have the timing logic work 307 | # otherwise the video stream would skip frames until the 308 | # time since creating the movie object has passed 309 | self._videoClock.reset() 310 | self._nextFrameT = 0 311 | 312 | # only advance if next frame (half of next retrace rate) 313 | if self._nextFrameT > self.duration: 314 | self._onEos() 315 | elif self._numpyFrame is not None: 316 | if self._nextFrameT > (self._videoClock.getTime() - 317 | old_div(self._retraceInterval, 2.0)): 318 | return None 319 | self._numpyFrame = self._mov.get_frame(self._nextFrameT) 320 | useSubTex = self.useTexSubImage2D 321 | if self._texID is None: 322 | self._texID = GL.GLuint() 323 | GL.glGenTextures(1, ctypes.byref(self._texID)) 324 | useSubTex = False 325 | 326 | # bind the texture in openGL 327 | GL.glEnable(GL.GL_TEXTURE_2D) 328 | # bind that name to the target 329 | GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID) 330 | # makes the texture map wrap (this is actually default anyway) 331 | GL.glTexParameteri( 332 | GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT) 333 | # data from PIL/numpy is packed, but default for GL is 4 bytes 334 | GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1) 335 | # important if using bits++ because GL_LINEAR 336 | # sometimes extrapolates to pixel vals outside range 337 | if self.interpolate: 338 | GL.glTexParameteri( 339 | GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR) 340 | GL.glTexParameteri( 341 | GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR) 342 | if useSubTex is False: 343 | GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8, 344 | self._numpyFrame.shape[1], 345 | self._numpyFrame.shape[0], 0, 346 | GL.GL_RGB, GL.GL_UNSIGNED_BYTE, 347 | self._numpyFrame.ctypes) 348 | else: 349 | 
GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, 350 | self._numpyFrame.shape[1], 351 | self._numpyFrame.shape[0], 352 | GL.GL_RGB, GL.GL_UNSIGNED_BYTE, 353 | self._numpyFrame.ctypes) 354 | else: 355 | GL.glTexParameteri( 356 | GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST) 357 | GL.glTexParameteri( 358 | GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST) 359 | if useSubTex is False: 360 | GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8, 361 | self._numpyFrame.shape[1], 362 | self._numpyFrame.shape[0], 0, 363 | GL.GL_BGR, GL.GL_UNSIGNED_BYTE, 364 | self._numpyFrame.ctypes) 365 | else: 366 | GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, 367 | self._numpyFrame.shape[1], 368 | self._numpyFrame.shape[0], 369 | GL.GL_BGR, GL.GL_UNSIGNED_BYTE, 370 | self._numpyFrame.ctypes) 371 | GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE, 372 | GL.GL_MODULATE) # ?? do we need this - think not! 373 | 374 | if not self.status == PAUSED: 375 | self._nextFrameT += self._frameInterval 376 | 377 | def draw(self, win=None): 378 | """Draw the current frame to a particular visual.Window (or to the 379 | default win for this object if not specified). The current 380 | position in the movie will be determined automatically. 381 | 382 | This method should be called on every frame that the movie is 383 | meant to appear. 
384 | """ 385 | 386 | if (self.status == NOT_STARTED or 387 | (self.status == FINISHED and self.loop)): 388 | self.play() 389 | elif self.status == FINISHED and not self.loop: 390 | return 391 | if win is None: 392 | win = self.win 393 | self._selectWindow(win) 394 | self._updateFrameTexture() # will check if it's needed 395 | 396 | # scale the drawing frame and get to centre of field 397 | GL.glPushMatrix() # push before drawing, pop after 398 | # push the data for client attributes 399 | GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS) 400 | 401 | self.win.setScale('pix') 402 | # move to centre of stimulus and rotate 403 | vertsPix = self.verticesPix 404 | 405 | # bind textures 406 | GL.glActiveTexture(GL.GL_TEXTURE1) 407 | GL.glBindTexture(GL.GL_TEXTURE_2D, 0) 408 | GL.glEnable(GL.GL_TEXTURE_2D) 409 | GL.glActiveTexture(GL.GL_TEXTURE0) 410 | GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID) 411 | GL.glEnable(GL.GL_TEXTURE_2D) 412 | 413 | # sets opacity (1,1,1 = RGB placeholder) 414 | GL.glColor4f(1, 1, 1, self.opacity) 415 | 416 | array = (GL.GLfloat * 32)( 417 | 1, 1, # texture coords 418 | vertsPix[0, 0], vertsPix[0, 1], 0., # vertex 419 | 0, 1, 420 | vertsPix[1, 0], vertsPix[1, 1], 0., 421 | 0, 0, 422 | vertsPix[2, 0], vertsPix[2, 1], 0., 423 | 1, 0, 424 | vertsPix[3, 0], vertsPix[3, 1], 0., 425 | ) 426 | 427 | # 2D texture array, 3D vertex array 428 | GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array) 429 | GL.glDrawArrays(GL.GL_QUADS, 0, 4) 430 | GL.glPopClientAttrib() 431 | GL.glPopAttrib() 432 | GL.glPopMatrix() 433 | # unbind the textures 434 | GL.glActiveTexture(GL.GL_TEXTURE0) 435 | GL.glBindTexture(GL.GL_TEXTURE_2D, 0) 436 | GL.glEnable(GL.GL_TEXTURE_2D) # implicitly disables 1D 437 | 438 | def seek(self, t): 439 | """Go to a specific point in time for both the audio and video streams 440 | """ 441 | # video is easy: set both times to zero and update the frame texture 442 | self._nextFrameT = t 443 | self._videoClock.reset(t) 444 | self._audioSeek(t) 
445 | 446 | def _audioSeek(self, t): 447 | sound = self.sound 448 | # for sound we need to extract the array again and just begin at new 449 | # loc 450 | if self._audioStream is None: 451 | return # do nothing 452 | self._audioStream.stop() 453 | sndArray = self._mov.audio.to_soundarray() 454 | startIndex = int(t * self._mov.audio.fps) 455 | self._audioStream = sound.Sound( 456 | sndArray[startIndex:, :], sampleRate=self._mov.audio.fps) 457 | self._audioStream.play() 458 | 459 | def _getAudioStreamTime(self): 460 | return self._audio_stream_clock.getTime() 461 | 462 | def _unload(self): 463 | try: 464 | # remove textures from graphics card to prevent crash 465 | self.clearTextures() 466 | except Exception: 467 | pass 468 | self._mov = None 469 | self._numpyFrame = None 470 | self._audioStream = None 471 | self.status = FINISHED 472 | 473 | def _onEos(self): 474 | if self.loop: 475 | self.seek(0.0) 476 | else: 477 | self.status = FINISHED 478 | self.stop() 479 | 480 | if self.autoLog: 481 | self.win.logOnFlip("Set %s finished" % self.name, 482 | level=logging.EXP, obj=self) 483 | 484 | def __del__(self): 485 | self._unload() 486 | 487 | def setAutoDraw(self, val, log=None): 488 | """Add or remove a stimulus from the list of stimuli that will be 489 | automatically drawn on each flip 490 | 491 | :parameters: 492 | - val: True/False 493 | True to add the stimulus to the draw list, False to remove it 494 | """ 495 | if val: 496 | self.play(log=False) # set to play in case stopped 497 | else: 498 | self.pause(log=False) 499 | # add to drawing list and update status 500 | setAttribute(self, 'autoDraw', val, log) 501 | -------------------------------------------------------------------------------- /test_misc_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This experiment was created using PsychoPy2 Experiment Builder (v1.81.02), November 29, 2014, at 08:30 5 | 
If you publish work using this script please cite the relevant PsychoPy publications 6 | Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13. 7 | Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008 8 | """ 9 | 10 | from __future__ import division # so that 1/3=0.333 instead of 1/3=0 11 | from psychopy import visual, core, data, event, logging, gui 12 | from psychopy.constants import * # things like STARTED, FINISHED 13 | import numpy as np # whole numpy lib is available, prepend 'np.' 14 | from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray 15 | from numpy.random import random, randint, normal, shuffle 16 | import os # handy system and path functions 17 | 18 | def show_intro(win, escape_key_list): 19 | 20 | # Initialize components for Routine "start" 21 | startClock = core.Clock() 22 | 23 | text_2 = visual.TextStim(win=win, name='text_2', 24 | text=u'Thank you for your participation in our subjective testing!\nYou will be watching a set of video sequences one after the other.\nWhile the video is playing, please record your quality of experience.\nAfter each video finishes, please record your overall quality of experience.\nIf you have any questions please ask them now.\n\nHit Enter to start!', 25 | font=u'Arial', 26 | pos=(0, 0), height=0.1, wrapWidth=1.5, ori=0, 27 | color=u'black', colorSpace='rgb', opacity=1, 28 | depth=0.0); 29 | 30 | if 0: 31 | 32 | mouse = event.Mouse(win=win) 33 | 34 | polygon = visual.Rect( 35 | win=win, name='polygon', 36 | width=(0.4, 0.4)[0], height=(0.4, 0.4)[1], 37 | ori=0, pos=(0, -.5), 38 | lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb', 39 | fillColor=[1,1,1], fillColorSpace='rgb', 40 | opacity=1, depth=-1.0, interpolate=True) 41 | 42 | continue_text = visual.TextStim(win=win, name='Continue', 43 | text='Continue', 44 | 
font='Arial', 45 | pos=(0, -.5), height=0.1, wrapWidth=None, ori=0, 46 | color='black', colorSpace='rgb', opacity=1, 47 | depth=-2.0) 48 | 49 | 50 | while True: # Forever. 51 | 52 | text_2.draw() 53 | polygon.draw() 54 | continue_text.draw() 55 | win.flip() 56 | 57 | if mouse.isPressedIn(polygon): 58 | continueRoutine = False 59 | break 60 | 61 | if 1: 62 | 63 | # ------Prepare to start Routine "start"------- 64 | t = 0 65 | startClock.reset() # clock 66 | frameN = -1 67 | continueRoutine = True 68 | #routineTimer.add(10.000000) 69 | # update component parameters for each repeat 70 | # keep track of which components have finished 71 | startComponents = [text_2] 72 | for thisComponent in startComponents: 73 | if hasattr(thisComponent, 'status'): 74 | thisComponent.status = NOT_STARTED 75 | 76 | # -------Start Routine "start"------- 77 | while continueRoutine:# and routineTimer.getTime() > 0: 78 | # get current time 79 | t = startClock.getTime() 80 | frameN = frameN + 1 # number of completed frames (so 0 is the first frame) 81 | # update/draw components on each frame 82 | 83 | # *text_2* updates 84 | if t >= 0.0 and text_2.status == NOT_STARTED: 85 | # keep track of start time/frame for later 86 | text_2.tStart = t 87 | text_2.frameNStart = frameN # exact frame index 88 | text_2.setAutoDraw(True) 89 | #frameRemains = 0.0 + 10.0- win.monitorFramePeriod * 0.75 # most of one frame period left 90 | #if text_2.status == STARTED:# and t >= frameRemains: 91 | #text_2.setAutoDraw(False) 92 | 93 | # check if all components have finished 94 | if not continueRoutine: # a component has requested a forced-end of Routine 95 | break 96 | continueRoutine = False # will revert to True if at least one component still running 97 | for thisComponent in startComponents: 98 | if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: 99 | continueRoutine = True 100 | #break # at least one component has not yet finished 101 | 102 | # check for quit (the Esc key) 103 | if 
event.getKeys(keyList=escape_key_list): 104 | print(1)#core.quit() 105 | if event.getKeys(keyList=["return"]): 106 | continueRoutine = False 107 | 108 | # refresh the screen 109 | if continueRoutine: # don't flip if this routine is over or we'll get a blank screen 110 | win.flip() 111 | 112 | # -------Ending Routine "start"------- 113 | for thisComponent in startComponents: 114 | if hasattr(thisComponent, "setAutoDraw"): 115 | thisComponent.setAutoDraw(False) 116 | 117 | def check_subject_info(expInfo, stored_scores_dir): 118 | 119 | valid_subject = 1 120 | subject_id = '-1' 121 | subject_session = '-1' 122 | 123 | if expInfo['Subject_id'] == '': 124 | print("Subject did not enter id, exiting.") 125 | valid_subject = 0 126 | else: 127 | subject_id = int(expInfo['Subject_id']) 128 | 129 | if expInfo['session'] == '': 130 | print("Subject did not enter session, exiting.") 131 | valid_subject = 0 132 | elif np.int(expInfo['session']) < 0 or np.int(expInfo['session']) > 2: 133 | print("Session # invalid, exiting.") 134 | valid_subject = 0 135 | else: 136 | subject_session = int(expInfo['session']) 137 | 138 | save_dest = stored_scores_dir + 'Subject_' + str(subject_id) + '_Session_' + str(subject_session) 139 | 140 | if os.path.isfile(save_dest + '.csv'): 141 | print("This session and subject id already exists.") 142 | valid_subject = 0 143 | 144 | return save_dest, valid_subject, subject_id, subject_session 145 | 146 | def after_train_text(win, escape_key_list): 147 | 148 | after_train_screen = visual.TextStim(win=win, name='after_train_screen', 149 | text=u'This ends your training. 
Hit Enter to proceed to testing!', 150 | font=u'Arial', 151 | pos=(0, 0), height=0.1, wrapWidth=1.5, ori=0, 152 | color=u'black', colorSpace='rgb', opacity=1, 153 | depth=0.0); 154 | 155 | continueRoutine = True 156 | 157 | startComponents = [after_train_screen] 158 | for thisComponent in startComponents: 159 | if hasattr(thisComponent, 'status'): 160 | thisComponent.status = NOT_STARTED 161 | 162 | while continueRoutine: 163 | 164 | if after_train_screen.status == NOT_STARTED: 165 | after_train_screen.setAutoDraw(True) 166 | 167 | # check if all components have finished 168 | if not continueRoutine: # a component has requested a forced-end of Routine 169 | break 170 | continueRoutine = False # will revert to True if at least one component still running 171 | for thisComponent in startComponents: 172 | if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: 173 | continueRoutine = True 174 | 175 | if event.getKeys(keyList=["return"]): 176 | continueRoutine = False 177 | 178 | # refresh the screen 179 | if continueRoutine: # don't flip if this routine is over or we'll get a blank screen 180 | win.flip() 181 | 182 | # -------Ending Routine "start"------- 183 | for thisComponent in startComponents: 184 | if hasattr(thisComponent, "setAutoDraw"): 185 | thisComponent.setAutoDraw(False) 186 | 187 | return 1 188 | 189 | def goodbye_screen(win, escape_key_list): 190 | 191 | endClock = core.Clock() 192 | 193 | text = visual.TextStim(win=win, name='text', 194 | text=u'The test is now over.\nYou may now exit the room.\nThank you for your participation!', 195 | font=u'Arial', 196 | pos=(0, 0), height=0.1, wrapWidth=None, ori=0, 197 | color=u'black', colorSpace='rgb', opacity=1, 198 | depth=0.0); 199 | 200 | continueRoutine = True 201 | 202 | endComponents = [text] 203 | for thisComponent in endComponents: 204 | if hasattr(thisComponent, 'status'): 205 | thisComponent.status = NOT_STARTED 206 | 207 | endClock.reset() 208 | 209 | tzero = endClock.getTime() 210 
| 211 | while continueRoutine: 212 | 213 | t = endClock.getTime() 214 | 215 | if text.status == NOT_STARTED: 216 | 217 | text.setAutoDraw(True) 218 | 219 | if t > 5 + tzero: 220 | break 221 | 222 | if not continueRoutine: # a component has requested a forced-end of Routine 223 | break 224 | 225 | continueRoutine = False # will revert to True if at least one component still running 226 | for thisComponent in endComponents: 227 | if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: 228 | continueRoutine = True 229 | break # at least one component has not yet finished 230 | 231 | # refresh the screen 232 | if continueRoutine: # don't flip if this routine is over or we'll get a blank screen 233 | win.flip() 234 | 235 | # -------Ending Routine "end"------- 236 | for thisComponent in endComponents: 237 | if hasattr(thisComponent, "setAutoDraw"): 238 | thisComponent.setAutoDraw(False) 239 | 240 | return 1 241 | 242 | def get_final_score(win, rating_final, escape_key_list): 243 | 244 | continueRoutine = True 245 | rating_final.reset() 246 | 247 | trialComponents = [rating_final] 248 | for thisComponent in trialComponents: 249 | if hasattr(thisComponent, 'status'): 250 | thisComponent.status = NOT_STARTED 251 | 252 | while continueRoutine: 253 | 254 | rating_final.draw() 255 | continueRoutine = False # will revert to True if at least one component still running 256 | for thisComponent in trialComponents: 257 | if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: 258 | continueRoutine = True 259 | break # at least one component has not yet finished 260 | 261 | # check for quit (the Esc key) 262 | if event.getKeys(keyList=escape_key_list): 263 | core.quit() 264 | 265 | # refresh the screen 266 | if continueRoutine: # don't flip if this routine is over or we'll get a blank screen 267 | win.flip() 268 | 269 | final_score = rating_final.getRating() 270 | return final_score 271 | 272 | def train_subject(win, train_videos, train_audios, 
rating_continuous, rating_final, escape_key_list): 273 | 274 | movie = visual.MovieStim3_christos(win=win, name='movie',units='pix', 275 | filename=train_videos[0], noAudio=False, audio_filename=train_audios[0], 276 | ori=0, pos=[0, 0], opacity=1, 277 | size=[1920, 1080], 278 | depth=0.0, 279 | ) 280 | 281 | for video_file, audio_file in zip(train_videos, train_audios): 282 | 283 | #------Prepare to start Routine "trial"------- 284 | t = 0 285 | 286 | # update component parameters for each repeat 287 | movie.setMovie(video_file, audio_file) 288 | 289 | rating_continuous.reset() 290 | 291 | # keep track of which components have finished 292 | trialComponents = [movie, rating_continuous] 293 | for thisComponent in trialComponents: 294 | if hasattr(thisComponent, 'status'): 295 | thisComponent.status = NOT_STARTED 296 | 297 | current_frame_time = [] 298 | continuous_scores = [] 299 | 300 | #-------Start Routine "trial"------- 301 | continueRoutine = True 302 | while continueRoutine: #and routineTimer.getTime() > 0: 303 | 304 | # *movie* updates 305 | if t >= 0.0 and movie.status == NOT_STARTED: 306 | win.setColor([-1, -1, -1]) 307 | # keep track of start time/frame for later 308 | movie.tStart = t # underestimates by a little under one frame 309 | movie.play() 310 | elif movie.status == STARTED:# and t <= timeout:#(timeout-win.monitorFramePeriod*0.75): #most of one frame period left 311 | movie.draw() 312 | rating_continuous.draw() 313 | if movie.status == STARTED: 314 | continuous_scores.append(rating_continuous.getRating()) 315 | current_frame_time.append(movie.getCurrentFrameTime()) 316 | 317 | continueRoutine = False # will revert to True if at least one component still running 318 | for thisComponent in trialComponents: 319 | if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: 320 | continueRoutine = True 321 | break # at least one component has not yet finished 322 | if movie.status == FINISHED: 323 | continueRoutine = False 324 | 
win.setColor([0, 0, 0]) 325 | 326 | # check for quit (the Esc key) 327 | if event.getKeys(keyList=escape_key_list): 328 | core.quit() 329 | 330 | # refresh the screen 331 | if continueRoutine: # don't flip if this routine is over or we'll get a blank screen 332 | win.flip() 333 | 334 | #-------Ending Routine "trial"------- 335 | for thisComponent in trialComponents: 336 | if hasattr(thisComponent, "setAutoDraw"): 337 | thisComponent.setAutoDraw(False) 338 | 339 | final_score = get_final_score(win, rating_final, escape_key_list) 340 | 341 | 342 | 343 | --------------------------------------------------------------------------------