diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml
index 3131c32..5ad9e5c 100644
--- a/.github/workflows/run_tests.yml
+++ b/.github/workflows/run_tests.yml
@@ -12,34 +12,6 @@ on:
 
 jobs:
 
-  # Test on Python 2.7
-  test-linux-27:
-
-    name: Linux (Python 2.7)
-    runs-on: ubuntu-20.04
-    container: python:2.7
-
-    env:
-      SDL_VIDEODRIVER: dummy
-      SDL_AUDIODRIVER: dummy
-      SDL_RENDER_DRIVER: software
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Install dependencies for testing
-        run: |
-          apt update && apt install -y --fix-missing libgl1-mesa-dev
-          python -m pip install --upgrade pip
-          python -m pip install pytest mock
-
-      - name: Install and test KLibs
-        run: |
-          python -m pip install .
-          klibs -h
-          pytest -vvl -rxXP
-
   # Test on all supported Python 3.x versions with Linux
   test-linux:
 
diff --git a/docs/CHANGELOG.rst b/docs/CHANGELOG.rst
index 069f373..802c742 100644
--- a/docs/CHANGELOG.rst
+++ b/docs/CHANGELOG.rst
@@ -53,6 +53,10 @@ Runtime Changes:
   returning to avoid registering spurious input.
 * KLibs will now raise an error on launch if any required tables or columns
   are missing from the database.
+* Demographics collection has been changed so that queries in
+  `user_queries.json` are skipped if they do not correspond to a column in the
+  `participants` table of the database. Additionally, the query for the
+  participant's unique identifier is now always collected first.
 
 API Changes:
 
diff --git a/klibs/KLCommunication.py b/klibs/KLCommunication.py
index af2351c..1c52e2b 100755
--- a/klibs/KLCommunication.py
+++ b/klibs/KLCommunication.py
@@ -3,8 +3,10 @@
 
 import os
 import re
+import random
 from os.path import join
-from shutil import copyfile, copytree
+from shutil import copyfile, copytree, rmtree
+from collections import OrderedDict
 
 from sdl2 import (SDL_StartTextInput, SDL_StopTextInput, SDL_KEYDOWN, SDLK_ESCAPE,
     SDLK_BACKSPACE, SDLK_RETURN, SDLK_KP_ENTER, SDL_TEXTINPUT)
 
@@ -16,10 +18,10 @@ from klibs.KLEventQueue import pump, flush
 from klibs.KLUtilities import pretty_list, now, utf8, make_hash
 from klibs.KLUtilities import colored_stdout as cso
-from klibs.KLDatabase import EntryTemplate
+from klibs.KLDatabase import _get_user_tables
 from klibs.KLRuntimeInfo import runtime_info_init
 from klibs.KLGraphics import blit, clear, fill, flip
-from klibs.KLUserInterface import ui_request, any_key
+from klibs.KLUserInterface import ui_request, key_pressed
 from klibs.KLText import TextStyle, add_text_style
 
 
@@ -37,80 +39,234 @@ def alert(text):
     flip()
 
 
-def collect_demographics(anonymous=False):
-    '''Collects participant demographics and writes them to the 'participants' table in the
-    experiment's database, based on the queries in the "demographic" section of the project's
-    user_queries.json file.
+def _simple_prompt(msg, loc=None, resp_keys=[]):
+    # Shows a message and waits for a key response
+    if not loc:
+        loc = P.screen_c
+    fill()
+    blit(msg, 5, location=loc)
+    flip()
+    resp = None
+    while not resp:
+        q = pump()
+        ui_request(queue=q)
+        # If specific response keys defined, check each of them
+        for key in resp_keys:
+            if key_pressed(key, queue=q):
+                resp = key
+                break
+        # Otherwise, end the loop if any key is pressed
+        if not len(resp_keys):
+            if key_pressed(queue=q):
+                resp = True
+    return resp
+
+
+def _get_demographics_queries(db, queries):
+    # Get all columns that need to be filled during demographics
+    required = []
+    exclude = ['id', 'created', 'random_seed', 'klibs_commit']
+    col_info = db.table_schemas['participants']
+    for col in db.get_columns('participants'):
+        if col not in exclude and not col_info[col]['allow_null']:
+            required.append(col)
+
+    # Ensure all required demographics cols have corresponding queries
+    query_cols = [q.database_field for q in queries]
+    missing = set(required).difference(set(query_cols))
+    if len(missing):
+        e = "Missing entries in '{0}' for the following database fields: {1}"
+        raise RuntimeError(e.format("user_queries.json", str(list(missing))))
 
-    If P.manual_demographics_collection = True, this function should be called at some point during
-    the setup() section of your experiment class. Otherwise, this function will be run
-    automatically when the experiment is launched.
+    # Gather queries into a dict for easy use
+    query_set = OrderedDict()
+    for q in queries:
+        if q.database_field in required:
+            query_set[q.database_field] = q
 
-    Args:
-        anonymous (bool, optional): If True, this function will log all of the anonymous values for
-            the experiment's demographic queries to the database immediately without prompting the
-            user for input.
+    return query_set
 
-    '''
-    from klibs.KLEnvironment import exp, db
 
-    # ie. demographic questions aren't being asked for this experiment
-    if not P.collect_demographics and not anonymous: return
+def collect_demographics(anonymous=False, unique_id=None):
+    """Initializes the participant ID and collects any demographics queries.
 
-    # first insert required, automatically-populated fields
-    demographics = EntryTemplate('participants')
-    demographics.log('created', now(True))
-    try:
-        # columns moved to session_info in newer templates
-        demographics.log("random_seed", P.random_seed)
-        demographics.log("klibs_commit", P.klibs_commit)
-    except ValueError:
-        pass
-
-    # collect a response and handle errors for each question
-    for q in user_queries.demographic:
-        if q.active:
-            # if querying unique identifier, make sure it doesn't already exist in db
-            if q.database_field == P.unique_identifier:
-                existing = [utf8(pid) for pid in db.get_unique_ids()]
-                while True:
-                    value = query(q, anonymous=anonymous)
-                    if utf8(value) in existing:
-                        err = ("A participant with that ID already exists!\n"
-                               "Please try a different identifier.")
-                        fill()
-                        blit(message(err, "alert", align='center', blit_txt=False), 5, P.screen_c)
-                        flip()
-                        any_key()
-                    else:
-                        break
+    Calling this function collects a unique identifier from the participant (e.g.
+    'P03') and initializes the session.
+
+    If no participant with that identifier already exists, this function will perform
+    demographics collection and add the participant to the database. All queries
+    in the project's ``user_queries.json`` file that have corresponding columns in the
+    'participants' table in the database will be collected. If klibs was launched in
+    development mode, demographics will be skipped and filled in with default values.
+
+    If an entered ID already exists in the database, a few different things can
+    happen:
+    * If the participant exists but did not fully complete the last session of the
+      task, they will be prompted whether to a) restart the last session, b) resume
+      the last session from the last completed trial, or c) skip to the next session
+      (if the project is multi-session and not on the last session).
+    * If the participant completed the last session and the project is multi-session,
+      they will be asked if they want to reload the participant and start the next
+      session. If the participant has already completed all sessions, they will be
+      asked to try a different unique ID.
+    * If the participant completed the task and the project is `not` multi-session,
+      they will be told that the ID already exists and to try a different one.
+
+    By default, this function is run automatically when an experiment is launched.
+    However, you can disable this by setting ``manual_demographics_collection`` to
+    True in the project's params file and call it manually at some later point
+    yourself. This function must be called before the start of the first block of
+    the task.
+
+    Args:
+        anonymous (bool, optional): If True, this function will auto-fill all
+            demographics fields with their anonymous values instead of collecting
+            responses from the participant. Defaults to False.
+        unique_id (str, optional): If provided, the initial unique ID prompt will be
+            skipped and this ID will be tried instead.
+
+    """
+    from klibs.KLEnvironment import db
+
+    # Define user init prompt strings
+    txt = {
+        'exists':
+            ("A participant with that ID already exists!\n"
+             "Please try a different identifier."),
+        'next_session':
+            ("This participant has completed {0} of {1} sessions.\n"
+             "Begin next session? (Yes / No)"),
+        'all_done':
+            ("This participant has already completed all sessions of the task.\n"
+             "Please enter a different identifier."),
+        'incomplete':
+            ("This participant did not complete {0} of the task.\n\n"
+             "Would you like to (r)estart from the beginning, or (c)ontinue from\n"
+             "the last completed trial?"),
+        'incomplete_alt':
+            ("This participant did not complete {0} of the task.\n\n"
+             "Would you like to (r)estart the session, (c)ontinue from the\n"
+             "last completed trial, or (s)kip to the next session?"),
+    }
+
+    # If demographics already collected, raise error
+    if P.demographics_collected:
+        e = "Demographics have already been collected for this participant."
+        raise RuntimeError(e)
+
+    # Gather demographic queries, separating id query from others
+    queries = _get_demographics_queries(db, user_queries.demographic)
+    id_query = queries[P.unique_identifier]
+    queries.pop(P.unique_identifier)
+
+    # Collect the unique identifier for the participant
+    if not unique_id:
+        unique_id = query(id_query, anonymous=anonymous)
+    p_id = db.get_db_id(unique_id)
+    while p_id is not None:
+        last_session = db.get_session_progress(p_id)
+        if not last_session['completed'] and not P.multi_user:
+            # Participant exists but didn't complete last session, so ask what to do
+            s = "the last session" if P.session_count > 1 else "all blocks"
+            if last_session['num'] == P.session_count:
+                prompt = txt['incomplete'].format(s)
+                options = ['r', 'c']
             else:
-                value = query(q, anonymous=anonymous)
-            demographics.log(q.database_field, value)
-
-    # typical use; P.collect_demographics is True and called automatically by klibs
-    if not P.demographics_collected:
-        P.participant_id = db.insert(demographics)
-        P.p_id = P.participant_id
-        P.demographics_collected = True
-        # Log info about current runtime environment to database
-        if 'session_info' in db.table_schemas.keys():
-            runtime_info = EntryTemplate('session_info')
-            for col, value in runtime_info_init().items():
-                runtime_info.log(col, value)
-            if P.condition and 'condition' in runtime_info.schema.keys():
-                runtime_info.log('condition', P.condition)
-            db.insert(runtime_info)
-        # Save copy of experiment.py and config files as they were for participant
-        if not P.development_mode:
-            pid = P.random_seed if P.multi_user else P.participant_id # pid set at end for multiuser
-            P.version_dir = join(P.versions_dir, "p{0}_{1}".format(pid, now(True)))
-            os.mkdir(P.version_dir)
-            copyfile("experiment.py", join(P.version_dir, "experiment.py"))
-            copytree(P.config_dir, join(P.version_dir, "Config"))
-    else:
-        # The context for this is: collect_demographics is set to false but then explicitly called later
-        db.update(demographics.table, demographics.defined)
+                prompt = txt['incomplete_alt'].format(s)
+                options = ['r', 'c', 's']
+            msg = message(prompt, align="center")
+            resp = _simple_prompt(msg, resp_keys=options)
+            if resp == "r":
+                # Delete all data from existing incomplete session & start again
+                # NOTE: Add prompt confirming deletion of old data?
+                P.session_number = last_session['num']
+                last = {'participant_id': p_id}
+                if P.session_count > 1:
+                    last[P.session_column] = P.session_number
+                for table in _get_user_tables(db):
+                    db.delete(table, where=last)
+                last = {'participant_id': p_id, 'session_number': P.session_number}
+                db.delete('session_info', where=last)
+            elif resp == "c":
+                # Get last completed block/trial numbers from db and set them
+                P.session_number = last_session['num']
+                P.block_number = last_session['last_block']
+                P.trial_number = last_session['last_trial'] + 1
+                P.random_seed = last_session['random_seed']
+                random.seed(P.random_seed)
+                P.resumed_session = True
+            elif resp == "s":
+                # Increment session number and continue
+                P.session_number = last_session['num'] + 1
+                P.condition = last_session['condition']
+            break
+        elif P.session_count > 1:
+            session_num = last_session['num'] + 1
+            # Already completed all sessions of the task. Create new ID?
+            if session_num > P.session_count:
+                msg = message(txt['all_done'], align="center")
+                _simple_prompt(msg)
+            # Participant has completed X of N sessions. Begin next session?
+            else:
+                msg = message(
+                    txt['next_session'].format(last_session['num'], P.session_count),
+                    align="center"
+                )
+                resp = _simple_prompt(msg, resp_keys=["y", "n", "return"])
+                if resp != "n":
+                    P.condition = last_session['condition']
+                    P.session_number = session_num
+                    break
+        else:
+            # Participant exists and not multisession, so try another
+            msg = message(txt['exists'], style="alert", align="center")
+            _simple_prompt(msg)
+        # Retry with another id
+        unique_id = query(id_query, anonymous=anonymous)
+        p_id = db.get_db_id(unique_id)
+
+    # If not reloading an existing participant, collect demographics
+    if p_id is None:
+        # Initialize demographics info for participant
+        demographics = {
+            P.unique_identifier: unique_id,
+            "created": now(True),
+        }
+        if "random_seed" in db.get_columns("participants"):
+            # Required for compatibility with older projects
+            demographics["random_seed"] = P.random_seed
+            demographics["klibs_commit"] = P.klibs_commit
+
+        # Collect all demographics queries
+        for db_col, q in queries.items():
+            demographics[db_col] = query(q, anonymous=anonymous)
+
+        # Insert demographics in database and get db id number
+        p_id = db.insert(demographics, "participants")
+
+    P.participant_id = P.p_id = p_id
+    P.demographics_collected = True
+
+    # Log info about current runtime environment to database
+    runtime_info = runtime_info_init()
+    if "session_count" in db.get_columns("session_info"):
+        runtime_info["session_count"] = P.session_count
+    if P.condition:
+        runtime_info["condition"] = P.condition
+    if not P.resumed_session:
+        db.insert(runtime_info, "session_info")
+
+    # Save copy of experiment.py and config files as they were for participant
+    if not P.development_mode:
+        # TODO: Break this into a separate function, make it more useful
+        # TODO: FileExistsError if re-creating ID within same minute
+        pid = P.random_seed if P.multi_user else P.participant_id # pid set at end for multiuser
+        P.version_dir = join(P.versions_dir, "p{0}_{1}".format(pid, now(True)))
+        if os.path.exists(P.version_dir):
+            rmtree(P.version_dir)
+        os.mkdir(P.version_dir)
+        copyfile("experiment.py", join(P.version_dir, "experiment.py"))
+        copytree(P.config_dir, join(P.version_dir, "Config"))
 
 
 def init_default_textstyles():
 
diff --git a/klibs/KLDatabase.py b/klibs/KLDatabase.py
index 27f5fc2..c617bac 100755
--- a/klibs/KLDatabase.py
+++ b/klibs/KLDatabase.py
@@ -315,24 +315,25 @@ def _build_table_schemas(self):
             table_cols = OrderedDict()
             self.cursor.execute("PRAGMA table_info({0})".format(table))
             columns = self.cursor.fetchall()
-
+            # convert sqlite3 types to python types
             for col in columns:
-                if col[2].lower() == SQL_STR:
-                    col_type = PY_STR
-                elif col[2].lower() == SQL_BIN:
-                    col_type = PY_BIN
-                elif col[2].lower() in (SQL_INT, SQL_KEY):
-                    col_type = PY_INT
-                elif col[2].lower() in (SQL_FLOAT, SQL_REAL, SQL_NUMERIC):
-                    col_type = PY_FLOAT
-                elif col[2].lower() == SQL_BOOL:
-                    col_type = PY_BOOL
+                colname, coltype, not_null, default = col[1:5]
+                if coltype.lower() == SQL_STR:
+                    py_type = PY_STR
+                elif coltype.lower() == SQL_BIN:
+                    py_type = PY_BIN
+                elif coltype.lower() in (SQL_INT, SQL_KEY):
+                    py_type = PY_INT
+                elif coltype.lower() in (SQL_FLOAT, SQL_REAL, SQL_NUMERIC):
+                    py_type = PY_FLOAT
+                elif coltype.lower() == SQL_BOOL:
+                    py_type = PY_BOOL
                 else:
                     err_str = "Invalid or unsupported type ({0}) for {1}.{2}'"
-                    raise ValueError(err_str.format(col[2], table, col[1]))
+                    raise ValueError(err_str.format(coltype, table, colname))
+                allow_null = (not_null == 0 or default is not None)
+                table_cols[colname] = {'type': py_type, 'allow_null': allow_null}
             tables[table] = table_cols
         return tables
 
@@ -586,7 +587,7 @@ def __init__(self, path, local_path=None):
         self._local_path = local_path
         # Initialize connections to database(s)
         self._primary = Database(path)
-        self._validate_structure(self._primary)
+        self._validate_structure(self._primary, P.session_count > 1)
         self._local = None
         if self.multi_user:
             shutil.copy(path, local_path)
 
@@ -603,7 +604,7 @@ def _current(self):
         # mode and the normal database otherwise
         return self._local if self.multi_user else self._primary
 
-    def _validate_structure(self, db):
+    def _validate_structure(self, db, multisession=False):
         # Ensure basic required tables exist
         e = "Required table '{0}' is not present in the database."
         required = ['participants', P.primary_table]
@@ -623,20 +624,32 @@
         for table in _get_user_tables(db):
             if not 'participant_id' in db.get_columns(table):
                 raise RuntimeError(e.format(table))
+        # Ensure that the required columns are present for multisession
+        e = "Missing required column for multi-session project '{0}' in table '{1}'."
+        if multisession:
+            user_tables = _get_user_tables(db)
+            for table in user_tables:
+                if not P.session_column in db.get_columns(table):
+                    raise RuntimeError(e.format(P.session_column, table))
+
     def _is_complete(self, pid):
-        # TODO: For multisession projects, need to know the number of sessions
-        # per experiment for this to work correctly: currently, this only checks
-        # whether all sessions so far were completed, even if there are more
-        # sessions remaining.
-        if 'session_info' in self._primary.table_schemas:
-            q = "SELECT complete FROM session_info WHERE participant_id = ?"
-            sessions = self._primary.query(q, q_vars=[pid])
+        this_id = {'participant_id': pid}
+        db = self._primary # Always use primary db for data export
+        if 'session_info' in db.tables:
+            # Ensure participant has completed all sessions of task
+            if "session_count" in db.get_columns("session_info"):
+                last_session, num_sessions = db.select(
+                    'session_info', ['session_number', 'session_count'], where=this_id
+                )[-1]
+                if last_session < num_sessions:
+                    return False
+            # Ensure all sessions were successfully completed
+            sessions = db.select('session_info', ['complete'], where=this_id)
             complete = [bool(s[0]) for s in sessions]
             return all(complete)
         else:
-            q = "SELECT id FROM trials WHERE participant_id = ?"
-            trialcount = len(self._primary.query(q, q_vars=[pid]))
+            trialcount = len(db.select('trials', ['id'], where=this_id))
             return trialcount >= P.trials_per_block * P.blocks_per_experiment
 
     def _log_export(self, pid, table):
 
@@ -655,13 +668,67 @@ def _already_exported(self, pid, table):
         matches = self._primary.select('export_history', where=this_id)
         return len(matches) > 0
 
+    def get_db_id(self, unique_id):
+        """Gets the numeric database ID for a given unique identifier.
 
-    def get_unique_ids(self):
-        """Retrieves all existing unique id values from the main database.
+        If no matching unique ID exists in the 'participants' table, this will
+        return None.
+
+        Args:
+            unique_id (str): The participant identifier (e.g. 'P03') for which
+                to retrieve the corresponding database ID.
+        Returns:
+            int or None: The numeric database ID corresponding to the given
+                unique identifier, or None if no match found.
""" - id_rows = self._primary.select('participants', columns=[P.unique_identifier]) - return [row[0] for row in id_rows] + id_filter = {P.unique_identifier: unique_id} + ret = self._primary.select('participants', columns=['id'], where=id_filter) + if not ret: + return None + return ret[0][0] + + + def get_session_progress(self, pid): + """Gets information about the last session for a given database ID. + + This retrieves the task condition, session number, and random seed, + as well the participants' progress through the task (last block/trial + number) and whether they fully completed the last session. + + This is used internally for reloading multisession projects. + + Args: + pid (int): The database ID for the participant. + + Returns: + dict: A dictonary containing information about the participant's + last session. + + """ + db = self._primary + # Gathers previous session info for a given database ID + cols = ['condition', 'session_number', 'complete', 'random_seed'] + info = db.select('session_info', columns=cols, where={'participant_id': pid}) + cond, last_session_num, completed, random_seed = info[-1] + # Gather info about the participant's progress on the last session + where = {'participant_id': pid} + if P.session_column in db.get_columns(P.primary_table): + where[P.session_column] = last_session_num + last_trial, last_block = (0, 0) + progress = db.select( + P.primary_table, [P.trial_column, P.block_column], where=where + ) + if len(progress): + last_trial, last_block = progress[-1] + return { + 'condition': cond, + 'num': last_session_num, + 'completed': completed, + 'random_seed': random_seed, + 'last_block': last_block, + 'last_trial': last_trial, + } def write_local_to_master(self): @@ -705,11 +772,9 @@ def close(self): def collect_export_data(self, base_table, multi_file=True, join_tables=[]): - uid = P.unique_identifier - participant_ids = self._primary.query("SELECT `id`, `{0}` FROM `participants`".format(uid)) - - colnames = [] + cols = {'p': []} sub = {P.unique_identifier: 'participant'} + multisession = P.session_column in self._primary.get_columns(base_table) # if P.default_participant_fields(_sf) is defined use that, but otherwise use # P.exclude_data_cols since that's the better way of doing things @@ -718,38 +783,54 @@ def collect_export_data(self, base_table, multi_file=True, join_tables=[]): for field in fields: if iterable(field): sub[field[0]] = field[1] - colnames.append(field[0]) + cols['p'].append(field[0]) else: - colnames.append(field) + cols['p'].append(field) else: for colname in self._primary.get_columns('participants'): if colname not in ['id'] + P.exclude_data_cols: - colnames.append(colname) + cols['p'].append(colname) for colname in P.append_info_cols: + if not 'info' in cols.keys(): + cols['info'] = [] if colname not in self._primary.get_columns('session_info'): err = "Column '{0}' does not exist in the session_info table." 
                raise RuntimeError(err.format(colname))
-            colnames.append(colname)
+            cols['info'].append(colname)
 
        for t in [base_table] + join_tables:
+            cols[t] = []
            for colname in self._primary.get_columns(t):
                if colname not in ['id', P.id_field_name] + P.exclude_data_cols:
-                    colnames.append(colname)
+                    cols[t].append(colname)
+
+        select_names = []
+        colnames = []
+        for t in ['p', 'info', base_table] + join_tables:
+            if not t in cols.keys():
+                continue
+            for col in cols[t]:
+                select_names.append("{0}.`{1}`".format(t, col))
+                colnames.append(col)
+
        column_names = TAB.join(colnames)
        for colname in sub.keys():
            column_names = column_names.replace(colname, sub[colname])
-
+
+        uid = P.unique_identifier
+        participant_ids = self._primary.query("SELECT `id` FROM participants")
        data = []
        for p in participant_ids:
-            selected_cols = ",".join(["`"+col+"`" for col in colnames])
-            q = "SELECT " + selected_cols + " FROM participants "
-            if len(P.append_info_cols) and 'session_info' in self._primary.table_schemas:
-                info_cols = ",".join(['participant_id'] + P.append_info_cols)
-                q += "JOIN (SELECT " + info_cols + " FROM session_info) AS info "
-                q += "ON participants.id = info.participant_id "
+            q = "SELECT {0} ".format(", ".join(select_names))
+            q += "FROM participants AS p "
+            if 'info' in cols.keys():
+                q += "JOIN session_info AS info ON p.id = info.participant_id "
            for t in [base_table] + join_tables:
-                q += "JOIN {0} ON participants.id = {0}.participant_id ".format(t)
-            q += " WHERE participants.id = ?"
-            p_data = []
+                q += "JOIN {0} ON p.id = {0}.participant_id ".format(t)
+                if multisession:
+                    session_col = "{0}.{1}".format(t, P.session_column)
+                    q += "AND info.session_number = {0} ".format(session_col)
+            q += "WHERE p.id = ? "
+            p_data = []
            for trial in self._primary.query(q, q_vars=tuple([p[0]])):
                row_str = TAB.join(utf8(col) for col in trial)
                p_data.append(row_str)
 
diff --git a/klibs/KLExperiment.py b/klibs/KLExperiment.py
index a14d0af..ddc7ae9 100755
--- a/klibs/KLExperiment.py
+++ b/klibs/KLExperiment.py
@@ -35,8 +35,6 @@ def __init__(self):
 
         self._evm = EventManager()
         self.trial_factory = TrialFactory()
-        if P.manual_trial_generation is False:
-            self.trial_factory.generate()
 
         self.event_code_generator = None
 
@@ -45,18 +43,44 @@ def __execute_experiment__(self, *args, **kwargs):
         """
         from klibs.KLGraphics import clear
+        from klibs.KLTrialFactory import TrialIterator
+
+        if not P.demographics_collected:
+            e = "Demographics must be collected before the first block of the task."
+            raise RuntimeError(e)
 
         if self.blocks == None:
+            # If structure provided, just ignore trial factory and use structure to generate?
             self.blocks = self.trial_factory.export_trials()
-        P.block_number = 0
+        # Check whether we're resuming from an incomplete session and fast-forward if we are
+        resume_session = False
+        if P.block_number > 0 or P.trial_number > 0:
+            # Drop completed blocks
+            trimmed = [block for block in self.blocks][(P.block_number - 1): ]
+            # Drop completed trials
+            if P.trial_number < len(trimmed[0].trials):
+                practice_first = trimmed[0].practice
+                trimmed[0] = TrialIterator(trimmed[0][(P.trial_number - 1): ])
+                trimmed[0].practice = practice_first # re-set practice flag if needed
+            else:
+                # If at end of current block, jump to next block
+                trimmed = trimmed[1:]
+                P.block_number += 1
+                P.trial_number = 1
+            # Prepare for resuming session
+            self.blocks = trimmed
+            P.block_number -= 1
+            resume_session = True
+
         P.trial_id = 0
         for block in self.blocks:
             P.recycle_count = 0
             P.block_number += 1
             P.practicing = block.practice
             self.block()
-            P.trial_number = 1
+            P.trial_number = P.trial_number if resume_session else 1
+            resume_session = False
             for trial in block: # ie. list of trials
                 try:
                     P.trial_id += 1 # Increments regardless of recycling
 
@@ -317,6 +341,9 @@ def run(self, *args, **kwargs):
         if not P.manual_eyelink_setup:
             self.el.setup()
 
+        if P.manual_trial_generation is False:
+            self.trial_factory.generate()
+
         self.setup()
         try:
             self.__execute_experiment__(*args, **kwargs)
 
diff --git a/klibs/KLParams.py b/klibs/KLParams.py
index 836a5a7..23347ae 100755
--- a/klibs/KLParams.py
+++ b/klibs/KLParams.py
@@ -29,6 +29,7 @@
 block_number = 0
 session_number = 1
 recycle_count = 0 # reset on a per-block basis
+resumed_session = False
 
 # Runtime Attributes
 project_name = None
 
@@ -47,13 +48,12 @@
 collect_demographics = True
 manual_demographics_collection = False
 manual_trial_generation = False
-multi_session_project = False
 multi_user = False # creates temp copy of db that gets merged into master at end
 trials_per_block = 0
 blocks_per_experiment = 0
+session_count = 1
 conditions = []
 default_condition = None
-table_defaults = {} # default column values for db tables when using EntryTemplate
 run_practice_blocks = True # (not implemented in klibs itself)
 color_output = False # whether cso() outputs colorized text or not
 
@@ -109,6 +109,9 @@
 id_field_name = "participant_id"
 primary_table = "trials"
 unique_identifier = "userhash"
+session_column = "session_num"
+block_column = "block_num"
+trial_column = "trial_num"
 default_participant_fields = [] # for legacy use
 default_participant_fields_sf = [] # for legacy use
 exclude_data_cols = ["created"]
 
diff --git a/klibs/KLRuntimeInfo.py b/klibs/KLRuntimeInfo.py
index 1af6b0b..f3567ea 100644
--- a/klibs/KLRuntimeInfo.py
+++ b/klibs/KLRuntimeInfo.py
@@ -22,10 +22,11 @@
     date text not null,
     time text not null,
     klibs_commit text not null,
-    random_seed text not null,
+    random_seed integer not null,
     trials_per_block integer not null,
     blocks_per_session integer not null,
+    session_count integer not null,
     os_version text not null,
     python_version text not null,
 
diff --git a/klibs/KLTrialFactory.py b/klibs/KLTrialFactory.py
index 96bc372..86cec4e 100755
--- a/klibs/KLTrialFactory.py
+++ b/klibs/KLTrialFactory.py
@@ -112,6 +112,12 @@ def __init__(self, block_of_trials):
         self.i = 0
         self.__practice = False
 
+    def __getitem__(self, i):
+        return self.trials[i]
+
+    def __setitem__(self, i, x):
+        self.trials[i] = x
+
     def __next__(self):
         if self.i >= self.length:
             self.i = 0
 
diff --git a/klibs/cli.py b/klibs/cli.py
index 22ea949..a7c034e 100644
--- a/klibs/cli.py
+++ b/klibs/cli.py
@@ -321,6 +321,10 @@ def run(screen_size, path, condition, devmode, no_tracker, seed):
         cond_list = "', '".join(P.conditions)
         err("'{0}' is not a valid condition for this experiment (must be one of '{1}'). "
             "Please relaunch the experiment.".format(P.condition, cond_list))
+
+    # Error if trying to use multi-user and multi-session at the same time
+    if P.multi_user and P.session_count > 1:
+        err("Multi-user mode is not currently supported for multi-session projects.")
 
     # set some basic global Params
     if devmode:
 
diff --git a/klibs/resources/template/params.py b/klibs/resources/template/params.py
index 553361f..498bd2d 100755
--- a/klibs/resources/template/params.py
+++ b/klibs/resources/template/params.py
@@ -39,9 +39,9 @@
 #########################################
 # Experiment Structure
 #########################################
-multi_session_project = False
 trials_per_block = 0
 blocks_per_experiment = 1
+session_count = 1
 conditions = []
 default_condition = None
 
diff --git a/klibs/tests/conftest.py b/klibs/tests/conftest.py
index adcda06..99cc5d4 100644
--- a/klibs/tests/conftest.py
+++ b/klibs/tests/conftest.py
@@ -24,8 +24,7 @@ def _init_params_pytest():
 
 def with_sdl():
     sdl2.SDL_ClearError()
     ret = sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO | sdl2.SDL_INIT_TIMER)
-    assert sdl2.SDL_GetError() == b""
-    assert ret == 0
+    assert ret == 0, sdl2.SDL_GetError().decode('utf-8', 'replace')
     yield
     sdl2.SDL_Quit()
 
diff --git a/klibs/tests/test_KLDatabase.py b/klibs/tests/test_KLDatabase.py
index 4b2756a..85fb232 100644
--- a/klibs/tests/test_KLDatabase.py
+++ b/klibs/tests/test_KLDatabase.py
@@ -111,6 +111,7 @@ def test_insert(self, db):
         # Test handling of 'allow null' columns
         _init_params_pytest()
         data = runtime_info_init()
+        data['session_count'] = 1
         db.insert(data, table='session_info')
         assert db.last_row_id('session_info') == 1
         # Test exception when unable to coerce value to column type
 
@@ -220,18 +221,15 @@ def test_init_multi_user(self, db_test_path):
         assert dat.table_schemas['participants']['age']['type'] == klibs.PY_INT
         dat.close()
 
-    def test_get_unique_ids(self, db_test_path):
+    def test_get_db_id(self, db_test_path):
         dat = kldb.DatabaseManager(db_test_path)
         # Add test data
         id_data = build_test_data()
         for row in id_data:
             dat.insert(row, table='participants')
         for pid in (1, 2, 3):
-            dat.insert(generate_data_row(pid), table='trials')
-            dat.insert(generate_data_row(pid, trial=2), table='trials')
-            assert "P0{0}".format(pid) in dat.get_unique_ids()
-        assert len(dat.get_unique_ids()) == 3
-        assert not "P04" in dat.get_unique_ids()
+            assert dat.get_db_id("P0{0}".format(pid)) == pid
+        assert not dat.get_db_id("P04")
         dat.close()
 
     def test_remove_data(self, db_test_path):
 
diff --git a/klibs/tests/test_KLExperiment.py b/klibs/tests/test_KLExperiment.py
index c29eaae..e46798b 100755
--- a/klibs/tests/test_KLExperiment.py
+++ b/klibs/tests/test_KLExperiment.py
@@ -14,6 +14,7 @@ def experiment():
     template_path = resource_filename('klibs', 'resources/template')
     P.ind_vars_file_path = os.path.join(template_path, "independent_variables.py")
     P.ind_vars_file_local_path = os.path.join(template_path, "doesnt_exist.py")
+    P.demographics_collected = True
     P.manual_trial_generation = True
     P.project_name = "PROJECT_NAME"
     return Experiment()