Commit cbdfd145 authored by Stefan Appelhoff's avatar Stefan Appelhoff

turn dicts into collections.OrderedDict to preserve order in py<3.6

parent 96d2822f
"""Initialize sp_experiment."""
__version__ = '0.2.2-dev'
......@@ -7,6 +7,7 @@ define_variable_meanings.make_events_json_dict: action_type, action, outcome
"""
import itertools
from collections import OrderedDict
import numpy as np
import pandas as pd
......@@ -151,7 +152,7 @@ def get_random_payoff_dict(payoff_settings, pseudorand=False, df=None):
Returns
-------
payoff_dict : dict
payoff_dict : collections.OrderedDict
Dict with keys [0, 1] and each key containing as values a list of
possible outcomes, the frequency of which corresponds to a probability.
For example payoff_dict[0] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1] for a
......@@ -171,7 +172,7 @@ def get_random_payoff_dict(payoff_settings, pseudorand=False, df=None):
# Form a payoff dictionary from the selected setting
payoff_setting = payoff_settings[selected_row_idx, :]
payoff_dict = dict()
payoff_dict = OrderedDict()
payoff_dict[0] = [int(payoff_setting[0])] * int(payoff_setting[2]*10)
payoff_dict[0] += [int(payoff_setting[1])] * int(payoff_setting[3]*10)
payoff_dict[1] = [int(payoff_setting[4])] * int(payoff_setting[6]*10)
......
......@@ -6,11 +6,12 @@ For more information, see also the "event_value" key within the
define_variable_meanings.make_events_json_dict.
"""
from collections import OrderedDict
def provide_trigger_dict():
"""Provide a dictionary mapping str names to byte values."""
trigger_dict = dict()
trigger_dict = OrderedDict()
# At the beginning and end of the experiment ... take these triggers to
# crop the meaningful EEG data. Make sure to include some time BEFORE and
......
......@@ -3,6 +3,7 @@ import os
import os.path as op
import json
from shutil import copyfile
from collections import OrderedDict
import sp_experiment
from sp_experiment.define_ttl_triggers import provide_trigger_dict
......@@ -10,7 +11,7 @@ from sp_experiment.define_ttl_triggers import provide_trigger_dict
def make_description_task_json():
"""Provide variable meanings for description task."""
events_json_dict = dict()
events_json_dict = OrderedDict()
# Start populating the dict
events_json_dict['onset'] = {
......@@ -38,7 +39,7 @@ def make_events_json_dict():
"""Provide a dict to describe all collected variables."""
# Get the trigger values
trigger_dict = provide_trigger_dict()
events_json_dict = dict()
events_json_dict = OrderedDict()
# Start populating the dict
events_json_dict['onset'] = {
......@@ -217,8 +218,9 @@ def make_events_json_dict():
}
# Keys in levels for "value" are bytes: we need to turn them into integers
events_json_dict['value']['Levels'] = {ord(key): val for key, val in
events_json_dict['value']['Levels'].items()} # noqa: E501
events_json_dict['value']['Levels'] = OrderedDict((ord(key), val)
for key, val
in events_json_dict['value']['Levels'].items()) # noqa: E501
# return
return events_json_dict
......
......@@ -18,6 +18,7 @@ To do
"""
import os
import os.path as op
from collections import OrderedDict
import numpy as np
import pandas as pd
......@@ -89,7 +90,7 @@ def navigation(nav='initial', bonus='', lang='en', yoke_map=None,
"""
if yoke_map is None:
yoke_map = {i: i for i in range(100)}
yoke_map = OrderedDict((i, i) for i in range(100))
run = False
auto = False
next = ''
......@@ -824,7 +825,7 @@ if __name__ == '__main__':
test_block_size = 1
# First 10 subjs are mapped to themselves
yoke_map = dict(zip(list(range(1, 11)), list(range(1, 11))))
yoke_map = OrderedDict(zip(list(range(1, 11)), list(range(1, 11))))
# Next 10 are mapped to first ten
for i, j in zip(list(range(11, 21)), list(range(1, 11))):
yoke_map[i] = j
......
"""Testing the setup of the payoff distributions."""
import os.path as op
from collections import OrderedDict
import numpy as np
import pandas as pd
......@@ -32,7 +33,7 @@ def test_get_random_payoff_dict():
payoff_dict, payoff_settings = get_random_payoff_dict(payoff_settings)
# Should be a dict
assert isinstance(payoff_dict, dict)
assert isinstance(payoff_dict, OrderedDict)
assert len(list(payoff_dict.values())[0]) == 10
assert len(list(payoff_dict.values())[1]) == 10
......
......@@ -3,7 +3,7 @@ import os
import os.path as op
from tempfile import gettempdir
from shutil import rmtree, copyfile
from collections import OrderedDict
import pytest
import numpy as np
......@@ -76,7 +76,7 @@ def test_get_payoff_dict():
# The trial argument is 0-indexed
payoff_dict = get_payoff_dict(df, 0)
assert isinstance(payoff_dict, dict)
assert isinstance(payoff_dict, OrderedDict)
# Make a more thorough test with the second payoff distribution
payoff_dict = get_payoff_dict(df, 1)
......
......@@ -8,6 +8,7 @@ other utilities: psychopy_utils.py
"""
import os.path as op
from collections import OrderedDict
import numpy as np
import pandas as pd
......@@ -140,7 +141,7 @@ def get_payoff_dict(df, trial):
Returns
-------
payoff_dict : dict
payoff_dict : collections.OrderedDict
Dictionary containing the reward distribution setting of the current
trial. NOTE: returns the "final" payoff_dict that was used for
drawing the reward
......@@ -208,7 +209,7 @@ def get_passive_action(df, trial, sample):
(df['action_type'].isin(admissible_actions))]
key = int(df['action'].tolist()[int(sample)])
rt = float(df['response_time'].tolist()[int(sample)])
key = dict(enumerate(KEYLIST_SAMPLES))[key]
key = OrderedDict(enumerate(KEYLIST_SAMPLES))[key]
keys_rts = [(key, rt)]
return keys_rts
......@@ -294,7 +295,7 @@ def log_data(fpath, onset='n/a', duration=0, trial='n/a', action='n/a',
value : byte | 'n/a'
the TTL trigger value (=EEG marker value) associated with an event
payoff_dict : dict | 'n/a'
payoff_dict : collections.OrderedDict | 'n/a'
Dictionary containing the reward distribution setting of the current
trial.
......@@ -314,7 +315,7 @@ def log_data(fpath, onset='n/a', duration=0, trial='n/a', action='n/a',
"""
# Infer action type
action_type_dict = dict()
action_type_dict = OrderedDict()
action_type_dict[0] = 'sample'
action_type_dict[1] = 'sample'
action_type_dict[2] = 'stop'
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment