Add left/right trigger codes for final outcomes; adjust tests and test data accordingly.

parent 0c36dfb8
......@@ -11,6 +11,8 @@ import itertools
import numpy as np
import pandas as pd
from sp_experiment.define_ttl_triggers import provide_trigger_dict
def get_payoff_settings(ev_diff):
"""Provide a set of possible payoff distributions.
......@@ -205,9 +207,15 @@ def provide_balancing_selection(df, payoff_settings):
"""
# Get sampling actions and corresponding outcomes so far
actions = df[df['value'].isin([5, 6])]['action'].to_numpy(copy=True,
dtype=int)
outcomes = df[df['value'] == 9]['outcome'].to_numpy(copy=True, dtype=int)
trig_dict = provide_trigger_dict()
trig_sample = [ord(trig_dict['trig_left_choice']),
ord(trig_dict['trig_right_choice'])]
trig_out = [ord(trig_dict['trig_show_out_l']),
ord(trig_dict['trig_show_out_r'])]
actions = df[df['value'].isin(trig_sample)]['action'].to_numpy(copy=True,
dtype=int)
outcomes = df[df['value'].isin(trig_out)]['outcome'].to_numpy(copy=True,
dtype=int)
# combine actions and outcomes to code outcomes on the left with negative
# sign outcomes on the right with positive sign ... will end up with stim
......
......@@ -31,35 +31,39 @@ def provide_trigger_dict():
trigger_dict['trig_final_choice'] = bytes([7])
# When displaying outcomes during sampling
trigger_dict['trig_mask_outcome'] = bytes([8])
trigger_dict['trig_show_outcome'] = bytes([9])
trigger_dict['trig_mask_out_l'] = bytes([8])
trigger_dict['trig_show_out_l'] = bytes([9])
trigger_dict['trig_mask_out_r'] = bytes([10])
trigger_dict['trig_show_out_r'] = bytes([11])
# Indication when a final choice is started
trigger_dict['trig_new_final_choice'] = bytes([10])
trigger_dict['trig_new_final_choice'] = bytes([12])
# Whenever a final choice is started (fixation stim)
trigger_dict['trig_final_choice_onset'] = bytes([11])
trigger_dict['trig_final_choice_onset'] = bytes([13])
# Inquiring actions during CHOICE
trigger_dict['trig_left_final_choice'] = bytes([12])
trigger_dict['trig_right_final_choice'] = bytes([13])
trigger_dict['trig_left_final_choice'] = bytes([14])
trigger_dict['trig_right_final_choice'] = bytes([15])
# Displaying outcomes during CHOICE
trigger_dict['trig_mask_final_outcome'] = bytes([14])
trigger_dict['trig_show_final_outcome'] = bytes([15])
trigger_dict['trig_mask_final_out_l'] = bytes([16])
trigger_dict['trig_show_final_out_l'] = bytes([17])
trigger_dict['trig_mask_final_out_r'] = bytes([18])
trigger_dict['trig_show_final_out_r'] = bytes([19])
# trigger for ERROR, when a trial has to be reset
# (ignore all markers prior to this marker within this trial)
trigger_dict['trig_error'] = bytes([16])
trigger_dict['trig_error'] = bytes([20])
# If the subject sampled a maximum of steps and now wants to take yet
# another one, we force stop and initiate a final choice
trigger_dict['trig_forced_stop'] = bytes([17])
trigger_dict['trig_forced_stop'] = bytes([21])
# If subject tried to make a final choice before taking at least one sample
trigger_dict['trig_premature_stop'] = bytes([18])
trigger_dict['trig_premature_stop'] = bytes([22])
# Display the block feedback
trigger_dict['trig_block_feedback'] = bytes([19])
trigger_dict['trig_block_feedback'] = bytes([23])
return trigger_dict
......@@ -89,10 +89,10 @@ def make_events_json_dict():
'during sampling'),
trigger_dict['trig_final_choice']: ('subject chose *stop* during '
'sampling'),
trigger_dict['trig_mask_outcome']: ('a masked outcome is shown '
'after sampling'),
trigger_dict['trig_show_outcome']: ('a masked outcome is revealed '
'after sampling'),
trigger_dict['trig_mask_out_l']: ('a masked outcome is shown '
'after sampling (left side)'),
trigger_dict['trig_show_out_r']: ('a masked outcome is revealed '
'after sampling (right side)'),
trigger_dict['trig_new_final_choice']: ('color of fixcross is '
'changed to indicate '
'start of a final choice'),
......@@ -105,12 +105,12 @@ def make_events_json_dict():
'for final choice'),
trigger_dict['trig_right_final_choice']: ('subject chose *right* '
'for final choice'),
trigger_dict['trig_mask_final_outcome']: ('a masked outcome is '
'shown after final '
'choice'),
trigger_dict['trig_show_final_outcome']: ('a masked outcome is '
'revealed after final '
'choice'),
trigger_dict['trig_mask_final_out_l']: ('a masked outcome is '
'shown after final '
'choice (left side)'),
trigger_dict['trig_show_final_out_r']: ('a masked outcome is '
'revealed after final '
'choice (right side)'),
trigger_dict['trig_error']: ('color of fixcross is changed to '
'indicate an error (ignore all '
'markers prior to this marker '
......
This diff is collapsed.
......@@ -66,7 +66,7 @@ def test_get_final_choice_outcomes():
"""Test getting final choice outcomes."""
df = pd.read_csv(no_errors_file, sep='\t')
outcomes = get_final_choice_outcomes(df)
expected_outcomes = [8, 6] # as can be read in the data file
expected_outcomes = [5, 9] # as can be read in the data file
np.testing.assert_array_equal(outcomes, expected_outcomes)
......@@ -81,12 +81,12 @@ def test_get_payoff_dict():
# Make a more thorough test with the second payoff distribution
payoff_dict = get_payoff_dict(df, 1)
read_set = set(payoff_dict[0])
expected_set = set((1, 6))
expected_set = set((3, 9))
assert len(read_set) == len(expected_set)
assert sorted(read_set) == sorted(expected_set)
read_set = set(payoff_dict[1])
expected_set = set((9, 5))
expected_set = set((7, 8))
assert len(read_set) == len(expected_set)
assert sorted(read_set) == sorted(expected_set)
......@@ -107,8 +107,8 @@ def test_get_passive_action():
assert isinstance(keys_rts[0], tuple)
# did we read the correct numbers
assert keys_rts[0][0] == KEYLIST_SAMPLES[1]
np.testing.assert_allclose(keys_rts[0][1], 0.328, rtol=0.01)
assert keys_rts[0][0] == KEYLIST_SAMPLES[0]
np.testing.assert_allclose(keys_rts[0][1], 0.227, rtol=0.01)
def test_get_passive_outcome():
......@@ -121,7 +121,7 @@ def test_get_passive_outcome():
assert outcome == outcomes[0]
# Other samples give us reasonable results
expected_outcomes = [2, 8, 8, 8, 3, 3, 3, 3, 3, 3, 3, 3]
expected_outcomes = [3, 3, 3, 5, 5, 5, 4, 5, 3, 3, 3, 3]
for sample, expected in zip(range(12), expected_outcomes):
out = get_passive_outcome(df, 0, sample)
assert out == expected
......@@ -142,7 +142,7 @@ def test_log_data():
# Check that action_types are as expected
action_types = df['action_type'].dropna().unique().tolist()
np.testing.assert_array_equal(action_types,
['sample', 'stop', 'final_choice'])
['sample', 'forced_stop', 'final_choice'])
# Create a temporary logging file
myhash = str(hash(os.times()))
......
......@@ -14,6 +14,7 @@ import pandas as pd
import sp_experiment
from sp_experiment.define_payoff_settings import get_random_payoff_dict
from sp_experiment.define_ttl_triggers import provide_trigger_dict
# CONSTANTS
......@@ -62,7 +63,11 @@ def calc_bonus_payoff(sub_id, conversion_factor=0.01):
return bonus
else:
df = pd.read_csv(fpath, sep='\t')
points += np.sum(df[df['value'] == 15]['outcome'].to_numpy())
trig_dict = provide_trigger_dict()
trig_fin_out = [ord(trig_dict['trig_show_final_out_l']),
ord(trig_dict['trig_show_final_out_r'])]
vals = df[df['value'].isin(trig_fin_out)]['outcome'].to_numpy()
points += np.sum(vals)
money = int(np.ceil(points * conversion_factor))
bonus = f'earned {money} Euros as bonus.'
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment