Commit 0fde51d8 authored by Simon Ciranka's avatar Simon Ciranka

I implemented MLE and Wouter's version of the Bayesian updating

parent 75a12cbc
@@ -312,6 +312,13 @@ learningDiscounting<-ggplot(,aes(x=1,y=unique(data$learningRateDisc)))+geom_viol
xlab("Data Density")+geom_hline(yintercept = 1) +
annotate("text", x=0.65, y=1, vjust = -1, label = "Ideal Observer")+
my_theme
learningHGF<-ggplot(,aes(x=1,y=unique(data$learningRateHGF)))+geom_violin(color="blue",fill="red",alpha=0.1)+geom_jitter(width = 0.2)+geom_boxplot(width=0.1, alpha=0.5)+
ggtitle("LearningRate Distribution Marbles")+
ylab("Learning Rate Estimate: Sequential")+
xlab("Data Density")+geom_hline(yintercept = 1) +
#annotate("text", x=0.65, y=1, vjust = -1, label = "Ideal Observer")+
my_theme
```
### Marble Estimate Distribution
This plot shows how the learning rates are distributed across subjects. Most subjects lie above the ideal-observer line at 1, i.e. they seem to overweight new pieces of information.
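For intuition, here is a minimal sketch of what a learning rate above 1 means under the weighted beta updating used in the fitting script (the counts `blue` and `red` are hypothetical): scaling both shape parameters by `lr` leaves the posterior mean unchanged but shrinks its standard deviation, so the subject treats the sample as more informative than an ideal observer would.
```{r}
lr <- 2                               # overweighting: lr > 1
blue <- 7; red <- 3                   # hypothetical marble counts
aBeta <- blue*lr; bBeta <- red*lr     # weighted beta shape parameters
c(mean = aBeta/(aBeta+bBeta),         # 0.7, identical for any lr > 0
  sd = sqrt(aBeta*bBeta/((aBeta+bBeta)^2*(aBeta+bBeta+1)))) # 0.1 vs 0.138 at lr = 1
```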
# debug script
library(here)
setwd(here())

# grid of possible discrete responses. NOTE: posResp was used but never defined
# in this script; this reconstruction is an assumption, matching the 0.01 grid
# step and 0.005 bin half-width in discretenormalMarble005() and the 0.01/0.99
# clamping used below.
posResp <- seq(0.01, 0.99, by = 0.01)
linearWeights <- function(v){
  lr <- v[1]
  # subjectLevel is taken from the global environment (set in the fitting loop below)
  subjectLevel$loglik <- NA_real_ # preallocate the per-trial log likelihood column
  for (i in 1:nrow(subjectLevel)){
    # reset the shape parameters for each trial's marble sequence
    aBeta <- 0 # number of successes (blue)
    bBeta <- 0 # number of failures (red)
    # split the marble sequence strings into numeric 0/1 vectors
    red <- as.numeric(unlist(strsplit(subjectLevel$sequence.marbles.color2[i], "")))
    blue <- as.numeric(unlist(strsplit(subjectLevel$sequence.marbles.color1[i], "")))
    ######## THIS IS WHERE THE WEIGHTING HAPPENS
    # each draw in the sequence is weighted by the learning rate
    for (j in 1:length(red)){
      bBeta <- bBeta + red[j]*lr # update failures
    } # could do it in one step, but it's easier to read this way
    for (j in 1:length(blue)){
      aBeta <- aBeta + blue[j]*lr # update successes
    }
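    # the equivalent one-step update would be (same result as the loops above):
    # aBeta <- sum(blue)*lr; bBeta <- sum(red)*lr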
    # sufficient statistics of the resulting beta distribution
    mu <- aBeta/(aBeta+bBeta) # posterior mean (only the final value is used)
    # posterior standard deviation: pnorm() below expects an sd, so take the
    # square root of the beta variance
    sigma <- sqrt((aBeta*bBeta)/((aBeta+bBeta)^2*(aBeta+bBeta+1)))
    subEstimate <- subjectLevel$p.response[i]/100
    # clamp the extremes: 0 and 1 fall outside the discretisation grid
    if (subEstimate == 0) subEstimate <- 0.01
    if (subEstimate == 1) subEstimate <- 0.99
    if (mu == 0) mu <- 0.01
    if (mu == 1) mu <- 0.99
    # discretise a normal approximation with the estimated mean and sd to get
    # a likelihood for every possible discrete response (pr); m is the
    # probability-weighted response grid (currently unused)
    pr <- discretenormalMarble005(posResp, mu, sigma)[[1]]
    m <- discretenormalMarble005(posResp, mu, sigma)[[2]]
    # the likelihood of the subject's response is the mass at the closest grid
    # point (an exact == comparison is fragile for floating point values)
    probEstimate <- pr[which.min(abs(posResp - subEstimate))]
    print(subEstimate)
    # a likelihood of zero cannot be logged
    if (probEstimate == 0){
      probEstimate <- 0.01
    }
    subjectLevel$loglik[i] <- log(probEstimate) # build the log likelihood vector
  }
  # G squared: -2 times the summed log likelihood (the deviance that optim minimises)
  G2 <- sum(subjectLevel$loglik, na.rm = TRUE)
  G2 <- -2*G2
  G2
}
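# quick single-subject check (a suggested usage, to run only after the data
# are loaded below): evaluate the deviance at the ideal-observer learning rate
# subjectLevel <- data[data$sub.id == Subs[1], ]
# linearWeights(c(1))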
# Discretise a normal approximation of the posterior beta distribution that
# results from binomial updating: each possible response receives the mass of
# a 0.01-wide bin centred on it.
discretenormalMarble005 <- function(possibleResp, mean, stdev){
  lower <- min(possibleResp)
  upper <- max(possibleResp)
  top <- seq(lower+0.005, upper+0.005, 0.01)    # upper bin edges
  bottom <- seq(lower-0.005, upper-0.005, 0.01) # lower bin edges
  unp <- pnorm(top, mean, stdev) - pnorm(bottom, mean, stdev)
  unp <- 0.9998*unp + 0.0001 # small floor so no bin has exactly zero mass
  p <- unp/sum(unp)          # renormalise to a probability mass function
  m <- p*possibleResp        # probability-weighted responses
  return(list(p, m))
}
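# sanity check for the discretisation (an added check, using the assumed
# posResp grid defined at the top): the mass function sums to 1 and peaks
# at the grid point closest to the requested mean
stopifnot(isTRUE(all.equal(sum(discretenormalMarble005(posResp, 0.5, 0.1)[[1]]), 1)))
print(posResp[which.max(discretenormalMarble005(posResp, 0.5, 0.1)[[1]])]) # expect 0.5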
############ fit
load("RobertsMarbleDf.RData")
data$sub.id <- as.numeric(data$sub.id)
Subs <- unique(data$sub.id)
data$sequence.marbles.color1 <- as.character(data$sequence.marbles.color1) # blue
data$sequence.marbles.color2 <- as.character(data$sequence.marbles.color2) # red
sub.list <- list()
## Model Fitting
for (i in 1:length(Subs)){
  subjectLevel <- data[data$sub.id==Subs[i],]
  # Brent's method: bounded one-dimensional search over the learning rate
  output <- optim(c(1), fn = linearWeights, method = "Brent", upper = 10, lower = 0)
  print(output)
  pars <- output$par
  LL <- output$value
  sub.list[[i]] <- c(pars, LL) # save the best-fitting learning rate and its deviance (G2)
}
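# collect the per-subject fits (an added summary; the column names are
# editorial labels, not from the original script)
fits <- data.frame(do.call(rbind, sub.list))
names(fits) <- c("learningRate", "G2")
summary(fits$learningRate) # values above 1 indicate overweighting of new evidence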