Commit 9555081a authored by Simon Ciranka

plot trialwise beta predictive distributions

parent 2ae4e77c
@@ -40,7 +40,7 @@ posResp = round(posResp,2);
#sigma=0.1
#here i define a function that gives us the probability density of all possible values given the mean and the standard deviation
#of the posterior beta distribution that results from binomial updating.
-discretenormalMarble005<-function (possibleResp, a, b){
+discreteBetaMarble005<-function (possibleResp, a, b){
lower = min(possibleResp);
upper = max(possibleResp);
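The body of this function is elided by the diff. As a minimal sketch of what `discreteBetaMarble005` presumably computes (my reconstruction, not the original code), it evaluates the beta density on the response grid and renormalizes it into a discrete probability mass function:

```r
# Hypothetical sketch: beta(a, b) density on the response grid,
# renormalized to a discrete pmf, plus the mean of that discretized distribution.
discreteBetaSketch <- function(possibleResp, a, b) {
  dens <- dbeta(possibleResp, a, b)  # beta density at each grid point
  pr   <- dens / sum(dens)           # renormalize so probabilities sum to 1
  m    <- sum(possibleResp * pr)     # discrete mean
  list(pr, m)                        # same [[1]]/[[2]] access pattern as the script
}
```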
@@ -107,8 +107,8 @@ linearWeights<- function(v){
# get pr and m. I need to dig into what this means exactly.
# i use the estimated mean and sigma to be able to get discrete log likelihoods.
-pr = discretenormalMarble005(posResp,aBeta, bBeta)[[1]];
-m = discretenormalMarble005(posResp,aBeta, bBeta)[[2]];
+pr = discreteBetaMarble005(posResp,aBeta, bBeta)[[1]];
+m = discreteBetaMarble005(posResp,aBeta, bBeta)[[2]];
#check where the possible responses equal the exact response of the subject. now we have the response likelihood.
probEstimate = pr[which(posResp==subEstimate)]; # SEE HERE YOU FIND THE PROB EST THAT FITS THE GIVEN SUBJECTIVE VALUE
#zero doesn't work.
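A small self-contained illustration of this lookup and the log(0) problem (the shape parameters and the subject response here are example values, not from the data):

```r
posResp <- round(seq(0, 1, 0.01), 2)       # response grid, rounded as in the script
dens    <- dbeta(posResp, 3, 2)            # example posterior with a = 3, b = 2
pr      <- dens / sum(dens)                # discretized pmf
subEstimate  <- 0.6                        # example subject response
probEstimate <- pr[which(posResp == subEstimate)]
probEstimate <- max(probEstimate, 1e-10)   # guard: log(0) would give -Inf
logLik <- log(probEstimate)
```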
@@ -173,8 +173,8 @@ exponentialWeights<- function(v){
# get pr and m. I need to dig into what this means exactly.
# i use the estimated mean and sigma to be able to get discrete log likelihoods.
-pr = discretenormalMarble005(posResp,aBeta, bBeta)[[1]];
-m = discretenormalMarble005(posResp,aBeta, bBeta)[[2]];
+pr = discreteBetaMarble005(posResp,aBeta, bBeta)[[1]];
+m = discreteBetaMarble005(posResp,aBeta, bBeta)[[2]];
#check where the possible responses equal the exact response of the subject. now we have the response likelihood.
probEstimate = pr[which(posResp==subEstimate)]; # SEE HERE YOU FIND THE PROB EST THAT FITS THE GIVEN SUBJECTIVE VALUE
#zero doesn't work.
@@ -326,4 +326,95 @@ What i get now if i look at the parameter estimate of the exponential model is a
print(learningDiscounting)
```
# Retrieve Values
In this chunk of code I use the function defined earlier to fit the Marble data and obtain the alpha and beta parameters that I need to compute everything else.
It works, but I am still racking my brain over the visualization. How can I visualize it so that it makes sense?
```{r expoBuild,message=FALSE, warning=FALSE}
GetShapeParamsExponentialWeights<- function(){
  for (i in 1:nrow(subjectLevel)){
    #reset shape parameters
    aBeta<-0 # number of successes
    bBeta<-0 # number of failures
    #here i parse the marble sequences into numeric vectors
    red<-strsplit(subjectLevel$sequence.marbles.color2[i],"")
    red<-as.numeric(unlist(red))#prepare the array
    blue<-strsplit(subjectLevel$sequence.marbles.color1[i], "")
    blue<-as.numeric(unlist(blue))
    ######## THIS IS WHERE THE WEIGHTING HAPPENS
    # each draw of the distributions is weighted with a learning rate
    for (j in 1:length(red)){
      bBeta=bBeta+red[j]^subjectLevel$learningRateExp[i]#update failures
    }# could do it in one step but it is easier to read this way.
    for (j in 1:length(blue)){
      aBeta=aBeta+blue[j]^subjectLevel$learningRateExp[i]#update successes
    }
    #store the shape parameters and the posterior moments of the beta distribution.
    subjectLevel$BetaShapeAlpha[i]<-aBeta
    subjectLevel$BetaShapeBeta[i]<-bBeta
    subjectLevel$mu[i]<-aBeta/(aBeta+bBeta)# posterior mean after the last draw.
    subjectLevel$var[i]<-(aBeta*bBeta)/((aBeta+bBeta)^2*(aBeta+bBeta+1))# posterior variance.
  }
  return(subjectLevel)
}
#fit the first subject to initialize the data frame
subjectLevel<-data[data$sub.id==Subs[1],]
AllParams<-GetShapeParamsExponentialWeights()
### here i rebuild the full data frame, subject by subject
for (i in 2:length(Subs)){
  subjectLevel<-data[data$sub.id==Subs[i],]
  data2<-GetShapeParamsExponentialWeights()
  AllParams<-rbind(AllParams,data2)
}
```
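For reference, the stored quantities are the usual moments of a Beta(α, β) distribution:

$$\mu = \frac{\alpha}{\alpha+\beta}, \qquad \sigma^2 = \frac{\alpha\,\beta}{(\alpha+\beta)^2\,(\alpha+\beta+1)}$$

The comment in the loop notes that the update could be done in one step; a vectorized equivalent would look like this (the learning rate and sequence string here are made-up illustrations):

```r
lr     <- 0.8                                    # example learning rate
seqStr <- "210012"                               # example marble-sequence string
red    <- as.numeric(strsplit(seqStr, "")[[1]])  # parse digits, as in the function
bBeta  <- sum(red ^ lr)                          # weighted failure evidence in one step
```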
Even though I still do not know how to visualize this in a meaningful way, you can look at the distributions for each trial per subject. For this I build a new data frame in long format
that contains the probability densities resulting from the fitted parameters obtained above. This at least helps me identify bad model fits and subjects.
```{r L,message=FALSE, warning=FALSE, fig.height = 10, fig.width = 20}
library(ggfortify)
library(tidyverse)
x<-seq(0,1,0.01)
density<-seq(0,1,0.01)
newData<-as_tibble(
  expand.grid(
    SubId=unique(AllParams$sub.id),
    Shown=unique(AllParams$mean.act),
    Density=density
    # Prob=x
  )
)
colnames(newData)<-c("SubId","Shown","Density")
newData%>%filter(SubId==AllParams$sub.id[1] & Shown == AllParams$mean.act[1])->FullData
FullData$Density=dbeta(x,AllParams$BetaShapeAlpha[1],AllParams$BetaShapeBeta[1])
FullData$x<-seq(0,1,0.01)
for (i in 2:nrow(AllParams)){# start at 2: row 1 already initialized FullData above
  newData%>%filter(SubId==AllParams$sub.id[i] & Shown == AllParams$mean.act[i])->Subsetted
  Subsetted$Density=dbeta(x,AllParams$BetaShapeAlpha[i],AllParams$BetaShapeBeta[i])
  Subsetted$x<-seq(0,1,0.01)
  FullData<-rbind(FullData,Subsetted)
}
for (s in unique(FullData$SubId)){# iterate over the actual subject ids rather than 1:N
  p<-ggplot(FullData[FullData$SubId==s,],aes(x=x,y=Density,color=as.factor(Shown)))+geom_point()+
    scale_y_continuous(name="Estimated Predictive Probability Density")+
    scale_color_discrete(name="Actual Probability Per Trial")+
    ggtitle(paste0("Probability Estimate Subject ",s))+
    #facet_grid(~Shown)+
    my_theme
  print(p)
}
```
\ No newline at end of file
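One caveat with the chunk above: the density rows are keyed only on `SubId` and the shown probability, so two trials of the same subject with the same `mean.act` are indistinguishable in `FullData`. A sketch keyed on the trial index instead (my variation, assuming `AllParams` has one row per trial) avoids that:

```r
grid <- round(seq(0, 1, 0.01), 2)
FullData2 <- do.call(rbind, lapply(seq_len(nrow(AllParams)), function(i) {
  data.frame(
    SubId   = AllParams$sub.id[i],
    Shown   = AllParams$mean.act[i],
    trial   = i,   # explicit trial key disambiguates repeated shown probabilities
    x       = grid,
    Density = dbeta(grid, AllParams$BetaShapeAlpha[i], AllParams$BetaShapeBeta[i])
  )
}))
```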
@@ -28,7 +28,7 @@ Make the cumulative Density
#sigma=0.1
#here i define a function that gives us the probability density of all possible values given the mean and the standard deviation
#of the posterior beta distribution that results from binomial updating.
-discretenormalMarble005<-function (possibleResp, a, b){
+discreteBetaMarble005<-function (possibleResp, a, b){
lower = min(possibleResp);
upper = max(possibleResp);
@@ -113,8 +113,8 @@ estimate of the participants to create logliks.
# get pr and m. I need to dig into what this means exactly.
# i use the estimated mean and sigma to be able to get discrete log likelihoods.
-pr = discretenormalMarble005(posResp,aBeta, bBeta)[[1]];
-m = discretenormalMarble005(posResp,aBeta, bBeta)[[2]];
+pr = discreteBetaMarble005(posResp,aBeta, bBeta)[[1]];
+m = discreteBetaMarble005(posResp,aBeta, bBeta)[[2]];
#check where the possible responses equal the exact response of the subject. now we have the response likelihood.
probEstimate = pr[which(posResp==subEstimate)]; # SEE HERE YOU FIND THE PROB EST THAT FITS THE GIVEN SUBJECTIVE VALUE
#zero doesn't work.
@@ -181,8 +181,8 @@ amount of time.
# get pr and m. I need to dig into what this means exactly.
# i use the estimated mean and sigma to be able to get discrete log likelihoods.
-pr = discretenormalMarble005(posResp,aBeta, bBeta)[[1]];
-m = discretenormalMarble005(posResp,aBeta, bBeta)[[2]];
+pr = discreteBetaMarble005(posResp,aBeta, bBeta)[[1]];
+m = discreteBetaMarble005(posResp,aBeta, bBeta)[[2]];
#check where the possible responses equal the exact response of the subject. now we have the response likelihood.
probEstimate = pr[which(posResp==subEstimate)]; # SEE HERE YOU FIND THE PROB EST THAT FITS THE GIVEN SUBJECTIVE VALUE
#zero doesn't work.
@@ -315,3 +315,99 @@ What i get now if i look at the parameter estimate of the exponential
model is an **underweighting** of all information considered.
![](LiklelihoodUpdateBeta_files/figure-markdown_strict/Show%20second%20Plot-1.png)
Retrieve Values
===============

In this chunk of code I use the function defined earlier to fit the
Marble data and obtain the alpha and beta parameters that I need to
compute everything else. It works, but I am still racking my brain
over the visualization. How can I visualize it so that it makes sense?
GetShapeParamsExponentialWeights<- function(){
  for (i in 1:nrow(subjectLevel)){
    #reset shape parameters
    aBeta<-0 # number of successes
    bBeta<-0 # number of failures
    #here i parse the marble sequences into numeric vectors
    red<-strsplit(subjectLevel$sequence.marbles.color2[i],"")
    red<-as.numeric(unlist(red))#prepare the array
    blue<-strsplit(subjectLevel$sequence.marbles.color1[i], "")
    blue<-as.numeric(unlist(blue))
    ######## THIS IS WHERE THE WEIGHTING HAPPENS
    # each draw of the distributions is weighted with a learning rate
    for (j in 1:length(red)){
      bBeta=bBeta+red[j]^subjectLevel$learningRateExp[i]#update failures
    }# could do it in one step but it is easier to read this way.
    for (j in 1:length(blue)){
      aBeta=aBeta+blue[j]^subjectLevel$learningRateExp[i]#update successes
    }
    #store the shape parameters and the posterior moments of the beta distribution.
    subjectLevel$BetaShapeAlpha[i]<-aBeta
    subjectLevel$BetaShapeBeta[i]<-bBeta
    subjectLevel$mu[i]<-aBeta/(aBeta+bBeta)# posterior mean after the last draw.
    subjectLevel$var[i]<-(aBeta*bBeta)/((aBeta+bBeta)^2*(aBeta+bBeta+1))# posterior variance.
  }
  return(subjectLevel)
}
#fit the first subject to initialize the data frame
subjectLevel<-data[data$sub.id==Subs[1],]
AllParams<-GetShapeParamsExponentialWeights()
### here i rebuild the full data frame, subject by subject
for (i in 2:length(Subs)){
  subjectLevel<-data[data$sub.id==Subs[i],]
  data2<-GetShapeParamsExponentialWeights()
  AllParams<-rbind(AllParams,data2)
}
Even though I still do not know how to visualize this in a meaningful
way, you can look at the distributions for each trial per subject. For
this I build a new data frame in long format that contains the
probability densities resulting from the fitted parameters obtained
above. This at least helps me identify bad model fits and subjects.
library(ggfortify)
library(tidyverse)
x<-seq(0,1,0.01)
density<-seq(0,1,0.01)
newData<-as_tibble(
  expand.grid(
    SubId=unique(AllParams$sub.id),
    Shown=unique(AllParams$mean.act),
    Density=density
    # Prob=x
  )
)
colnames(newData)<-c("SubId","Shown","Density")
newData%>%filter(SubId==AllParams$sub.id[1] & Shown == AllParams$mean.act[1])->FullData
FullData$Density=dbeta(x,AllParams$BetaShapeAlpha[1],AllParams$BetaShapeBeta[1])
FullData$x<-seq(0,1,0.01)
for (i in 2:nrow(AllParams)){# start at 2: row 1 already initialized FullData above
  newData%>%filter(SubId==AllParams$sub.id[i] & Shown == AllParams$mean.act[i])->Subsetted
  Subsetted$Density=dbeta(x,AllParams$BetaShapeAlpha[i],AllParams$BetaShapeBeta[i])
  Subsetted$x<-seq(0,1,0.01)
  FullData<-rbind(FullData,Subsetted)
}
for (s in unique(FullData$SubId)){# iterate over the actual subject ids rather than 1:N
  p<-ggplot(FullData[FullData$SubId==s,],aes(x=x,y=Density,color=as.factor(Shown)))+geom_point()+
    scale_y_continuous(name="Estimated Predictive Probability Density")+
    scale_color_discrete(name="Actual Probability Per Trial")+
    ggtitle(paste0("Probability Estimate Subject ",s))+
    #facet_grid(~Shown)+
    my_theme
  print(p)
}
![](LiklelihoodUpdateBeta_files/figure-markdown_strict/L-1.png) *(plus 52 further per-subject trialwise density plots: L-2.png through L-53.png)*
@@ -80,9 +80,6 @@ discretenormalMarble005<-function (possibleResp, mean, stdev){
############ fit
@@ -104,3 +101,39 @@ for (i in 1:length(Subs)){
sub.list[[i]]<-c(pars, LL) ## saves best fitting pars and LL
}
library(ggfortify)
library(tidyverse)
x<-seq(0,1,0.01)
density<-seq(0,1,0.01)
newData<-as_tibble(
  expand.grid(
    SubId=unique(AllParams$sub.id),
    Shown=unique(AllParams$mean.act),
    Density=density
    # Prob=x
  )
)
colnames(newData)<-c("SubId","Shown","Density")
newData%>%filter(SubId==AllParams$sub.id[1] & Shown == AllParams$mean.act[1])->FullData
FullData$Density=dbeta(x,AllParams$BetaShapeAlpha[1],AllParams$BetaShapeBeta[1])
FullData$x<-seq(0,1,0.01)
for (i in 2:nrow(AllParams)){# start at 2: row 1 already initialized FullData above
  newData%>%filter(SubId==AllParams$sub.id[i] & Shown == AllParams$mean.act[i])->Subsetted
  Subsetted$Density=dbeta(x,AllParams$BetaShapeAlpha[i],AllParams$BetaShapeBeta[i])
  Subsetted$x<-seq(0,1,0.01)
  FullData<-rbind(FullData,Subsetted)
}
FullData%>%filter(SubId==1)%>%ggplot(aes(x=x,y=Density,color=as.factor(Shown)))+geom_line()+
  scale_y_continuous(name="Estimated Predictive Probability Density of Subject 1")+
  scale_color_discrete(name="Actual Probability Per Trial")+
  ggtitle("Probability Estimate")+
  facet_grid(~Shown)+
  my_theme
#ggplot(aes(x=BetaShapeAlpha,y=BetaShapeBeta,color=mean.act))+geom_point()
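The commented-out line above hints at a parameter-space view; a completed sketch of that idea (my guess at the intended call, using the same columns of AllParams):

```r
library(ggplot2)
# one point per trial in (alpha, beta) space, colored by the shown probability
ggplot(AllParams, aes(x = BetaShapeAlpha, y = BetaShapeBeta, color = mean.act)) +
  geom_point()
```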