Commit 990f82b2 authored by Simon Ciranka's avatar Simon Ciranka

New Report

parent 47284d71
@@ -123,7 +123,7 @@ for(i in 1:length(TidyMarble$rt)){
if(as.numeric(as.character(TidyMarble$age[i]))<18){
TidyMarble$Agegroup[i]=1#are they adolescents?
-if(as.numeric(as.character(TidyMarble$age[i]))<12){
+if(as.numeric(as.character(TidyMarble$age[i]))<=12){
TidyMarble$Agegroup[i]=0#are they kids?
}#end double if.
}else TidyMarble$Agegroup[i]=2#or adults?
@@ -287,7 +287,7 @@ for(i in 1:length(TidyMarble$rt)){
if(as.numeric(as.character(TidyMarble$age[i]))<18){
TidyMarble$Agegroup[i]=1#are they adolescents?
-if(as.numeric(as.character(TidyMarble$age[i]))<12){
+if(as.numeric(as.character(TidyMarble$age[i]))<=12){
TidyMarble$Agegroup[i]=0#are they kids?
}#end double if.
}else TidyMarble$Agegroup[i]=2#or adults?
@@ -17,6 +17,13 @@ source('../../../R/Helpers/R_rainclouds.R')
#source('../../../R/Helpers/summarySE.R')
```
# Agenda
TODO: Posterior predictives.
* I added a first model.
* There are no age correlations in any of the parameters, which is weird; I probably did not do it right.
* We are still waiting for 16 participants, but maybe we can already start thinking about this a little more.
* Miniconference.
# Marble Data
In this document we have a look at the data from the marble task. We are supposed to have 50 participants pre, 50 post, and 50 adolescents, who performed a social and a solo version of the experiment.
@@ -63,7 +70,7 @@ MarbleData%>%group_by(subject)%>%mutate(
stat_summary(geom="pointrange",fun.y=mean,fun.ymin =function(x) mean(x)-1.96*(sd(x)/sqrt(length(x))),fun.ymax=function(x) mean(x)+1.96*(sd(x)/sqrt(length(x))),position = position_dodge(0.9),alpha=0.7)+
scale_x_discrete(name="Agegroup",breaks=c(0,1,2),labels=c("Kids","Adolescents","Adults"))+
scale_y_continuous(name="Final Payoff (Bonuspoints)")+
-coord_cartesian(ylim=c(1500,1600))+
+coord_cartesian(ylim=c(1975,2025))+
guides(alpha=F)
#facet_grid(.~Social1Ind0)
ggtitle("Points Collected by Agegroup")
@@ -77,11 +84,10 @@ MarbleData%>%group_by(subject)%>%mutate(
stat_summary(geom="bar",fun.y="mean")+
stat_summary(geom="pointrange",fun.y=mean,fun.ymin =function(x) mean(x)-1.96*(sd(x)/sqrt(length(x))),fun.ymax=function(x) mean(x)+1.96*(sd(x)/sqrt(length(x))),position = position_dodge(0.9),alpha=0.7)+
scale_x_discrete(name="Agegroup",breaks=c(0,1,2),labels=c("Kids","Adolescents","Adults"))+
-scale_y_continuous(name="Final Payoff (Bonuspoints)")+
-coord_cartesian(ylim=c(600,800))+
-guides(alpha=F)
+scale_y_continuous(name="Final Payoff (Bonuspoints)")
+# coord_cartesian(ylim=c(1800,2000))
+#guides(alpha=F)
ggsave(filename = "X_Figures/Winner.pdf")
```
lol. Adolescents. This is going to be interesting.
@@ -104,9 +110,10 @@ labels2 <- c(
"1" = "RiskAdvice",
"NULL"= "Solo"
)
#
MarbleData%>%group_by(Agegroup,DFE1DFD0,Social1Ind0)%>%summarise(
PercentRiskyChoice=mean(ChooseRisk),
-ci=1.96*sd(ChooseRisk)/sqrt(n())
+ci=1.96*sd(ChooseRisk)/sqrt(dplyr::n())
)%>%ungroup()%>%group_by(Agegroup,DFE1DFD0)%>%filter(Social1Ind0==1)->Social
@@ -131,15 +138,15 @@ MarbleData%>%ggplot(aes(y=ChooseRisk,x=Agegroup,group=DFE1DFD0))+stat_summary(ma
ggtitle("Risky Choice")
ggsave(filename ="X_Figures/RawData.pdf",height = 6,width=10)
#
# maybe
MarbleData%>%group_by(Agegroup,DFE1DFD0,Social1Ind0)%>%summarise(
PercentRiskyChoice=mean(ChooseRisk),
ci=sd(ChooseRisk)/sqrt(n())
)%>%ungroup()%>%group_by(Agegroup,DFE1DFD0)%>%filter(Social1Ind0==0)->Solo
Solo$Diff<-Social$PercentRiskyChoice-Solo$PercentRiskyChoice
Solo%>%ggplot(aes(y=Diff,fill=DFE1DFD0,x=Agegroup))+geom_bar(stat="identity",position = "dodge",color= "black")+
scale_x_discrete(name="Agegroup",breaks=c(0,1,2),labels=c("Kids","Adolescents","Adults"))+
geom_errorbar( mapping=aes(y=Diff, ymin=Diff-ci, ymax=Diff+ci),position="dodge")+
@@ -339,11 +346,24 @@ MarbleData%>%filter(DFE1DFD0=="1"&PercentBlueEstimate!="NULL")%>%mutate(
```
# Here I prepare the rest of the data
To be outsourced; this is just for debugging now.
# Models
I outsourced model fitting to the tardis.
Here I fit a first model for ALL subjects. We should look at the distributional assumptions afterwards, but judging superficially, there is not much going on age-trend-wise.
We can look at it individually later on, but I think this can provide a good first look. First I need to concatenate the subject parameters.
For this I need to retrieve the real values at some point.
```{r}
library(rstan)
load("C_ModelFits/FullLearningModel")
Parameters<-(rstan::extract(fitSep))
library(tidyverse)
#Bin_Update_Data<-Bin_Update_Data[Bin_Update_Data$typeRA=="1",]# keep only the risk trials.
Bin_Update_Data<-MarbleData%>%arrange(Agegroup)# i order it first so i can make sure that subjects are sorted by agegroup
@@ -359,403 +379,138 @@ Bin_Update_Data<-Bin_Update_Data%>%dplyr::mutate(
)
)#end PeerChoice.
#subset it to fit on only one agegroup.
-Bin_Update_Data%>%filter(Agegroup==2)->Bin_Update_Data
-Bin_Update_Data%>%filter(DFE1DFD0==0)->Bin_Update_Data
+#Bin_Update_Data%>%filter(Agegroup==2)->Bin_Update_Data
+#Bin_Update_Data%>%filter(DFE1DFD0==0)->Bin_Update_Data
# now check how many participants we have.
Bin_Update_Data$subject<-as.numeric(Bin_Update_Data$subject)
#change colname of subject into subjID
numSubjs<-length(unique(Bin_Update_Data$subject))#Total Number of Subs
subjList <- unique(Bin_Update_Data$subject)######
Sequence_Length <- unique(Bin_Update_Data[Bin_Update_Data$TotalNShown<99,]$TotalNShown)
Sequence_Length<-1
####### number of trials and which group each subject is in ######
Tsubj <- as.vector(rep(0, numSubjs))
for (sIdx in 1:numSubjs) {
curSubj <- subjList[sIdx]
Tsubj[sIdx] <- length(Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$subject) # How many entries per Subject?
#GSubj[sIdx] <- Simulations[[i]]$group
}
maxTrials <- max(Tsubj)
# Information for user continued
cat(" # of (max) trials per subject = ", maxTrials, "\n\n")
ageList<-Bin_Update_Data%>%group_by(subject)%>%dplyr::summarize(
age=mean(age)
)%>%select(age)%>%ungroup()
ageList=as.vector(ageList$age)
confList<-Bin_Update_Data%>%filter(HowSure!=102)%>%group_by(subject)%>%dplyr::summarise(
Confidence=mean(HowSure)
)
confList=as.vector(confList$Confidence)
```
```{r}
# for multiple subjects
condition <- array(0, c(numSubjs, maxTrials))#Other Chose Risk?
p_gamble <- array(0, c(numSubjs, maxTrials))
choice <- array(0, c(numSubjs, maxTrials))
safe_payoff<- array(0, c(numSubjs, maxTrials))
risky_payoff<- array(0, c(numSubjs, maxTrials))
risk1Unc0 <- array(0, c(numSubjs, maxTrials))
#specs for the uncertain trials
Sucess <- array(0, c(numSubjs, maxTrials,Sequence_Length))
Fail <- array(0, c(numSubjs, maxTrials,Sequence_Length))
p_gamble_est<- array(0, c(numSubjs, maxTrials))
Blue_Sum_All<- array(0, c(numSubjs, maxTrials))
Red_Sum_All<- array(0, c(numSubjs, maxTrials))
#Agegroup<- array(0, c(numSubjs, maxTrials))
# generate the data lists to be passed to stan
# concatenate different groups in the third dimension.
for (i in 1:numSubjs) {
curSubj <- subjList[i]
useTrials <- Tsubj[i]
condition[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$OtherChoseRisk
p_gamble[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$probGamb
choice[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$ChooseRisk
risky_payoff[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$valueGamble
safe_payoff[i, 1:useTrials] <- 5
risk1Unc0[i, 1:useTrials] <- as.numeric(as.character(Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$DFE1DFD0))
p_gamble_est[i, 1:useTrials]<-as.double(as.character(Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$PercentBlueEstimate))/100
#ambigLevel[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$Ambiguity
# here i tear up the sequence. it's saved as a factor, so i need to make it into a character string and split it up according to the separator.
for (t in 1:useTrials){
Blue<-strsplit(toString((Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$blue_marbles[t])),split=",")
BlueSum<-0;
# now i add up the number of successes
for (k in 1:length(Blue[[1]])){
BlueSum=BlueSum+as.numeric(Blue[[1]][k])
}
Blue_Sum_All[i, t]<-BlueSum
#here i add up the number of failures
Red<-strsplit(toString((Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$red_marbles[t])),split=",")
RedSum<-0;
for (k in 1:length(Blue[[1]])){
RedSum=RedSum+as.numeric(Red[[1]][k])
}
Red_Sum_All[i, t]<-RedSum
# this is for sequential updating, where binary successes and fails are stored in the third dimension of the matrix
if(RedSum!=99){
Sucess[i, t,1:Sequence_Length]<-c(array(1, c(BlueSum)), array(0, c(Sequence_Length-BlueSum)) )
Fail[i, t,1:Sequence_Length]<-c(array(1, c(RedSum)), array(0, c(Sequence_Length-RedSum)) )
}else{
# if it was a risk trial, then either put in 99 or make sure that you sample from uniform.
Sucess[i, t,1:Sequence_Length]<-99
Red_Sum_All[i, t]<-1
Fail[i, t,1:Sequence_Length]<-99
Blue_Sum_All[i, t]<-1
}
}
}
#can't be bigger than 1.
p_gamble_est[p_gamble_est>=1]<-0.99
# Specify the number of parameters and parameters of interest
numPars <- 5
POI <- c("mu_rho", "mu_tau","mu_ocu_Uncertainty","ocu_Uncertainty",
"rho","tau") #"log_lik","y_pred")
#this is all i need for stan.
dataList <- list(
N = numSubjs,## number of subjects in each group.
T = maxTrials,
#Seq = Sequence_Length,
Tsubj = Tsubj,
numPars = numPars,
safe_payoff = safe_payoff,
risky_payoff = risky_payoff,
p_gamble_est= p_gamble_est,
Sucess = Blue_Sum_All,
Fail = Red_Sum_All,
condition = condition, # condition is 0= solo ,1 = safe ,3 = risky
p_gamble = p_gamble,
choice = choice,
risk1Unc0=risk1Unc0
# agegroup=agegroup
)
inits<-"fixed"
# priors
if (inits[1] != "random") {
if (inits[1] == "fixed") {
inits_fixed <- c(0.5, 0.9, 0.0 ,0.5, 0.5)
} else {
if (length(inits) == numPars) {
inits_fixed <- inits
# mu_ocu =rep(inits_fixed[3],2)
} else {
stop("Check your initial values!")
}
}
genInitList <- function() {
list(
#initial values.
mu_rho =rep(inits_fixed[1]),
mu_tau =rep(inits_fixed[2]),
# mu_ocu_Risk =c(0.1),
mu_ocu_Uncertainty =c(0.1),
# mu_alpha_add=inits_fixed[4],
# mu_beta_add=inits_fixed[5],
sigma_rho= rep(c(1.0)),
sigma_tau= rep(c(1.0)),
# sigma_ocu_Risk= rep(c(1.0)),
sigma_ocu_Uncertainty= rep(c(1.0)),
# sigma_alpha_add=1.0,
# sigma_beta_add=1.0,
rho_p = rep(qnorm(inits_fixed[1]/2),numSubjs),
tau_p = rep(log(inits_fixed[2]),numSubjs),
# ocu_Risk_p = rep(0.1,numSubjs)
ocu_Uncertainty_p = rep(0.1,numSubjs)
# alpha_add_p = rep(qnorm(inits_fixed[4]/2),numSubjs),
# beta_add_p = rep(qnorm(inits_fixed[5]/2),numSubjs)
#ocu_p = matrix(inits_fixed[3],numSubjsG, nGroups)
)
}
} else {
genInitList <- "random"
}
```
```{r, eval=F}
fitSep = stan("B_Model_Code/ChoiceModles/ocu_hier_RiskM.stan",
data = dataList,
pars = POI,
init = genInitList,
iter = 2000,
cores = 4,
chains =4,
control = list(adapt_delta = 0.99)
)
save(fitSep,file="C_ModelFits/FirstTryOhWeh")
```
In what follows I look at the whole parameter distributions. After I collect the fits, we can nicely concatenate them.
# Individual parameters
```{r}
datalist = list()
for(i in 1:117){
df<-data.frame(
alphaAdd=as.vector(Parameters$alpha_add[,i]),
betaAdd=as.vector(Parameters$beta_add[,i]),
rho=as.vector(Parameters$rho[,i]),
tau=as.vector(Parameters$tau[,i]),
ocuRisk=as.vector(Parameters$ocu_Risk[,i]),
ocuUncertainty=as.vector(Parameters$ocu_Uncertainty[,i]),
subject=subjList[i],
age=ageList[i],# here i need to get some kind of dictionary.
conf=confList[i]
)
datalist[[i]] <- df
}
big_data = do.call(rbind, datalist)
```
```{r}
PosteriorMean<-big_data%>%group_by(subject,age,conf)%>%dplyr::summarise(
MeanAlphaAdd=mean(alphaAdd),
MeanBetaAdd=mean(betaAdd),
MeanRho=mean(rho),
meanTau=mean(tau),
meanOcuR=mean(ocuRisk),
meanOCUUnc=mean(ocuUncertainty)
)
```
## The Learning Model
```{r}
library(tidybayes)
```
In each uncertain trial, subjects see 9 pieces of evidence. Each piece can be either red, meaning a loss, or blue, meaning a win. The number of observed wins is denoted by $\alpha$, the number of observed losses by $\beta$. In uncertain trials, we assume that subjects update their beliefs about the outcome distribution in a quasi-Bayesian manner:
$$
p \sim B(\alpha^{p},\beta^{q})
$$
We allow wins and losses to be subjectively over- or underweighted by raising $\alpha$ and $\beta$ to the free parameters $p$ and $q$, respectively.
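A minimal sketch of this update rule in R (illustrative only, not the Stan implementation; it assumes a uniform Beta(1,1) prior, and the function name is made up here):
```{r}
# Quasi-Bayesian update: the observed counts of wins and losses are raised
# to the free parameters p and q before entering the Beta posterior.
# With a Beta(1,1) prior, the subjective point estimate is the posterior
# mean of Beta(1 + wins^p, 1 + losses^q).
subjective_p_estimate <- function(wins, losses, p = 1, q = 1) {
  a <- 1 + wins^p    # subjectively weighted successes
  b <- 1 + losses^q  # subjectively weighted failures
  a / (a + b)        # posterior mean
}
subjective_p_estimate(wins = 6, losses = 3)          # standard Bayesian: ~0.64
subjective_p_estimate(wins = 6, losses = 3, p = 0.5) # wins underweighted: ~0.46
```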
## The Value Model
The utility model is an expected utility model. Here we take the probability estimate, which was either obtained by sampling or described to the participant, and multiply it by the subjective utility of the option. Participants could choose between a safe option, which always had a value of 5, and a risky option, whose expected utility is the probability of occurrence times the subjective utility of the payoff:
$$
EU=p*V^\rho
$$
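As a worked example in R (a sketch; the risky payoff of 10 and the probabilities are arbitrary illustration values, only the safe payoff of 5 is fixed by the task):
```{r}
# Expected utility: probability of winning times the subjective utility V^rho.
# rho < 1 makes the utility curve concave (risk averse), rho > 1 convex.
expected_utility <- function(p, value, rho) p * value^rho
eu_safe  <- expected_utility(p = 1,   value = 5,  rho = 0.8) # safe option is certain
eu_risky <- expected_utility(p = 0.4, value = 10, rho = 0.8) # example risky option
c(eu_safe, eu_risky)
```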
## The Choice Model
The choice model takes the difference between these utilities, $\Delta Ut$, and translates it into a choice probability via a sigmoid transformation; the temperature parameter $\tau$ scales the utility difference up or down:
$$
p_{ChooseRisk}=\frac{1}{1+e^{-\Delta Ut*\tau}}
$$
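A sketch of this choice rule in R, using the example utilities from the value model above (the function name is illustrative):
```{r}
# Sigmoid choice rule: maps the utility difference into P(choose risky).
# Large tau makes choices nearly deterministic; tau near 0 makes them random.
p_choose_risk <- function(eu_risky, eu_safe, tau) {
  1 / (1 + exp(-(eu_risky - eu_safe) * tau))
}
p_choose_risk(eu_risky = 2.52, eu_safe = 3.62, tau = 1) # risky is worse: p < .5
```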
# Here I give the learning model a try
To be outsourced; this is just for debugging now.
```{r}
library(rstan)
#Bin_Update_Data<-Bin_Update_Data[Bin_Update_Data$typeRA=="1",]# keep only the risk trials.
Bin_Update_Data<-MarbleData%>%arrange(Agegroup)# i order it first so i can make sure that subjects are sorted by agegroup
Agegroups<-unique(Bin_Update_Data$Age.bins)# for indexing my agegroups.
#make it fit with my stan file.
Bin_Update_Data<-Bin_Update_Data%>%dplyr::mutate(
OtherChoseRisk = case_when(
OtherChoseRisk=="NULL" ~ 2,# i dont need this but i restricted the numbers in stan between 1 and 3
OtherChoseRisk=="1" ~ 3,# risky choices are coded as 3 in my stan code
OtherChoseRisk=="0" ~ 1,# safe choices are coded as 1 in my stan code
TRUE~0 # keep the rest.
)
)#end PeerChoice.
#subset it to fit on only one agegroup.
#Bin_Update_Data%>%filter(Agegroup==2)->Bin_Update_Data
#Bin_Update_Data%>%filter(DFE1DFD0==0)->Bin_Update_Data
# now check how many participants we have.
Bin_Update_Data$subject<-as.numeric(Bin_Update_Data$subject)
#change colname of subject into subjID
numSubjs<-length(unique(Bin_Update_Data$subject))#Total Number of Subs
subjList <- unique(Bin_Update_Data$subject)######
Sequence_Length <- unique(Bin_Update_Data[Bin_Update_Data$TotalNShown<99,]$TotalNShown)
Sequence_Length<-9
####### number of trials and which group each subject is in ######
Tsubj <- as.vector(rep(0, numSubjs))
for (sIdx in 1:numSubjs) {
curSubj <- subjList[sIdx]
Tsubj[sIdx] <- length(Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$subject) # How many entries per Subject?
#GSubj[sIdx] <- Simulations[[i]]$group
}
maxTrials <- max(Tsubj)
# Information for user continued
cat(" # of (max) trials per subject = ", maxTrials, "\n\n")
# for multiple subjects
condition <- array(0, c(numSubjs, maxTrials))#Other Chose Risk?
p_gamble <- array(0, c(numSubjs, maxTrials))
choice <- array(0, c(numSubjs, maxTrials))
safe_payoff<- array(0, c(numSubjs, maxTrials))
risky_payoff<- array(0, c(numSubjs, maxTrials))
risk1Unc0 <- array(0, c(numSubjs, maxTrials))
#specs for the uncertain trials
Sucess <- array(0, c(numSubjs, maxTrials,Sequence_Length))
Fail <- array(0, c(numSubjs, maxTrials,Sequence_Length))
p_gamble_est<- array(0, c(numSubjs, maxTrials))
Blue_Sum_All<- array(0, c(numSubjs, maxTrials))
Red_Sum_All<- array(0, c(numSubjs, maxTrials))
#Agegroup<- array(0, c(numSubjs, maxTrials))
# generate the data lists to be passed to stan
# concatenate different groups in the third dimension.
for (i in 1:numSubjs) {
curSubj <- subjList[i]
useTrials <- Tsubj[i]
condition[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$OtherChoseRisk
p_gamble[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$probGamb
choice[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$ChooseRisk
risky_payoff[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$valueGamble
safe_payoff[i, 1:useTrials] <- 5
risk1Unc0[i, 1:useTrials] <- as.numeric(as.character(Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$DFE1DFD0))
p_gamble_est[i, 1:useTrials]<-as.double(as.character(Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$PercentBlueEstimate))/100
#ambigLevel[i, 1:useTrials] <- Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$Ambiguity
# here i tear up the sequence. it's saved as a factor, so i need to make it into a character string and split it up according to the separator.
for (t in 1:useTrials){
Blue<-strsplit(toString((Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$blue_marbles[t])),split=",")
BlueSum<-0;
# now i add up the number of successes
for (k in 1:length(Blue[[1]])){
BlueSum=BlueSum+as.numeric(Blue[[1]][k])
}
Blue_Sum_All[i, t]<-BlueSum
#here i add up the number of failures
Red<-strsplit(toString((Bin_Update_Data[Bin_Update_Data$subject==curSubj,]$red_marbles[t])),split=",")
RedSum<-0;
for (k in 1:length(Blue[[1]])){
RedSum=RedSum+as.numeric(Red[[1]][k])
}
Red_Sum_All[i, t]<-RedSum
# this is for sequential updating, where binary successes and fails are stored in the third dimension of the matrix
if(RedSum!=99){
Sucess[i, t,1:Sequence_Length]<-c(array(1, c(BlueSum)), array(0, c(Sequence_Length-BlueSum)) )
Fail[i, t,1:Sequence_Length]<-c(array(1, c(RedSum)), array(0, c(Sequence_Length-RedSum)) )
}else{
# if it was a risk trial, then either put in 99 or make sure that you sample from uniform.
Sucess[i, t,1:Sequence_Length]<-99
Red_Sum_All[i, t]<-1
Fail[i, t,1:Sequence_Length]<-99
Blue_Sum_All[i, t]<-1
}
}
}
#can't be bigger than 1.
p_gamble_est[p_gamble_est>=1]<-0.99
# Specify the number of parameters and parameters of interest
#numPars <- 5
POI <- c("mu_rho", "mu_tau","mu_ocu_Uncertainty","mu_ocu_Risk",
"mu_alpha_add","mu_beta_add",
"ocu_Uncertainty","ocu_Risk","alpha_add","beta_add",
"rho","tau") #"log_lik","y_pred")
#this is all i need for stan.
dataList <- list(
N = numSubjs,## number of subjects in each group.
T = maxTrials,
#Seq = Sequence_Length,
Tsubj = Tsubj,
numPars = numPars,
safe_payoff = safe_payoff,
risky_payoff = risky_payoff,
p_gamble_est= p_gamble_est,
Sucess = Blue_Sum_All,
Fail = Red_Sum_All,
condition = condition, # condition is 0= solo ,1 = safe ,3 = risky
p_gamble = p_gamble,
choice = choice,
risk1Unc0=risk1Unc0
# agegroup=agegroup
)
inits<-"fixed"
# priors
if (inits[1] != "random") {
if (inits[1] == "fixed") {
inits_fixed <- c(0.5, 0.9, 0.0 ,0.5, 0.5)
} else {
if (length(inits) == numPars) {
inits_fixed <- inits
# mu_ocu =rep(inits_fixed[3],2)
} else {
stop("Check your initial values!")
}
}
genInitList <- function() {
list(
#initial values.
mu_rho =rep(inits_fixed[1]),
mu_tau =rep(inits_fixed[2]),
mu_ocu_Risk =c(0.1),
mu_ocu_Uncertainty =c(0.1),
mu_alpha_add=inits_fixed[4],
mu_beta_add=inits_fixed[5],
sigma_rho= rep(c(1.0)),
sigma_tau= rep(c(1.0)),
sigma_ocu_Risk= rep(c(1.0)),
sigma_ocu_Uncertainty= rep(c(1.0)),
sigma_alpha_add=1.0,
sigma_beta_add=1.0,
rho_p = rep(qnorm(inits_fixed[1]/2),numSubjs),
tau_p = rep(log(inits_fixed[2]),numSubjs),
ocu_Risk_p = rep(0.1,numSubjs),
ocu_Uncertainty_p = rep(0.1,numSubjs),
alpha_add_p = rep(qnorm(inits_fixed[4]/2),numSubjs),
beta_add_p = rep(qnorm(inits_fixed[5]/2),numSubjs)
#ocu_p = matrix(inits_fixed[3],numSubjsG, nGroups)
)
}
} else {
genInitList <- "random"
}
```
```{r, eval=F}
fitSep = stan("B_Model_Code/LearningModels/ocu_hier_RiskUnc.stan",
data = dataList,
pars = POI,
init = genInitList,
iter = 2000,
cores = 4,
chains =4,
control = list(adapt_delta = 0.99)
)
save(fitSep,file="C_ModelFits/FullLearningModel")
```
# Parameter Estimates
In what follows I show you the means of the parameter estimates.
# Alpha Scaling: p
```{r}
library(lme4)
lm(MeanAlphaAdd~age,data=PosteriorMean)
ggplot(PosteriorMean,aes(x=age,y=MeanAlphaAdd))+
geom_point()+geom_smooth(method="lm")
```
# Beta Scaling: q
```{r}
ggplot(PosteriorMean,aes(x=age,y=MeanBetaAdd))+
geom_jitter()+geom_smooth(method="lm")
```
```{r}
lm(MeanRho~poly(age,2),data=PosteriorMean)
ggplot(PosteriorMean,aes(x=age,y=MeanRho))+
geom_point()+geom_smooth(method="lm")
```
```{r}
lm(meanTau~poly(age,2),data=PosteriorMean)
ggplot(PosteriorMean,aes(x=age,y=meanTau))+
geom_point()+geom_smooth(method="lm")
```
```{r}
lm(meanOcuR~poly(age,2),data=PosteriorMean)
ggplot(PosteriorMean,aes(x=age,y=meanOcuR))+
geom_point()+geom_smooth(method="lm")
```
```{r}
lm(meanOCUUnc~poly(age,2),data=PosteriorMean)
ggplot(PosteriorMean,aes(x=conf,y=meanOCUUnc))+
geom_point()+geom_smooth(method="lm")
```