This code reproduces the trial-level analyses, including the change condition, reported in the following manuscript:

Cosme, D., Mobasser, A., & Pfeifer, J. H. If you’re happy and you know it: Neural correlates of self-evaluated psychological health and well-being

load packages

if(!require('pacman')) {
    install.packages('pacman')
}

pacman::p_load(tidyverse, gtools, GGally, sjstats, lme4, lmerTest, knitr, ggeffects, kableExtra, wesanderson, modelbased, install = TRUE)

define aesthetics

rois = c("#006989", "#56445D", "#8EC922")
constructs = c("#FEC601", "#254E70", "#F43C13")
instructions = wesanderson::wes_palette("Darjeeling1", 2, "continuous")
valence = c("#119DA4", "#19647E")
plot_aes = theme_minimal() +
  theme(legend.position = "top",
        legend.text = element_text(size = 12),
        text = element_text(size = 16, family = "Futura Medium"),
        axis.text = element_text(color = "black"),
        axis.line = element_line(colour = "black"),
        axis.ticks.y = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
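
Note that the theme assumes the Futura Medium font is installed. A minimal optional guard (a sketch, not part of the original analysis; plot_font is a hypothetical object) that falls back to the default sans family when the font is unavailable:

# hypothetical fallback: use Futura Medium only if the system provides it
plot_font = if (requireNamespace("systemfonts", quietly = TRUE) &&
                "Futura Medium" %in% systemfonts::system_fonts()$family) {
  "Futura Medium"
} else {
  "sans"
}
# plot_aes could then use text = element_text(size = 16, family = plot_font)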

define functions

# tidy the fixed effects from a fitted (g)lmer model, relabel the terms,
# format p-values and 95% CIs, and render the result as a kableExtra table
model_table = function(model) {
  model %>%
    broom.mixed::tidy(., conf.int = TRUE) %>%
    filter(effect == "fixed") %>%
    rename("SE" = std.error,
           "z" = statistic,
           "p" = p.value) %>%
    mutate(term = gsub("\\(Intercept\\)", "Intercept", term),
           term = gsub("log\\(RT\\)", "RT", term),
           term = gsub("constructill-being", "Construct (ill-being)", term),
           term = gsub("constructsocial well-being", "Construct (social well-being)", term),
           term = gsub("instructionchange", "Instructions (change)", term),
           term = gsub(":", " x ", term),
           p = ifelse(p < .001, "< .001",
                      ifelse(p == 1, "1.000", gsub("0.(.*)", ".\\1", sprintf("%.3f", p)))),
           `b [95% CI]` = sprintf("%.2f [%.2f, %.2f]", estimate, conf.low, conf.high),
           z = abs(round(z, 2))) %>%
    select(term, `b [95% CI]`, z, p) %>%
    kable() %>%
    kableExtra::kable_styling()
}

load data

task = read.csv("../../data/task_data.csv") %>%
  mutate(responseYN = as.factor(responseYN),
         construct = recode(construct, "well-being" = "self-oriented well-being",
                            "social" = "social well-being"),
         construct = factor(construct, levels = c("self-oriented well-being", "social well-being", "ill-being")),
         valence = factor(valence, levels = c("positive", "negative")))
betas = read.csv("../../data/neuro_data_2s.csv")

exclude outliers and standardize

  • Exclude outlier trials that are > 3 SDs from the ROI median across participants
trial_conditions = task %>%
  select(subjectID, trial, run, instruction)

betas_outlier = betas %>%
  left_join(., trial_conditions) %>%
  filter(!is.na(meanPE)) %>%
  filter(!(run == "run2" & subjectID %in% c("FP067", "FP114"))) %>%
  group_by(roi) %>%
  mutate(median = median(meanPE, na.rm = TRUE),
         sd3 = 3*sd(meanPE, na.rm = TRUE),
         outlier = ifelse(meanPE > median + sd3 | meanPE < median - sd3, "yes", "no")) 
## Joining, by = c("subjectID", "trial", "run")
betas_outlier %>%
  group_by(outlier) %>%
  summarize(n = n()) %>%
  ungroup() %>%
  mutate(total = sum(n),
         percent = round((n / total) * 100, 2)) %>%
  kable(format = "pandoc")
| outlier | n     | total | percent |
|---------|-------|-------|---------|
| no      | 23639 | 23976 | 98.59   |
| yes     | 337   | 23976 | 1.41    |
betas_ex = betas_outlier %>%
  filter(outlier == "no") %>%
  select(-c(median, sd3, outlier))

merge data and exclude participants

  • Motion exclusions: FP091
  • Technical failure: FP080, FP082
  • Non-compliance: FP021, FP049, FP085, FP121
  • Standardize (scale by the SD) within participant and ROI
data_ex = task %>%
  left_join(., betas_ex) %>%
  filter(!subjectID %in% c("FP091", "FP080", "FP082", "FP021", "FP049", "FP085", "FP121")) %>%
  filter(!is.na(meanPE)) %>%
  group_by(roi, subjectID) %>%
  mutate(std = meanPE / sd(meanPE, na.rm = TRUE)) %>%
  select(-sdPE)
## Joining, by = c("trial", "subjectID", "run", "instruction")

remove missing data for MLM analysis

data_complete_mod = data_ex %>%
  select(-meanPE) %>%
  spread(roi, std) %>%
  select(subjectID, trial, run, instruction, construct, item, valence, response, responseYN, RT, pgACC, vmPFC, VS) %>%
  na.omit() %>%
  mutate(instruction = factor(instruction, levels = c("self", "change")))
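
As an optional sanity check (a sketch, not part of the original analysis), the number of complete-case trials entering the models can be tallied per instruction condition:

# count complete-case trials by instruction condition
data_complete_mod %>%
  group_by(instruction) %>%
  summarize(n_trials = n())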

item responses

percent endorsements

percents = data_complete_mod %>%
  group_by(instruction, construct, item, response) %>%
  summarize(n = n()) %>%
  spread(response, n) %>%
  mutate(no = ifelse(is.na(no), 0, no),
         yes = ifelse(is.na(yes), 0, yes),
         total = no + yes) %>%
  gather(response, n, no, yes) %>%
  mutate(percent = round((n/total) * 100, 1),
         n = sprintf("%s / %s", n, total)) %>%
  filter(response == "yes")
## `summarise()` has grouped output by 'instruction', 'construct', 'item'. You can
## override using the `.groups` argument.
percents %>%
  select(-response, -total, -n) %>%
  spread(instruction, percent) %>%
  arrange(desc(change)) %>%
  kable() %>%
  kableExtra::kable_styling()
| construct | item | self | change |
|---|---|---|---|
| ill-being | depressed | 0.0 | 100.0 |
| ill-being | tired | 100.0 | 100.0 |
| ill-being | unhappy | 25.0 | 100.0 |
| ill-being | sad | 14.1 | 92.8 |
| self-oriented well-being | happy | 92.2 | 92.1 |
| ill-being | feel useless | 9.2 | 88.5 |
| ill-being | feel worthless | 5.2 | 88.5 |
| ill-being | stressed | 66.7 | 87.4 |
| ill-being | overwhelmed | 47.1 | 87.1 |
| ill-being | feel unimportant | 18.0 | 86.3 |
| self-oriented well-being | joyful | 86.5 | 85.1 |
| ill-being | angry | 9.4 | 84.8 |
| social well-being | isolated | 23.5 | 84.3 |
| social well-being | feel encouraged | 81.4 | 84.2 |
| self-oriented well-being | interested in life | 99.0 | 83.5 |
| self-oriented well-being | engaged in tasks | 89.7 | 83.3 |
| social well-being | have warm relationships | 91.2 | 82.4 |
| social well-being | lonely | 30.4 | 82.2 |
| social well-being | helped by friends | 79.4 | 81.4 |
| self-oriented well-being | hopeful | 90.6 | 81.2 |
| self-oriented well-being | like myself | 81.6 | 81.0 |
| self-oriented well-being | satisfied with life | 78.2 | 81.0 |
| social well-being | have good friends | 88.2 | 81.0 |
| self-oriented well-being | full of energy | 80.0 | 80.0 |
| self-oriented well-being | positive | 80.6 | 80.0 |
| social well-being | alienated | 0.0 | 80.0 |
| social well-being | have support systems | 100.0 | 80.0 |
| social well-being | neglected | 0.0 | 80.0 |
| social well-being | well-liked | 100.0 | 80.0 |
| ill-being | feel invisible | 0.0 | 80.0 |
| ill-being | feel rejected | 15.7 | 80.0 |
| ill-being | nervous | 51.5 | 79.4 |
| social well-being | have trusting relationships | 87.8 | 79.3 |
| self-oriented well-being | live a purposeful life | 90.2 | 78.0 |
| self-oriented well-being | achieve goals | 91.3 | 77.0 |
| self-oriented well-being | live a meaningful life | 93.1 | 76.5 |
| self-oriented well-being | proud | 78.2 | 75.0 |
| social well-being | comfortable with friends | 92.3 | 74.7 |
| ill-being | fearful | 27.1 | 73.5 |
| social well-being | belong in social group | 64.1 | 72.9 |
| social well-being | trusted | 97.1 | 72.0 |
| social well-being | am cared for | 96.0 | 70.3 |
| social well-being | loved | 99.0 | 69.7 |
| ill-being | anxious | 53.9 | 63.4 |
| ill-being | unable to cope | 7.8 | 62.0 |
| self-oriented well-being | cheerful | 60.0 | 60.0 |
| self-oriented well-being | optimistic | 80.0 | 60.0 |
| self-oriented well-being | capable | 100.0 | 50.0 |

percent endorsements for improperly randomized items (fewer than 10 trials in a given instruction condition)

percents %>%
  filter(total < 10) %>%
  select(-response, -total, -n) %>%
  spread(instruction, percent) %>%
  arrange(desc(construct)) %>%
  kable() %>%
  kableExtra::kable_styling()
| construct | item | self | change |
|---|---|---|---|
| ill-being | depressed | 0 | 100 |
| ill-being | feel invisible | 0 | 80 |
| ill-being | tired | 100 | 100 |
| ill-being | unhappy | 25 | 100 |
| social well-being | alienated | 0 | 80 |
| social well-being | have support systems | 100 | 80 |
| social well-being | neglected | 0 | 80 |
| social well-being | well-liked | 100 | 80 |
| self-oriented well-being | capable | 100 | 50 |
| self-oriented well-being | cheerful | 60 | 60 |
| self-oriented well-being | full of energy | 80 | 80 |
| self-oriented well-being | optimistic | 80 | 60 |

plot

data_complete_mod %>%
  group_by(instruction, construct, item, responseYN) %>%
  summarize(n = n()) %>%
  spread(responseYN, n) %>%
  mutate(no = ifelse(is.na(no), 0, no),
         yes = ifelse(is.na(yes), 0, yes),
         total = no + yes) %>%
  gather(responseYN, n, no, yes) %>%
  mutate(percent = round((n/total) * 100, 1)) %>%
  filter(responseYN == "yes") %>%
  ggplot(aes(percent, fill = instruction)) +
  geom_density(color = NA, alpha = .8) +
  facet_grid(~construct) +
  scale_fill_manual(values = instructions) +
  labs(x = "\n percent endorsed") +
  plot_aes
## `summarise()` has grouped output by 'instruction', 'construct', 'item'. You can
## override using the `.groups` argument.

ROI descriptives

self trials

# correlations
pgACC_vmPFC = data_complete_mod %>%
  filter(instruction == "self") %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, pgACC, vmPFC) %>%
   rmcorr::rmcorr(subjectID, pgACC, vmPFC, .)

pgACC_VS = data_complete_mod %>%
  filter(instruction == "self") %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, pgACC, VS) %>%
   rmcorr::rmcorr(subjectID, pgACC, VS, .)

vmPFC_VS = data_complete_mod %>%
  filter(instruction == "self") %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, vmPFC, VS) %>%
   rmcorr::rmcorr(subjectID, vmPFC, VS, .)

# means and SDs
mean_table = data_complete_mod %>%
  gather(roi, value, pgACC, vmPFC, VS) %>%
  group_by(roi) %>%
  summarize(M = round(mean(value, na.rm = TRUE), 2),
            SD = round(sd(value, na.rm = TRUE), 2)) %>%
  mutate(order = ifelse(roi == "pgACC", 1,
                 ifelse(roi == "vmPFC", 2, 3)),
         roi = ifelse(roi == "pgACC", "1. pgACC",
               ifelse(roi == "vmPFC", "2. vmPFC", "3. VS"))) %>%
  rename("ROI" = roi) %>%
  arrange(order) %>%
  select(-order)

# table
corr_table = data.frame(ROI = c("1. pgACC", "2. vmPFC", "3. VS"),
           `1` = c("--", paste0(round(pgACC_vmPFC[[1]],2), " [",round(pgACC_vmPFC[[4]][1],2),", ",round(pgACC_vmPFC[[4]][2],2),"]"),
                   paste0(round(pgACC_VS[[1]],2), " [",round(pgACC_VS[[4]][1],2),", ",round(pgACC_VS[[4]][2],2),"]")),
           `2` = c("", "--", paste0(round(vmPFC_VS[[1]],2), " [",round(vmPFC_VS[[4]][1],2),", ",round(vmPFC_VS[[4]][2],2),"]")),
           `3` = c("", "", "--"),
           check.names = FALSE)

mean_table %>%
  full_join(., corr_table) %>%
  knitr::kable(format = "pandoc", caption = "Repeated Measures Correlations Between ROIs (n = 3694). All correlations are statistically significant, p < .001. 95% confidence intervals are bracketed")
## Joining, by = "ROI"
Repeated Measures Correlations Between ROIs (n = 3694). All correlations are statistically significant, p < .001. 95% confidence intervals are bracketed
| ROI | M | SD | 1 | 2 | 3 |
|---|---|---|---|---|---|
| 1. pgACC | -0.17 | 1.01 | – | | |
| 2. vmPFC | -0.06 | 1.02 | 0.82 [0.81, 0.84] | – | |
| 3. VS | 0.14 | 1.03 | 0.42 [0.39, 0.45] | 0.45 [0.42, 0.47] | – |
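
The corr_table code indexes the rmcorr output by position ([[1]] for the correlation, [[4]] for its confidence interval). An equivalent helper using named elements (a sketch; it assumes the rmcorr object exposes $r and $CI, as documented in the rmcorr package) may be easier to read:

# sketch: format an rmcorr result as "r [lower, upper]" using named elements
format_rmcorr = function(rmc) {
  sprintf("%.2f [%.2f, %.2f]", rmc$r, rmc$CI[1], rmc$CI[2])
}
# e.g., format_rmcorr(pgACC_vmPFC) reproduces the pgACC-vmPFC cell above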

change trials

# correlations
pgACC_vmPFC = data_complete_mod %>%
  filter(instruction == "change") %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, pgACC, vmPFC) %>%
   rmcorr::rmcorr(subjectID, pgACC, vmPFC, .)

pgACC_VS = data_complete_mod %>%
  filter(instruction == "change") %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, pgACC, VS) %>%
   rmcorr::rmcorr(subjectID, pgACC, VS, .)

vmPFC_VS = data_complete_mod %>%
  filter(instruction == "change") %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, vmPFC, VS) %>%
   rmcorr::rmcorr(subjectID, vmPFC, VS, .)

# means and SDs
mean_table = data_complete_mod %>%
  gather(roi, value, pgACC, vmPFC, VS) %>%
  group_by(roi) %>%
  summarize(M = round(mean(value, na.rm = TRUE), 2),
            SD = round(sd(value, na.rm = TRUE), 2)) %>%
  mutate(order = ifelse(roi == "pgACC", 1,
                 ifelse(roi == "vmPFC", 2, 3)),
         roi = ifelse(roi == "pgACC", "1. pgACC",
               ifelse(roi == "vmPFC", "2. vmPFC", "3. VS"))) %>%
  rename("ROI" = roi) %>%
  arrange(order) %>%
  select(-order)

# table
corr_table = data.frame(ROI = c("1. pgACC", "2. vmPFC", "3. VS"),
           `1` = c("--", paste0(round(pgACC_vmPFC[[1]],2), " [",round(pgACC_vmPFC[[4]][1],2),", ",round(pgACC_vmPFC[[4]][2],2),"]"),
                   paste0(round(pgACC_VS[[1]],2), " [",round(pgACC_VS[[4]][1],2),", ",round(pgACC_VS[[4]][2],2),"]")),
           `2` = c("", "--", paste0(round(vmPFC_VS[[1]],2), " [",round(vmPFC_VS[[4]][1],2),", ",round(vmPFC_VS[[4]][2],2),"]")),
           `3` = c("", "", "--"),
           check.names = FALSE)

mean_table %>%
  full_join(., corr_table) %>%
  knitr::kable(format = "pandoc", caption = "Repeated Measures Correlations Between ROIs (n = 3687). All correlations are statistically significant, p < .001. 95% confidence intervals are bracketed")
## Joining, by = "ROI"
Repeated Measures Correlations Between ROIs (n = 3687). All correlations are statistically significant, p < .001. 95% confidence intervals are bracketed
| ROI | M | SD | 1 | 2 | 3 |
|---|---|---|---|---|---|
| 1. pgACC | -0.17 | 1.01 | – | | |
| 2. vmPFC | -0.06 | 1.02 | 0.82 [0.81, 0.83] | – | |
| 3. VS | 0.14 | 1.03 | 0.44 [0.42, 0.47] | 0.51 [0.49, 0.54] | – |

model each construct separately

self-oriented well-being

model_rt_wellbeing = glmer(responseYN ~ RT * instruction + 
                     pgACC * instruction + vmPFC * instruction + VS * instruction +
                     (1 | subjectID), 
                family = binomial, 
                data = filter(data_complete_mod, construct == "self-oriented well-being"),
                control = glmerControl(optimizer = "bobyqa"))

model summary

summary(model_rt_wellbeing)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: responseYN ~ RT * instruction + pgACC * instruction + vmPFC *  
##     instruction + VS * instruction + (1 | subjectID)
##    Data: filter(data_complete_mod, construct == "self-oriented well-being")
## Control: glmerControl(optimizer = "bobyqa")
## 
##      AIC      BIC   logLik deviance df.resid 
##   1871.7   1935.5   -924.9   1849.7     2418 
## 
## Scaled residuals: 
##      Min       1Q   Median       3Q      Max 
## -22.5221   0.1442   0.2852   0.4426   1.7140 
## 
## Random effects:
##  Groups    Name        Variance Std.Dev.
##  subjectID (Intercept) 0.549    0.741   
## Number of obs: 2429, groups:  subjectID, 105
## 
## Fixed effects:
##                         Estimate Std. Error z value             Pr(>|z|)    
## (Intercept)              2.01901    0.13036  15.487 < 0.0000000000000002 ***
## RT                      -3.31538    0.31601 -10.492 < 0.0000000000000002 ***
## instructionchange       -0.02215    0.14812  -0.150             0.881127    
## pgACC                    0.41838    0.17495   2.391             0.016783 *  
## vmPFC                   -0.53692    0.17942  -2.992             0.002767 ** 
## VS                       0.14314    0.10732   1.334             0.182283    
## RT:instructionchange     1.43872    0.38884   3.700             0.000216 ***
## instructionchange:pgACC -0.41291    0.21861  -1.889             0.058914 .  
## instructionchange:vmPFC  0.59192    0.22862   2.589             0.009622 ** 
## instructionchange:VS    -0.21599    0.13981  -1.545             0.122373    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) RT     instrc pgACC  vmPFC  VS     RT:nst in:ACC in:PFC
## RT          -0.149                                                        
## instrctnchn -0.530  0.069                                                 
## pgACC        0.218 -0.041 -0.171                                          
## vmPFC       -0.232  0.093  0.187 -0.782                                   
## VS          -0.059 -0.053  0.044 -0.095 -0.204                            
## RT:nstrctnc  0.102 -0.771 -0.314  0.023 -0.067  0.045                     
## instrct:ACC -0.168  0.033  0.230 -0.783  0.612  0.071 -0.032              
## instrct:PFC  0.180 -0.075 -0.124  0.600 -0.770  0.159  0.084 -0.771       
## instrctn:VS  0.030  0.039 -0.131  0.073  0.149 -0.750 -0.040 -0.075 -0.235

model table

model_table(model_rt_wellbeing)
| term | b [95% CI] | z | p |
|---|---|---|---|
| Intercept | 2.02 [1.76, 2.27] | 15.49 | < .001 |
| RT | -3.32 [-3.93, -2.70] | 10.49 | < .001 |
| Instructions (change) | -0.02 [-0.31, 0.27] | 0.15 | .881 |
| pgACC | 0.42 [0.08, 0.76] | 2.39 | .017 |
| vmPFC | -0.54 [-0.89, -0.19] | 2.99 | .003 |
| VS | 0.14 [-0.07, 0.35] | 1.33 | .182 |
| RT x Instructions (change) | 1.44 [0.68, 2.20] | 3.70 | < .001 |
| Instructions (change) x pgACC | -0.41 [-0.84, 0.02] | 1.89 | .059 |
| Instructions (change) x vmPFC | 0.59 [0.14, 1.04] | 2.59 | .010 |
| Instructions (change) x VS | -0.22 [-0.49, 0.06] | 1.54 | .122 |
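
Because these are logistic mixed-effects models, the coefficients above are on the log-odds scale. As an optional aid to interpretation (a sketch, not part of the original analysis; the same approach applies to the social well-being and ill-being models below), the fixed effects can be exponentiated into odds ratios with Wald confidence intervals:

# sketch: odds ratios and Wald 95% CIs for the fixed effects
or_table = exp(cbind(OR = fixef(model_rt_wellbeing),
                     confint(model_rt_wellbeing, parm = "beta_", method = "Wald")))
round(or_table, 2)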

social well-being

model_rt_social = glmer(responseYN ~ RT * instruction + 
                     pgACC * instruction + vmPFC * instruction + VS * instruction +
                     (1 | subjectID), 
                family = binomial, 
                data = filter(data_complete_mod, construct == "social well-being"),
                control = glmerControl(optimizer = "bobyqa"))

model summary

summary(model_rt_social)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: responseYN ~ RT * instruction + pgACC * instruction + vmPFC *  
##     instruction + VS * instruction + (1 | subjectID)
##    Data: filter(data_complete_mod, construct == "social well-being")
## Control: glmerControl(optimizer = "bobyqa")
## 
##      AIC      BIC   logLik deviance df.resid 
##   2399.6   2463.4  -1188.8   2377.6     2427 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -8.1609  0.1165  0.3333  0.6424  1.8433 
## 
## Random effects:
##  Groups    Name        Variance Std.Dev.
##  subjectID (Intercept) 0.1864   0.4317  
## Number of obs: 2438, groups:  subjectID, 105
## 
## Fixed effects:
##                         Estimate Std. Error z value             Pr(>|z|)    
## (Intercept)              1.98981    0.11463  17.358 < 0.0000000000000002 ***
## RT                      -2.96951    0.27799 -10.682 < 0.0000000000000002 ***
## instructionchange       -1.30364    0.12815 -10.173 < 0.0000000000000002 ***
## pgACC                    0.36706    0.14997   2.448             0.014385 *  
## vmPFC                   -0.57969    0.15524  -3.734             0.000188 ***
## VS                       0.09339    0.09959   0.938             0.348369    
## RT:instructionchange     2.78343    0.32745   8.500 < 0.0000000000000002 ***
## instructionchange:pgACC -0.63529    0.19012  -3.341             0.000833 ***
## instructionchange:vmPFC  0.76039    0.19539   3.892            0.0000996 ***
## instructionchange:VS    -0.06058    0.12092  -0.501             0.616401    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) RT     instrc pgACC  vmPFC  VS     RT:nst in:ACC in:PFC
## RT          -0.258                                                        
## instrctnchn -0.765  0.220                                                 
## pgACC        0.223 -0.041 -0.196                                          
## vmPFC       -0.390  0.095  0.346 -0.724                                   
## VS           0.015 -0.046 -0.016 -0.052 -0.282                            
## RT:nstrctnc  0.229 -0.840 -0.306  0.034 -0.084  0.038                     
## instrct:ACC -0.179  0.034  0.235 -0.782  0.563  0.040 -0.042              
## instrct:PFC  0.310 -0.080 -0.280  0.572 -0.783  0.216  0.099 -0.748       
## instrctn:VS -0.012  0.039 -0.047  0.040  0.228 -0.811 -0.047 -0.045 -0.280

model table

model_table(model_rt_social)
| term | b [95% CI] | z | p |
|---|---|---|---|
| Intercept | 1.99 [1.77, 2.21] | 17.36 | < .001 |
| RT | -2.97 [-3.51, -2.42] | 10.68 | < .001 |
| Instructions (change) | -1.30 [-1.55, -1.05] | 10.17 | < .001 |
| pgACC | 0.37 [0.07, 0.66] | 2.45 | .014 |
| vmPFC | -0.58 [-0.88, -0.28] | 3.73 | < .001 |
| VS | 0.09 [-0.10, 0.29] | 0.94 | .348 |
| RT x Instructions (change) | 2.78 [2.14, 3.43] | 8.50 | < .001 |
| Instructions (change) x pgACC | -0.64 [-1.01, -0.26] | 3.34 | < .001 |
| Instructions (change) x vmPFC | 0.76 [0.38, 1.14] | 3.89 | < .001 |
| Instructions (change) x VS | -0.06 [-0.30, 0.18] | 0.50 | .616 |

ill-being

model_rt_illbeing = glmer(responseYN ~ RT * instruction + 
                     pgACC * instruction + vmPFC * instruction + VS * instruction +
                     (1 | subjectID), 
                family = binomial, 
                data = filter(data_complete_mod, construct == "ill-being"),
                control = glmerControl(optimizer = "bobyqa"))

model summary

summary(model_rt_illbeing)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: responseYN ~ RT * instruction + pgACC * instruction + vmPFC *  
##     instruction + VS * instruction + (1 | subjectID)
##    Data: filter(data_complete_mod, construct == "ill-being")
## Control: glmerControl(optimizer = "bobyqa")
## 
##      AIC      BIC   logLik deviance df.resid 
##   2474.2   2537.9  -1226.1   2452.2     2419 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -5.5671 -0.5699  0.2484  0.4961  2.9289 
## 
## Random effects:
##  Groups    Name        Variance Std.Dev.
##  subjectID (Intercept) 0.4448   0.6669  
## Number of obs: 2430, groups:  subjectID, 105
## 
## Fixed effects:
##                         Estimate Std. Error z value             Pr(>|z|)    
## (Intercept)             -1.09249    0.09820 -11.125 < 0.0000000000000002 ***
## RT                      -0.13059    0.21852  -0.598               0.5501    
## instructionchange        2.93831    0.13195  22.268 < 0.0000000000000002 ***
## pgACC                    0.15227    0.12455   1.223               0.2215    
## vmPFC                    0.30186    0.12482   2.418               0.0156 *  
## VS                      -0.16057    0.08099  -1.983               0.0474 *  
## RT:instructionchange    -1.80544    0.33742  -5.351         0.0000000876 ***
## instructionchange:pgACC -0.38602    0.18719  -2.062               0.0392 *  
## instructionchange:vmPFC -0.19813    0.19129  -1.036               0.3003    
## instructionchange:VS     0.05388    0.11987   0.450               0.6530    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) RT     instrc pgACC  vmPFC  VS     RT:nst in:ACC in:PFC
## RT           0.103                                                        
## instrctnchn -0.460 -0.131                                                 
## pgACC        0.094 -0.012 -0.065                                          
## vmPFC       -0.142  0.032  0.120 -0.770                                   
## VS          -0.102 -0.004  0.064 -0.122 -0.196                            
## RT:nstrctnc -0.039 -0.602 -0.247  0.012 -0.033  0.001                     
## instrct:ACC -0.058  0.005  0.135 -0.650  0.496  0.081  0.018              
## instrct:PFC  0.087 -0.025 -0.052  0.486 -0.630  0.122  0.035 -0.772       
## instrctn:VS  0.064  0.003 -0.216  0.085  0.124 -0.654 -0.005 -0.076 -0.230

model table

model_table(model_rt_illbeing)
| term | b [95% CI] | z | p |
|---|---|---|---|
| Intercept | -1.09 [-1.28, -0.90] | 11.13 | < .001 |
| RT | -0.13 [-0.56, 0.30] | 0.60 | .550 |
| Instructions (change) | 2.94 [2.68, 3.20] | 22.27 | < .001 |
| pgACC | 0.15 [-0.09, 0.40] | 1.22 | .221 |
| vmPFC | 0.30 [0.06, 0.55] | 2.42 | .016 |
| VS | -0.16 [-0.32, -0.00] | 1.98 | .047 |
| RT x Instructions (change) | -1.81 [-2.47, -1.14] | 5.35 | < .001 |
| Instructions (change) x pgACC | -0.39 [-0.75, -0.02] | 2.06 | .039 |
| Instructions (change) x vmPFC | -0.20 [-0.57, 0.18] | 1.04 | .300 |
| Instructions (change) x VS | 0.05 [-0.18, 0.29] | 0.45 | .653 |

plot predicted effects

In the plots below, thin lines are subject-specific predicted probabilities (ggpredict with type = "random") and thick lines are population-level marginal predictions (ggeffect).

by construct

raw_values = data_complete_mod %>%
  select(subjectID, construct, instruction, RT, pgACC, vmPFC, VS) %>%
 gather(roi, x, pgACC, VS, vmPFC)

# grid of standardized ROI values at which to predict; the "[vals]" syntax in
# the ggeffects terms below pulls this vector from the calling environment
vals = seq(round(min(raw_values$x),1), round(max(raw_values$x), 1), .5)

predicted_sub = ggpredict(model_rt_wellbeing, terms = c("pgACC [vals]", "instruction", "subjectID"), type = "random") %>%
  data.frame() %>%
  mutate(roi = "pgACC",
         construct = "self-oriented well-being") %>%
  bind_rows(ggpredict(model_rt_wellbeing, terms = c("VS [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "VS",
                     construct = "self-oriented well-being")) %>%
  bind_rows(ggpredict(model_rt_wellbeing, terms = c("vmPFC [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "vmPFC",
                     construct = "self-oriented well-being")) %>%
  bind_rows(ggpredict(model_rt_social, terms = c("pgACC [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "pgACC",
                     construct = "social well-being")) %>%
  bind_rows(ggpredict(model_rt_social, terms = c("VS [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "VS",
                     construct = "social well-being")) %>%
  bind_rows(ggpredict(model_rt_social, terms = c("vmPFC [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "vmPFC",
                     construct = "social well-being")) %>%
  bind_rows(ggpredict(model_rt_illbeing, terms = c("pgACC [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "pgACC",
                     construct = "ill-being")) %>%
  bind_rows(ggpredict(model_rt_illbeing, terms = c("VS [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "VS",
                     construct = "ill-being")) %>%
  bind_rows(ggpredict(model_rt_illbeing, terms = c("vmPFC [vals]", "instruction", "subjectID"), type = "random") %>%
              data.frame() %>%
              mutate(roi = "vmPFC",
                     construct = "ill-being")) %>%
  mutate(construct = factor(construct, levels = c("self-oriented well-being", "social well-being", "ill-being")))

predicted = ggeffect(model_rt_wellbeing, terms = c("pgACC [vals]", "instruction")) %>%
  data.frame() %>%
  mutate(roi = "pgACC",
         construct = "self-oriented well-being") %>%
  bind_rows(ggeffect(model_rt_wellbeing, terms = c("VS [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "VS",
                     construct = "self-oriented well-being")) %>%
  bind_rows(ggeffect(model_rt_wellbeing, terms = c("vmPFC [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "vmPFC",
                     construct = "self-oriented well-being")) %>%
  bind_rows(ggeffect(model_rt_social, terms = c("pgACC [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "pgACC",
                     construct = "social well-being")) %>%
  bind_rows(ggeffect(model_rt_social, terms = c("VS [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "VS",
                     construct = "social well-being")) %>%
  bind_rows(ggeffect(model_rt_social, terms = c("vmPFC [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "vmPFC",
                     construct = "social well-being")) %>%
  bind_rows(ggeffect(model_rt_illbeing, terms = c("pgACC [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "pgACC",
                     construct = "ill-being")) %>%
  bind_rows(ggeffect(model_rt_illbeing, terms = c("VS [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "VS",
                     construct = "ill-being")) %>%
  bind_rows(ggeffect(model_rt_illbeing, terms = c("vmPFC [vals]", "instruction")) %>%
              data.frame() %>%
              mutate(roi = "vmPFC",
                     construct = "ill-being")) %>%
  mutate(construct = factor(construct, levels = c("self-oriented well-being", "social well-being", "ill-being")))

predicted %>%
  ggplot(aes(x, predicted)) +
  geom_line(data = predicted_sub, aes(color = construct, group = interaction(construct, group, facet)), size = .25, alpha = .5) +
  geom_line(aes(color = construct), size = 2) +
  facet_grid(group~roi) +
  scale_color_manual(values = constructs, name = "") + 
  scale_fill_manual(values = constructs, name = "") + 
  scale_x_continuous(breaks = seq(-6, 6, 2)) +
  labs(x = "\nparameter estimate (SD)", y = "probability of responding yes\n") + 
  plot_aes
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## ℹ Please use `linewidth` instead.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.

by instruction

predicted %>%
  ggplot(aes(x, predicted)) +
  geom_line(data = predicted_sub, aes(color = group, group = interaction(group, roi, facet)), size = .25, alpha = .5) +
  geom_line(aes(color = group), size = 2) +
  facet_grid(construct~roi) +
  scale_color_manual(values = instructions, name = "") + 
  scale_fill_manual(values = instructions, name = "") + 
  scale_x_continuous(breaks = seq(-6, 6, 2)) +
  labs(x = "\nparameter estimate (SD)", y = "probability of responding yes\n") + 
  plot_aes

by ROI

predicted %>%
  ggplot(aes(x, predicted)) +
  geom_line(data = predicted_sub, aes(color = roi, group = interaction(group, roi, facet)), size = .25, alpha = .5) +
  geom_line(aes(color = roi), size = 2) +
  facet_grid(group~construct) +
  scale_color_manual(values = rois, name = "") + 
  scale_fill_manual(values = rois, name = "") + 
  scale_x_continuous(breaks = seq(-6, 6, 2)) +
  labs(x = "\nparameter estimate (SD)", y = "probability of responding yes\n") + 
  plot_aes