This code reproduces the trial-level analyses with valence reported in the following manuscript:

Cosme, D., Mobasser, A., & Pfeifer, J. H. If you’re happy and you know it: Neural correlates of self-evaluated psychological health and well-being

load packages

if(!require('pacman')) {
    install.packages('pacman')
}

pacman::p_load(tidyverse, gtools, GGally, sjstats, lme4, lmerTest, knitr, ggeffects, kableExtra, wesanderson, broom.mixed, rmcorr, performance, car, install = TRUE)

define aesthetics

rois = c("#006989", "#56445D", "#8EC922")
constructs = c("#FEC601", "#F43C13", "#254E70")
instructions = wesanderson::wes_palette("Darjeeling1", 2, "continuous")
valence = c("#119DA4", "#19647E")
plot_aes = theme_minimal() +
  theme(legend.position = "top",
        legend.text = element_text(size = 12),
        text = element_text(size = 16, family = "Futura Medium"),
        axis.text = element_text(color = "black"),
        axis.line = element_line(colour = "black"),
        axis.ticks.y = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
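
The theme assumes the Futura Medium font is installed; an optional guard (assuming the systemfonts package is available) falls back to the default font family when it is not:

# optional: fall back to the default font family if Futura Medium is unavailable
if (!"Futura Medium" %in% systemfonts::system_fonts()$family) {
  plot_aes = plot_aes + theme(text = element_text(family = ""))
}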

define functions

model_table = function(model) {
  model %>%
    broom.mixed::tidy(., conf.int = TRUE) %>%
    filter(effect == "fixed") %>%
    rename("SE" = std.error,
           "z" = statistic,
           "p" = p.value) %>%
    mutate(term = gsub("\\(Intercept\\)", "Intercept (well-being)", term),
           term = gsub("log\\(RT\\)", "RT", term),
           term = gsub("valencenegative", "Valence (negative)", term),
           term = gsub(":", " x ", term),
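           # format p values: report very small values as "< .001", otherwise three decimals without a leading zero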
           p = ifelse(p < .001, "< .001",
                      ifelse(p == 1, "1.000", gsub("0.(.*)", ".\\1", sprintf("%.3f", p)))),
           `b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", estimate, conf.low, conf.high),
           z = abs(round(z, 2))) %>%
    select(term, `b [95% CI]`, z, p) %>%
    kable() %>%
    kableExtra::kable_styling()
}

load data

task = read.csv("../../data/task_data.csv") %>%
  mutate(responseYN = as.factor(responseYN),
         construct = recode(construct, "well-being" = "self-oriented well-being",
                            "social" = "social well-being"),
         construct = factor(construct, levels = c("self-oriented well-being", "social well-being", "ill-being")),
         valence = factor(valence, levels = c("positive", "negative")))
betas = read.csv("../../data/neuro_data.csv")

exclude outliers and standardize

  • Subset self trials
  • Exclude outlier trials that are > 3 SDs from the ROI median across participants
trial_conditions = task %>%
  select(subjectID, trial, run, instruction)

betas_outlier = betas %>%
  left_join(., trial_conditions) %>%
  filter(instruction == "self") %>%
  group_by(roi) %>%
  mutate(median = median(meanPE, na.rm = TRUE),
         sd3 = 3*sd(meanPE, na.rm = TRUE),
         outlier = ifelse(meanPE > median + sd3 | meanPE < median - sd3, "yes", "no")) 
## Joining, by = c("subjectID", "trial", "run")
betas_outlier %>%
  group_by(outlier) %>%
  summarize(n = n()) %>%
  ungroup() %>%
  mutate(total = sum(n),
         percent = round((n / total) * 100, 2)) %>%
  kable(format = "pandoc")
| outlier | n     | total | percent |
|---------|-------|-------|---------|
| no      | 11900 | 11988 | 99.27   |
| yes     | 88    | 11988 | 0.73    |
betas_ex = betas_outlier %>%
  filter(outlier == "no") %>%
  select(-c(median, sd3, outlier))

merge data and exclude participants

  • Motion exclusions: FP091
  • Technical failure: FP080, FP082
  • Non-compliance: FP021, FP049, FP085, FP121
  • Standardize within participant and ROI
data_ex = task %>%
  left_join(., betas_ex) %>%
  filter(!subjectID %in% c("FP091", "FP080", "FP082", "FP021", "FP049", "FP085", "FP121")) %>%
  filter(!is.na(meanPE)) %>%
  filter(instruction == "self") %>%
  group_by(roi, subjectID) %>%
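  # scale (but do not center) parameter estimates within participant and ROI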
  mutate(std = meanPE / sd(meanPE, na.rm = TRUE)) %>%
  select(-sdPE)
## Joining, by = c("trial", "subjectID", "run", "instruction")

remove missing data for MLM analysis

data_complete_mod = data_ex %>%
  select(-meanPE) %>%
  spread(roi, std) %>%
  select(subjectID, trial, run, construct, valence, item, responseYN, RT, pgACC, vmPFC, VS) %>%
  na.omit()
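
As an optional sanity check (not part of the original script), the complete-case sample entering the models can be confirmed directly; the model output below reports 3694 observations from 105 participants.

# optional check: complete-case trials and participants entering the models
data_complete_mod %>%
  ungroup() %>%
  summarize(n_trials = n(), n_participants = n_distinct(subjectID))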

ROI descriptives

# correlations
pgACC_vmPFC = data_complete_mod %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, pgACC, vmPFC) %>%
   rmcorr::rmcorr(subjectID, pgACC, vmPFC, .)

pgACC_VS = data_complete_mod %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, pgACC, VS) %>%
   rmcorr::rmcorr(subjectID, pgACC, VS, .)

vmPFC_VS = data_complete_mod %>%
   mutate(subjectID = as.factor(subjectID)) %>%
   select(subjectID, vmPFC, VS) %>%
   rmcorr::rmcorr(subjectID, vmPFC, VS, .)
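
As an optional check on the table caption below (which states that all correlations are significant at p < .001), the p-values can be printed directly, assuming the rmcorr objects expose a p element as in current versions of rmcorr:

# optional check: p-values for the repeated measures correlations
c(pgACC_vmPFC = pgACC_vmPFC$p, pgACC_VS = pgACC_VS$p, vmPFC_VS = vmPFC_VS$p)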

# means and SDs
mean_table = data_complete_mod %>%
  gather(roi, value, pgACC, vmPFC, VS) %>%
  group_by(roi) %>%
  summarize(M = round(mean(value, na.rm = TRUE), 2),
            SD = round(sd(value, na.rm = TRUE), 2)) %>%
  mutate(order = ifelse(roi == "pgACC", 1,
                 ifelse(roi == "vmPFC", 2, 3)),
         roi = ifelse(roi == "pgACC", "1. pgACC",
               ifelse(roi == "vmPFC", "2. vmPFC", "3. VS"))) %>%
  rename("ROI" = roi) %>%
  arrange(order) %>%
  select(-order)

# table
corr_table = data.frame(ROI = c("1. pgACC", "2. vmPFC", "3. VS"),
           `1` = c("--", paste0(round(pgACC_vmPFC[[1]],2), " [",round(pgACC_vmPFC[[4]][1],2),", ",round(pgACC_vmPFC[[4]][2],2),"]"),
                   paste0(round(pgACC_VS[[1]],2), " [",round(pgACC_VS[[4]][1],2),", ",round(pgACC_VS[[4]][2],2),"]")),
           `2` = c("", "--", paste0(round(vmPFC_VS[[1]],2), " [",round(vmPFC_VS[[4]][1],2),", ",round(vmPFC_VS[[4]][2],2),"]")),
           `3` = c("", "", "--"),
           check.names = FALSE)

mean_table %>%
  full_join(., corr_table) %>%
  knitr::kable(format = "pandoc", caption = "Repeated Measures Correlations Between ROIs (n = 3694). All correlations are statistically significant, p < .001. 95% confidence intervals are bracketed")
## Joining, by = "ROI"
Repeated Measures Correlations Between ROIs (n = 3694). All correlations are statistically significant, p < .001. 95% confidence intervals are bracketed
| ROI      | M    | SD   | 1                 | 2                 | 3  |
|----------|------|------|-------------------|-------------------|----|
| 1. pgACC | 0.04 | 1.03 | –                 |                   |    |
| 2. vmPFC | 0.15 | 1.02 | 0.84 [0.83, 0.85] | –                 |    |
| 3. VS    | 0.21 | 1.03 | 0.42 [0.39, 0.45] | 0.45 [0.43, 0.48] | –  |

RT models

These are the primary analyses controlling for reaction time, but with valence rather than well-being construct as the predictor; they are reported in the supplementary material.

run models

compare models

Determine the best-fitting model.

model_0_rt = Base model including no ROIs

model_1_rt = Main effects model including ROIs

model_2_rt = Interaction model including ROIs and their interactions with valence

model_0_rt = glmer(responseYN ~ valence * RT + 
                     (1 | subjectID), 
                family = binomial, 
                data = data_complete_mod, control = glmerControl(optimizer = "bobyqa"))

model_1_rt = glmer(responseYN ~ valence * RT + pgACC + vmPFC + VS + 
                     (1 | subjectID), 
                family = binomial, 
                data = data_complete_mod, control = glmerControl(optimizer = "bobyqa"))

model_2_rt = glmer(responseYN ~ RT*valence + pgACC*valence + vmPFC*valence + VS*valence +
                     (1 | subjectID), 
                family = binomial, 
                data = data_complete_mod, control = glmerControl(optimizer = "bobyqa"))

# compare models
anova(model_0_rt, model_1_rt, model_2_rt)
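
As a complementary (optional) check alongside the likelihood ratio tests, the information criteria of the three models can be compared directly:

# optional: compare information criteria across the three models
AIC(model_0_rt, model_1_rt, model_2_rt)
BIC(model_0_rt, model_1_rt, model_2_rt)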

calculate R2

Compare the R2 of the best-fitting model with that of the base model.

performance::r2(model_0_rt)
## # R2 for Mixed Models
## 
##   Conditional R2: 0.502
##      Marginal R2: 0.473
performance::r2(model_2_rt)
## # R2 for Mixed Models
## 
##   Conditional R2: 0.525
##      Marginal R2: 0.494
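
From the values above, the interaction model improves both marginal and conditional R2 by roughly .02 relative to the base model; a minimal sketch of that difference, assuming performance::r2() returns R2_marginal and R2_conditional elements for mixed models:

# difference in R2 between the interaction model and the base model
r2_base = performance::r2(model_0_rt)
r2_best = performance::r2(model_2_rt)
r2_best$R2_marginal - r2_base$R2_marginal
r2_best$R2_conditional - r2_base$R2_conditional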

check variance inflation in the best-fitting model

# vif
car::vif(model_2_rt)
##            RT       valence         pgACC         vmPFC            VS 
##      2.500556      1.232571      8.453405      8.622975      3.237803 
##    RT:valence valence:pgACC valence:vmPFC    valence:VS 
##      2.350125      8.256865      8.675406      3.239148
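
Note: the elevated VIFs for pgACC and vmPFC (and their interactions with valence) are consistent with the strong repeated measures correlation between these two ROIs (r = .84) reported in the ROI descriptives above.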

model summary

summary(model_2_rt)
## Generalized linear mixed model fit by maximum likelihood (Laplace
##   Approximation) [glmerMod]
##  Family: binomial  ( logit )
## Formula: responseYN ~ RT * valence + pgACC * valence + vmPFC * valence +  
##     VS * valence + (1 | subjectID)
##    Data: data_complete_mod
## Control: glmerControl(optimizer = "bobyqa")
## 
##      AIC      BIC   logLik deviance df.resid 
##   3223.9   3292.3  -1600.9   3201.9     3683 
## 
## Scaled residuals: 
##     Min      1Q  Median      3Q     Max 
## -7.6301 -0.6114  0.1928  0.4206  2.3656 
## 
## Random effects:
##  Groups    Name        Variance Std.Dev.
##  subjectID (Intercept) 0.211    0.4594  
## Number of obs: 3694, groups:  subjectID, 105
## 
## Fixed effects:
##                       Estimate Std. Error z value             Pr(>|z|)    
## (Intercept)            2.06393    0.09335  22.110 < 0.0000000000000002 ***
## RT                    -3.31452    0.22828 -14.520 < 0.0000000000000002 ***
## valencenegative       -2.79185    0.10355 -26.960 < 0.0000000000000002 ***
## pgACC                  0.56133    0.14269   3.934          0.000083525 ***
## vmPFC                 -0.57938    0.14531  -3.987          0.000066873 ***
## VS                     0.10794    0.08700   1.241              0.21474    
## RT:valencenegative     2.88822    0.28154  10.259 < 0.0000000000000002 ***
## valencenegative:pgACC -0.48139    0.17684  -2.722              0.00649 ** 
## valencenegative:vmPFC  0.92918    0.18105   5.132          0.000000286 ***
## valencenegative:VS    -0.23628    0.11001  -2.148              0.03173 *  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Correlation of Fixed Effects:
##             (Intr) RT     vlncng pgACC  vmPFC  VS     RT:vln vl:ACC vl:PFC
## RT          -0.281                                                        
## valencengtv -0.707  0.273                                                 
## pgACC        0.304 -0.168 -0.277                                          
## vmPFC       -0.318  0.174  0.288 -0.782                                   
## VS          -0.050 -0.031  0.047 -0.102 -0.214                            
## RT:vlncngtv  0.210 -0.751 -0.136  0.120 -0.130  0.032                     
## vlncngt:ACC -0.239  0.127  0.256 -0.791  0.618  0.080 -0.109              
## vlncngt:PFC  0.257 -0.140 -0.287  0.616 -0.786  0.166  0.129 -0.782       
## vlncngtv:VS  0.035  0.034 -0.092  0.077  0.166 -0.776 -0.015 -0.083 -0.219

model table

model_table(model_2_rt)
| term                       | b [95% CI]           | z     | p      |
|----------------------------|----------------------|-------|--------|
| Intercept (well-being)     | 2.06 [1.88, 2.25]    | 22.11 | < .001 |
| RT                         | -3.31 [-3.76, -2.87] | 14.52 | < .001 |
| Valence (negative)         | -2.79 [-2.99, -2.59] | 26.96 | < .001 |
| pgACC                      | 0.56 [0.28, 0.84]    | 3.93  | < .001 |
| vmPFC                      | -0.58 [-0.86, -0.29] | 3.99  | < .001 |
| VS                         | 0.11 [-0.06, 0.28]   | 1.24  | .215   |
| RT x Valence (negative)    | 2.89 [2.34, 3.44]    | 10.26 | < .001 |
| Valence (negative) x pgACC | -0.48 [-0.83, -0.13] | 2.72  | .006   |
| Valence (negative) x vmPFC | 0.93 [0.57, 1.28]    | 5.13  | < .001 |
| Valence (negative) x VS    | -0.24 [-0.45, -0.02] | 2.15  | .032   |

marginal probabilities

all

ggeffect(model_2_rt)
## $RT
## # Predicted probabilities of responseYN
## 
##    RT | Predicted |       95% CI
## --------------------------------
## -1.50 |      0.99 | [0.98, 0.99]
## -1.00 |      0.96 | [0.94, 0.97]
## -0.50 |      0.89 | [0.86, 0.91]
##  0.00 |      0.72 | [0.69, 0.75]
##  0.50 |      0.47 | [0.42, 0.51]
##  1.00 |      0.23 | [0.17, 0.29]
## 
## $valence
## # Predicted probabilities of responseYN
## 
## valence  | Predicted |       95% CI
## -----------------------------------
## positive |      0.92 | [0.90, 0.93]
## negative |      0.34 | [0.31, 0.38]
## 
## $pgACC
## # Predicted probabilities of responseYN
## 
## pgACC | Predicted |       95% CI
## --------------------------------
##    -6 |      0.26 | [0.10, 0.52]
##    -4 |      0.42 | [0.26, 0.61]
##    -2 |      0.61 | [0.51, 0.70]
##     0 |      0.77 | [0.74, 0.79]
##     2 |      0.87 | [0.82, 0.91]
##     4 |      0.94 | [0.87, 0.97]
##     6 |      0.97 | [0.91, 0.99]
## 
## $vmPFC
## # Predicted probabilities of responseYN
## 
## vmPFC | Predicted |       95% CI
## --------------------------------
##    -4 |      0.89 | [0.78, 0.95]
##    -2 |      0.84 | [0.77, 0.89]
##     0 |      0.77 | [0.75, 0.80]
##     2 |      0.69 | [0.61, 0.76]
##     4 |      0.59 | [0.41, 0.75]
##     6 |      0.48 | [0.23, 0.74]
## 
## $VS
## # Predicted probabilities of responseYN
## 
## VS | Predicted |       95% CI
## -----------------------------
## -4 |      0.76 | [0.65, 0.84]
## -2 |      0.76 | [0.71, 0.81]
##  0 |      0.77 | [0.74, 0.79]
##  2 |      0.77 | [0.72, 0.82]
##  4 |      0.78 | [0.69, 0.85]
##  6 |      0.78 | [0.64, 0.88]
## 
## attr(,"class")
## [1] "ggalleffects" "list"        
## attr(,"model.name")
## [1] "model_2_rt"

pgACC

ggeffect(model_2_rt, terms = c("valence", "pgACC [0, 1]")) %>%
  data.frame() %>%
  mutate(`b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", predicted, conf.low, conf.high)) %>%
  kable() %>%
  kableExtra::kable_styling()
| x        | predicted | std.error | conf.low  | conf.high | group | b [95% CI]        |
|----------|-----------|-----------|-----------|-----------|-------|-------------------|
| positive | 0.9146389 | 0.0985999 | 0.8982918 | 0.9285676 | 0     | 0.91 [0.90, 0.93] |
| negative | 0.3417270 | 0.0740079 | 0.3098843 | 0.3750635 | 0     | 0.34 [0.31, 0.38] |
| positive | 0.9494526 | 0.1847299 | 0.9289657 | 0.9642582 | 1     | 0.95 [0.93, 0.96] |
| negative | 0.3599309 | 0.1257773 | 0.3053004 | 0.4184486 | 1     | 0.36 [0.31, 0.42] |

vmPFC

ggeffect(model_2_rt, terms = c("valence", "vmPFC [0, 1]")) %>%
  data.frame() %>%
  mutate(`b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", predicted, conf.low, conf.high)) %>%
  kable() %>%
  kableExtra::kable_styling()
| x        | predicted | std.error | conf.low  | conf.high | group | b [95% CI]        |
|----------|-----------|-----------|-----------|-----------|-------|-------------------|
| positive | 0.9226005 | 0.1061025 | 0.9063838 | 0.9362058 | 0     | 0.92 [0.91, 0.94] |
| negative | 0.3309961 | 0.0757254 | 0.2989920 | 0.3646439 | 0     | 0.33 [0.30, 0.36] |
| positive | 0.8697602 | 0.1416705 | 0.8349566 | 0.8981201 | 1     | 0.87 [0.83, 0.90] |
| negative | 0.4124426 | 0.1201028 | 0.3568014 | 0.4704146 | 1     | 0.41 [0.36, 0.47] |

VS

ggeffect(model_2_rt, terms = c("valence", "VS [0, 1]")) %>%
  data.frame() %>%
  mutate(`b [95% CI]` = sprintf("%.2f [%0.2f, %.2f]", predicted, conf.low, conf.high)) %>%
  kable() %>%
  kableExtra::kable_styling()
| x        | predicted | std.error | conf.low  | conf.high | group | b [95% CI]        |
|----------|-----------|-----------|-----------|-----------|-------|-------------------|
| positive | 0.9146027 | 0.0995477 | 0.8980795 | 0.9286600 | 0     | 0.91 [0.90, 0.93] |
| negative | 0.3484134 | 0.0752108 | 0.3157374 | 0.3825804 | 0     | 0.35 [0.32, 0.38] |
| positive | 0.9226645 | 0.1261644 | 0.9030721 | 0.9385660 | 1     | 0.92 [0.90, 0.94] |
| negative | 0.3198705 | 0.0918865 | 0.2820205 | 0.3602514 | 1     | 0.32 [0.28, 0.36] |

plot predicted effects

by valence

raw_values = data_complete_mod %>%
  select(subjectID, valence, RT, pgACC, vmPFC, VS) %>%
  gather(roi, x, pgACC, VS, vmPFC)

vals = seq(round(min(raw_values$x),1), round(max(raw_values$x), 1), .5)

predicted_sub = ggpredict(model_2_rt, terms = c("pgACC [vals]", "valence", "subjectID"), type = "random") %>%
  data.frame() %>%
  mutate(roi = "pgACC") %>%
  bind_rows(ggpredict(model_2_rt, terms = c("VS [vals]", "valence", "subjectID"), type = "random") %>%
    data.frame() %>%
    mutate(roi = "VS")) %>%
  bind_rows(ggpredict(model_2_rt, terms = c("vmPFC [vals]", "valence", "subjectID"), type = "random") %>%
    data.frame() %>%
    mutate(roi = "vmPFC"))

predicted = ggeffect(model_2_rt, terms = c("pgACC [vals]", "valence")) %>%
  data.frame() %>%
  mutate(roi = "pgACC") %>%
  bind_rows(ggeffect(model_2_rt, terms = c("VS [vals]", "valence")) %>%
    data.frame() %>%
    mutate(roi = "VS")) %>%
  bind_rows(ggeffect(model_2_rt, terms = c("vmPFC [vals]", "valence")) %>%
    data.frame() %>%
    mutate(roi = "vmPFC"))

predicted %>%
  ggplot(aes(x, predicted)) +
  geom_line(data = predicted_sub, aes(color = group, group = interaction(facet, group)), size = .25, alpha = .5) +
  geom_line(aes(color = group), size = 2) +
  facet_grid(~roi) +
  scale_color_manual(values = valence, name = "") + 
  scale_fill_manual(values = valence, name = "") + 
  scale_x_continuous(breaks = seq(-6, 6, 2)) +
  labs(x = "\nparameter estimate (SD)", y = "probability of responding yes\n") + 
  plot_aes
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## ℹ Please use `linewidth` instead.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
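
The warning is cosmetic and does not affect the figure; assuming ggplot2 >= 3.4.0, it can be avoided by replacing size with linewidth in the geom_line() calls, e.g.:

# equivalent to the call above, without the deprecation warning (ggplot2 >= 3.4.0)
geom_line(data = predicted_sub, aes(color = group, group = interaction(facet, group)), linewidth = .25, alpha = .5)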

by ROI

predicted %>%
  ggplot(aes(x, predicted)) +
  geom_line(data = predicted_sub, aes(color = roi, group = interaction(facet, roi)), size = .25, alpha = .5) +
  geom_line(aes(color = roi), size = 2) +
  facet_grid(~group) +
  scale_color_manual(values = rois, name = "") + 
  scale_fill_manual(values = rois, name = "") + 
  scale_x_continuous(breaks = seq(-6, 6, 2)) +
  labs(x = "\nparameter estimate (SD)", y = "probability of responding yes\n") + 
  plot_aes

RT

vals = seq(round(min(data_complete_mod$RT),1), round(max(data_complete_mod$RT), 1), .1)

predicted_sub = ggpredict(model_2_rt, terms = c("RT [vals]", "valence", "subjectID"), type = "random") %>%
  data.frame()

ggeffect(model_2_rt, terms = c("RT [vals]", "valence")) %>%
  data.frame() %>%
  ggplot(aes(x, predicted)) +
  geom_line(data = predicted_sub, aes(color = group, group = interaction(facet, group)), size = .25, alpha = .5) +
  geom_line(aes(color = group), size = 1.5) +
  scale_color_manual(values = valence, name = "") + 
  scale_fill_manual(values = valence, name = "") + 
  labs(x = "\nlog-transformed grand-mean centered RT (s)", y = "probability of responding yes\n") + 
  plot_aes
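
If the figures are to be exported (e.g., for the supplementary material), each plot can be saved with ggsave(); the file name and dimensions below are placeholders:

# optional: save the most recently printed plot (file name and size are placeholders)
ggsave("rt_by_valence.png", width = 8, height = 5, dpi = 300)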