# File: eohi/.history/eohi2/mixed anova - domain means_20251003150004.r
# Snapshot date: 2025-12-23 15:47:09 -05:00
# NOTE: the original capture included web-viewer chrome ("611 lines / 26 KiB /
# R / Raw Blame History" and an ambiguous-Unicode warning); that text was page
# artifact, not part of the script, and is preserved here only as a comment so
# the file remains valid R.

# Mixed ANOVA Analysis for Domain Means - EOHI2
# EOHI Experiment Data Analysis - Domain Level Analysis with INTERVAL factor
# Variables: NPast_5_pref_MEAN, NPast_5_pers_MEAN, NPast_5_val_MEAN, etc.
# NFut_5_pref_MEAN, NFut_5_pers_MEAN, NFut_5_val_MEAN, etc.
# NPast_10_pref_MEAN, NPast_10_pers_MEAN, NPast_10_val_MEAN, etc.
# NFut_10_pref_MEAN, NFut_10_pers_MEAN, NFut_10_val_MEAN, etc.
# 5.10past_pref_MEAN, 5.10past_pers_MEAN, 5.10past_val_MEAN
# 5.10fut_pref_MEAN, 5.10fut_pers_MEAN, 5.10fut_val_MEAN
# Load required libraries
library(tidyverse)
library(ez) # For ezANOVA (mixed ANOVA with sphericity corrections)
library(car) # For leveneTest (homogeneity of variance)
library(afex) # For aov_ez (cleaner ANOVA output)
library(nortest) # For normality tests
library(emmeans) # For post-hoc comparisons
library(purrr) # For map functions
library(effsize) # For Cohen's d calculations
library(effectsize) # For effect size calculations
# Global options to remove scientific notation
options(scipen = 999)
# Set contrasts to sum for mixed ANOVA (necessary for proper interpretation)
# (contr.sum is required for Type-III sums of squares, requested via
# ezANOVA(type = 3) below, to be meaningful)
options(contrasts = c("contr.sum", "contr.poly"))
# NOTE(review): hard-coded, machine-specific working directory; consider a
# project-relative path (e.g. the here package) before sharing this script.
setwd("C:/Users/irina/Documents/DND/EOHI/eohi2")
# Read the data -------------------------------------------------------------
data <- read.csv("eohi2.csv")

# Display basic information about the dataset.
# (Fix: collapse = " x " so dimensions print as "440 x 120", not "440 x120".)
print(paste("Dataset dimensions:", paste(dim(data), collapse = " x ")))
print(paste("Number of participants:", length(unique(data$ResponseId))))

# Verify the specific variables we need:
# 2 TIME x 3 INTERVAL x 3 DOMAIN = 18 domain-level mean-difference columns.
required_vars <- c(
  "NPast_5_pref_MEAN", "NPast_5_pers_MEAN", "NPast_5_val_MEAN",
  "NPast_10_pref_MEAN", "NPast_10_pers_MEAN", "NPast_10_val_MEAN",
  "NFut_5_pref_MEAN", "NFut_5_pers_MEAN", "NFut_5_val_MEAN",
  "NFut_10_pref_MEAN", "NFut_10_pers_MEAN", "NFut_10_val_MEAN",
  "5.10past_pref_MEAN", "5.10past_pers_MEAN", "5.10past_val_MEAN",
  "5.10fut_pref_MEAN", "5.10fut_pers_MEAN", "5.10fut_val_MEAN"
)
# setdiff keeps the required_vars order and reads more directly than
# required_vars[!required_vars %in% colnames(data)]
missing_vars <- setdiff(required_vars, colnames(data))
if (length(missing_vars) > 0) {
  print(paste("Warning: Missing variables:", paste(missing_vars, collapse = ", ")))
} else {
  print("All required domain mean variables found!")
}
# Map each measured variable onto the three within-subjects design factors:
# TIME (Past/Future), DOMAIN (Preferences/Personality/Values), and
# INTERVAL (5, 10, or the 5-to-10 comparison "5_10").
# Row order mirrors required_vars: six blocks of three variables, each block
# covering the three domains for one TIME x INTERVAL combination.
domain_mapping <- data.frame(
  variable = required_vars,
  time = rep(c("Past", "Past", "Future", "Future", "Past", "Future"), each = 3),
  domain = rep(c("Preferences", "Personality", "Values"), times = 6),
  interval = rep(c("5", "10", "5", "10", "5_10", "5_10"), each = 3),
  stringsAsFactors = FALSE
)
print("Domain mapping created:")
print(domain_mapping)
# Reshape from one row per participant to one row per participant x variable,
# attach the design-factor mapping, and drop incomplete observations.
long_data <- data %>%
  select(ResponseId, TEMPORAL_DO, INTERVAL_DO, all_of(required_vars)) %>%
  pivot_longer(
    cols = all_of(required_vars),
    names_to = "variable",
    values_to = "MEAN_DIFFERENCE"
  ) %>%
  left_join(domain_mapping, by = "variable") %>%
  # Encode the design factors with explicit level orderings; the identifier
  # and counterbalancing columns just need to become factors.
  mutate(
    TIME = factor(time, levels = c("Past", "Future")),
    DOMAIN = factor(domain, levels = c("Preferences", "Personality", "Values")),
    INTERVAL = factor(interval, levels = c("5", "10", "5_10")),
    across(c(ResponseId, TEMPORAL_DO, INTERVAL_DO), as.factor)
  ) %>%
  # Drop rows with a missing outcome, then keep only the analysis columns
  filter(!is.na(MEAN_DIFFERENCE)) %>%
  select(ResponseId, TEMPORAL_DO, INTERVAL_DO, TIME, DOMAIN, INTERVAL, MEAN_DIFFERENCE)

print(paste("Long data dimensions:", paste(dim(long_data), collapse = " x")))
print(paste("Number of participants:", length(unique(long_data$ResponseId))))
# =============================================================================
# DESCRIPTIVE STATISTICS
# =============================================================================
# One helper replaces three near-identical summarise() blocks (DRY).
#   df         : long-format data with a MEAN_DIFFERENCE column
#   group_vars : character vector of grouping column names
#   detailed   : if TRUE also report median, quartiles, min and max
# Returns an ungrouped tibble, one row per group combination.
describe_groups <- function(df, group_vars, detailed = FALSE) {
  stats <- df %>%
    group_by(across(all_of(group_vars))) %>%
    summarise(
      n = n(),
      mean = round(mean(MEAN_DIFFERENCE, na.rm = TRUE), 5),
      variance = round(var(MEAN_DIFFERENCE, na.rm = TRUE), 5),
      sd = round(sd(MEAN_DIFFERENCE, na.rm = TRUE), 5),
      median = round(median(MEAN_DIFFERENCE, na.rm = TRUE), 5),
      q1 = round(quantile(MEAN_DIFFERENCE, 0.25, na.rm = TRUE), 5),
      q3 = round(quantile(MEAN_DIFFERENCE, 0.75, na.rm = TRUE), 5),
      min = round(min(MEAN_DIFFERENCE, na.rm = TRUE), 5),
      max = round(max(MEAN_DIFFERENCE, na.rm = TRUE), 5),
      .groups = 'drop'
    )
  if (!detailed) {
    # The two between-subjects summaries only reported n/mean/variance/sd
    stats <- stats %>% select(all_of(c(group_vars, "n", "mean", "variance", "sd")))
  }
  stats
}

# Overall descriptive statistics by TIME, DOMAIN, and INTERVAL
desc_stats <- describe_groups(long_data, c("TIME", "DOMAIN", "INTERVAL"), detailed = TRUE)
print("Descriptive statistics by TIME, DOMAIN, and INTERVAL:")
print(desc_stats)

# Descriptive statistics by between-subjects factors crossed with the design
desc_stats_by_between <- describe_groups(
  long_data, c("TEMPORAL_DO", "INTERVAL_DO", "TIME", "DOMAIN", "INTERVAL")
)
print("Descriptive statistics by between-subjects factors:")
print(desc_stats_by_between)

# Summary by between-subjects factors only
desc_stats_between_only <- describe_groups(long_data, c("TEMPORAL_DO", "INTERVAL_DO"))
print("Descriptive statistics by between-subjects factors only:")
print(desc_stats_between_only)
# =============================================================================
# ASSUMPTION TESTING
# =============================================================================
# Remove missing values for assumption testing
# NOTE(review): long_data was already filtered with !is.na(MEAN_DIFFERENCE)
# when it was built above, so long_data_clean is identical to long_data and
# the missingness audit below will always report 0 missing. To audit true
# missingness, compute this summary before that filter is applied.
long_data_clean <- long_data[!is.na(long_data$MEAN_DIFFERENCE), ]
print(paste("Data after removing missing values:", paste(dim(long_data_clean), collapse = " x")))
# 1. Missing values check: per-cell NA counts and percentages
missing_summary <- long_data %>%
group_by(TIME, DOMAIN, INTERVAL) %>%
summarise(
n_total = n(),
n_missing = sum(is.na(MEAN_DIFFERENCE)),
pct_missing = round(100 * n_missing / n_total, 2),
.groups = 'drop'
)
print("Missing values by TIME, DOMAIN, and INTERVAL:")
print(missing_summary)
# 2. Outlier detection using Tukey's IQR fences: within each design cell,
# values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are counted as outliers.
outlier_summary <- long_data_clean %>%
group_by(TIME, DOMAIN, INTERVAL) %>%
summarise(
n = n(),
mean = mean(MEAN_DIFFERENCE),
sd = sd(MEAN_DIFFERENCE),
q1 = quantile(MEAN_DIFFERENCE, 0.25),
q3 = quantile(MEAN_DIFFERENCE, 0.75),
# summarise() lets later expressions refer to columns created above,
# so the fences can be built from q1/q3/iqr in one pass
iqr = q3 - q1,
lower_bound = q1 - 1.5 * iqr,
upper_bound = q3 + 1.5 * iqr,
n_outliers = sum(MEAN_DIFFERENCE < lower_bound | MEAN_DIFFERENCE > upper_bound),
.groups = 'drop'
)
print("Outlier summary (IQR method):")
print(outlier_summary)
# 3. Anderson-Darling normality test per design cell.
# The test is fit once per group and stored in a list-column; the original
# called ad.test() twice per group (once for the statistic, once for the
# p-value). NOTE(review): nortest::ad.test needs at least 8 observations per
# cell — confirm all cells are large enough.
normality_results <- long_data_clean %>%
  group_by(TIME, DOMAIN, INTERVAL) %>%
  summarise(
    n = n(),
    ad_fit = list(ad.test(MEAN_DIFFERENCE)),
    .groups = 'drop'
  ) %>%
  mutate(
    ad_statistic = vapply(ad_fit, function(fit) unname(fit$statistic), numeric(1)),
    ad_p_value = vapply(ad_fit, function(fit) fit$p.value, numeric(1))
  ) %>%
  select(-ad_fit)
print("Anderson-Darling normality test results:")
# Round only the numeric columns for display
normality_results_rounded <- normality_results %>%
  mutate(across(where(is.numeric), ~ round(.x, 5)))
print(normality_results_rounded)
# 4. Homogeneity of variance (Levene's test)
# Helper: run Levene's test for `factor_name` within every combination of
# `group_vars`. Each test is fit once with an explicit data argument; the
# original built the formula inside summarise(), which evaluated leveneTest()
# twice per group and relied on the formula finding its columns through
# dplyr's data mask rather than an explicit `data =`.
#   df          : long-format data with MEAN_DIFFERENCE and factor columns
#   group_vars  : character vector of grouping column names
#   factor_name : name of the factor whose variances are compared
# Returns a tibble with one row per group: levene_F and levene_p.
levene_by <- function(df, group_vars, factor_name) {
  fml <- stats::as.formula(paste("MEAN_DIFFERENCE ~", factor_name))
  df %>%
    group_by(across(all_of(group_vars))) %>%
    group_modify(function(cell, key) {
      fit <- leveneTest(fml, data = cell)
      tibble(
        levene_F = fit$`F value`[1],
        levene_p = fit$`Pr(>F)`[1]
      )
    }) %>%
    ungroup()
}

# Test homogeneity across TIME within each DOMAIN × INTERVAL combination
homogeneity_time <- levene_by(long_data_clean, c("DOMAIN", "INTERVAL"), "TIME")
print("Homogeneity of variance across TIME within each DOMAIN × INTERVAL combination:")
print(homogeneity_time)

# Test homogeneity across DOMAIN within each TIME × INTERVAL combination
homogeneity_domain <- levene_by(long_data_clean, c("TIME", "INTERVAL"), "DOMAIN")
print("Homogeneity of variance across DOMAIN within each TIME × INTERVAL combination:")
print(homogeneity_domain)

# Test homogeneity across INTERVAL within each TIME × DOMAIN combination
homogeneity_interval <- levene_by(long_data_clean, c("TIME", "DOMAIN"), "INTERVAL")
print("Homogeneity of variance across INTERVAL within each TIME × DOMAIN combination:")
print(homogeneity_interval)
# 5. Hartley's F-max test for between-subjects factors
# Fix: the banner is emitted with cat() — print("\n...") renders the newline
# escape literally ([1] "\n=== ...") instead of producing a blank line.
cat("\n=== HARTLEY'S F-MAX TEST FOR BETWEEN-SUBJECTS FACTORS ===\n")
# Check what values the between-subjects factors actually have
print("Unique TEMPORAL_DO values:")
print(unique(long_data_clean$TEMPORAL_DO))
print("Unique INTERVAL_DO values:")
print(unique(long_data_clean$INTERVAL_DO))
# Hartley's F-max statistic: ratio of the largest to the smallest variance in
# `variances` (values near 1 indicate homogeneous group variances). NAs are
# ignored when locating the extremes.
calculate_hartley_ratio <- function(variances) {
  bounds <- range(variances, na.rm = TRUE)
  bounds[2] / bounds[1]
}
# Hartley's F-max test across TEMPORAL_DO within each TIME × DOMAIN × INTERVAL combination
# NOTE(review): the TEMPORAL_DO level labels "01PAST"/"02FUT" are hard-coded —
# verify they match the unique values printed above. If a level is absent (or
# has fewer than 2 observations) in a cell, var() returns NA and the ratio
# becomes NA as well (max/min here have no na.rm). The calculate_hartley_ratio
# helper defined above is not used here; the ratio is computed inline.
print("\n=== HARTLEY'S F-MAX TEST: TEMPORAL_DO within each TIME × DOMAIN × INTERVAL combination ===")
observed_temporal_ratios <- long_data_clean %>%
group_by(TIME, DOMAIN, INTERVAL) %>%
summarise(
# Calculate variances for each TEMPORAL_DO level within this combination
past_var = var(MEAN_DIFFERENCE[TEMPORAL_DO == "01PAST"], na.rm = TRUE),
fut_var = var(MEAN_DIFFERENCE[TEMPORAL_DO == "02FUT"], na.rm = TRUE),
# Calculate F-max ratio (largest variance over smallest)
f_max_ratio = max(past_var, fut_var) / min(past_var, fut_var),
.groups = 'drop'
) %>%
select(TIME, DOMAIN, INTERVAL, past_var, fut_var, f_max_ratio)
print(observed_temporal_ratios)
# Hartley's F-max test across INTERVAL_DO within each TIME × DOMAIN × TEMPORAL_DO combination
# NOTE(review): the INTERVAL_DO level labels "5"/"10" are hard-coded — verify
# they match the unique values printed above. As with the TEMPORAL_DO block,
# an absent level yields var() = NA and an NA ratio.
print("\n=== HARTLEY'S F-MAX TEST: INTERVAL_DO within each TIME × DOMAIN × TEMPORAL_DO combination ===")
observed_interval_ratios <- long_data_clean %>%
group_by(TIME, DOMAIN, TEMPORAL_DO) %>%
summarise(
# Calculate variances for each INTERVAL_DO level within this combination
int5_var = var(MEAN_DIFFERENCE[INTERVAL_DO == "5"], na.rm = TRUE),
int10_var = var(MEAN_DIFFERENCE[INTERVAL_DO == "10"], na.rm = TRUE),
# Calculate F-max ratio (largest variance over smallest)
f_max_ratio = max(int5_var, int10_var) / min(int5_var, int10_var),
.groups = 'drop'
) %>%
select(TIME, DOMAIN, TEMPORAL_DO, int5_var, int10_var, f_max_ratio)
print(observed_interval_ratios)
# =============================================================================
# MIXED ANOVA ANALYSIS
# =============================================================================
# Check data dimensions and structure
print(paste("Data size for ANOVA:", nrow(long_data_clean), "rows"))
print(paste("Number of participants:", length(unique(long_data_clean$ResponseId))))
print(paste("Design factors: TIME (", length(levels(long_data_clean$TIME)), "), DOMAIN (",
length(levels(long_data_clean$DOMAIN)), "), INTERVAL (",
length(levels(long_data_clean$INTERVAL)), "), TEMPORAL_DO (",
length(levels(long_data_clean$TEMPORAL_DO)), "), INTERVAL_DO (",
length(levels(long_data_clean$INTERVAL_DO)), ")", sep = ""))
# Check for complete cases
complete_cases <- sum(complete.cases(long_data_clean))
print(paste("Complete cases:", complete_cases, "out of", nrow(long_data_clean)))
# Check if design is balanced: count observations per participant x TIME x
# DOMAIN x INTERVAL cell; counts > 1 would mean duplicated measurements.
# NOTE(review): a count of 0 also passes this check, so the "balanced" message
# can overstate completeness — a participant missing an entire cell would not
# be flagged here (ezANOVA below will error on such cases instead).
design_balance <- table(long_data_clean$ResponseId, long_data_clean$TIME, long_data_clean$DOMAIN, long_data_clean$INTERVAL)
if(all(design_balance %in% c(0, 1))) {
print("Design is balanced: each participant has data for all TIME × DOMAIN × INTERVAL combinations")
} else {
print("Warning: Design is unbalanced")
print(summary(as.vector(design_balance)))
}
# =============================================================================
# MIXED ANOVA WITH SPHERICITY CORRECTIONS
# =============================================================================
print("\n=== MIXED ANOVA RESULTS (with sphericity corrections) ===")
# Mixed ANOVA using ezANOVA with automatic sphericity corrections
# Between-subjects: TEMPORAL_DO (2 levels: 01PAST, 02FUT) × INTERVAL_DO (2 levels: 5, 10)
# Within-subjects: TIME (2 levels: Past, Future) × DOMAIN (3 levels: Preferences, Personality, Values) × INTERVAL (3 levels: 5, 10, 5_10)
# type = 3 requests Type-III sums of squares (meaningful only with the
# contr.sum contrasts set at the top of the script); detailed = TRUE adds the
# SSn/SSd columns used for effect-size reporting.
mixed_anova_model <- ezANOVA(data = long_data_clean,
dv = MEAN_DIFFERENCE,
wid = ResponseId,
between = .(TEMPORAL_DO, INTERVAL_DO),
within = .(TIME, DOMAIN, INTERVAL),
type = 3,
detailed = TRUE)
print("ANOVA Results:")
anova_output <- mixed_anova_model$ANOVA
rownames(anova_output) <- NULL # Reset row numbers to be sequential
print(anova_output)
# Show Mauchly's test for sphericity.
# Fix: section headings use cat() — print("\n...") renders the newline escape
# literally instead of producing a blank line.
cat("\nMauchly's Test of Sphericity:\n")
print(mixed_anova_model$Mauchly)
# Show sphericity-corrected results (Greenhouse-Geisser and Huynh-Feldt)
if (!is.null(mixed_anova_model$`Sphericity Corrections`)) {
  cat("\nGreenhouse-Geisser and Huynh-Feldt Corrections:\n")
  print(mixed_anova_model$`Sphericity Corrections`)

  # Extract and display corrected degrees of freedom
  cat("\n=== CORRECTED DEGREES OF FREEDOM ===\n")
  sphericity_corr <- mixed_anova_model$`Sphericity Corrections`
  anova_table <- mixed_anova_model$ANOVA
  # Locate each corrected effect's row in the ANOVA table once, instead of
  # repeating match() for every column (the original called it eight times).
  idx <- match(sphericity_corr$Effect, anova_table$Effect)
  # Corrected dfs are the original dfs scaled by the epsilon estimates.
  corrected_df <- data.frame(
    Effect = sphericity_corr$Effect,
    Original_DFn = anova_table$DFn[idx],
    Original_DFd = anova_table$DFd[idx],
    GG_DFn = anova_table$DFn[idx] * sphericity_corr$GGe,
    GG_DFd = anova_table$DFd[idx] * sphericity_corr$GGe,
    HF_DFn = anova_table$DFn[idx] * sphericity_corr$HFe,
    HF_DFd = anova_table$DFd[idx] * sphericity_corr$HFe,
    GG_epsilon = sphericity_corr$GGe,
    HF_epsilon = sphericity_corr$HFe
  )
  print(corrected_df)

  cat("\n=== CORRECTED F-TESTS ===\n")

  # Report one uncorrected F-test line for `effect` if it is present in the
  # ANOVA table; `note` is appended before the newline (used to flag effects
  # whose sphericity is automatically satisfied).
  report_f_line <- function(effect, note = "") {
    if (effect %in% anova_table$Effect) {
      row <- match(effect, anova_table$Effect)
      cat(sprintf("%s: F(%d, %d) = %.3f, p = %.6f%s\n",
                  effect, anova_table$DFn[row], anova_table$DFd[row],
                  anova_table$F[row], anova_table$p[row], note))
    }
  }

  # Between-subjects effects (no sphericity corrections needed)
  cat("\nBETWEEN-SUBJECTS EFFECTS:\n")
  for (effect in c("TEMPORAL_DO", "INTERVAL_DO", "TEMPORAL_DO:INTERVAL_DO")) {
    report_f_line(effect)
  }

  # Within-subjects main effects (uncorrected; corrected versions of the
  # 3-level factors appear in the loop below when ezANOVA flags them)
  cat("\nWITHIN-SUBJECTS EFFECTS:\n")
  report_f_line("TIME", " (2 levels, sphericity satisfied)")
  report_f_line("DOMAIN")
  report_f_line("INTERVAL")

  # Effects flagged by Mauchly's test: original, GG- and HF-corrected F-tests
  cat("\nINTERACTIONS WITH SPHERICITY CORRECTIONS:\n")
  for (i in seq_len(nrow(corrected_df))) {
    effect <- corrected_df$Effect[i]
    f_value <- anova_table$F[idx[i]]
    cat(sprintf("\n%s:\n", effect))
    cat(sprintf("  Original: F(%d, %d) = %.3f\n",
                corrected_df$Original_DFn[i], corrected_df$Original_DFd[i], f_value))
    cat(sprintf("  GG-corrected: F(%.2f, %.2f) = %.3f, p = %.6f\n",
                corrected_df$GG_DFn[i], corrected_df$GG_DFd[i], f_value,
                sphericity_corr$`p[GG]`[i]))
    cat(sprintf("  HF-corrected: F(%.2f, %.2f) = %.3f, p = %.6f\n",
                corrected_df$HF_DFn[i], corrected_df$HF_DFd[i], f_value,
                sphericity_corr$`p[HF]`[i]))
  }
} else {
  cat("\nNote: Sphericity corrections not needed (sphericity assumption met)\n")
}
# =============================================================================
# EFFECT SIZES (GENERALIZED ETA SQUARED)
# =============================================================================
print("\n=== EFFECT SIZES (GENERALIZED ETA SQUARED) ===")
# ezANOVA already reports generalized eta squared in its ANOVA table; pull
# out the effect names and the ges column and round it for display.
effect_sizes <- within(
  mixed_anova_model$ANOVA[, c("Effect", "ges")],
  ges <- round(ges, 5)
)
print("Generalized Eta Squared:")
print(effect_sizes)
# =============================================================================
# POST-HOC COMPARISONS
# =============================================================================
# Post-hoc comparisons using emmeans
print("\n=== POST-HOC COMPARISONS ===")
# Create aov model for emmeans (emmeans requires aov object, not ezANOVA output)
# The Error(ResponseId/(TIME * DOMAIN * INTERVAL)) term declares the
# within-subject error strata for the repeated-measures factors.
# NOTE(review): aov() fits sequential (Type-I) sums of squares, whereas the
# ezANOVA above requested Type-III; with unbalanced cells the emmeans tests
# below may differ slightly from the ANOVA table — confirm this is acceptable.
aov_model <- aov(MEAN_DIFFERENCE ~ TEMPORAL_DO * INTERVAL_DO * TIME * DOMAIN * INTERVAL + Error(ResponseId/(TIME * DOMAIN * INTERVAL)),
data = long_data_clean)
# Report the estimated marginal means (optional) and Bonferroni-adjusted
# pairwise contrasts for one main effect. One helper replaces five duplicated
# print/emmeans/pairs stanzas (DRY).
#   model       : the aov model fitted above
#   factor_name : name of the factor, as a string (reformulate builds ~factor)
#   header      : heading string printed before the output
#   show_means  : print the EMM table itself (used for within-subjects factors)
# Returns the emmGrid invisibly so it can be reused for effect sizes below.
report_main_effect <- function(model, factor_name, header, show_means = TRUE) {
  print(header)
  emm <- emmeans(model, reformulate(factor_name))
  if (show_means) {
    print("Estimated Marginal Means:")
    print(emm)
    print("\nPairwise Contrasts:")
  }
  print(pairs(emm, adjust = "bonferroni"))
  invisible(emm)
}

# Within-subjects main effects: EMMs plus contrasts
time_emmeans <- report_main_effect(aov_model, "TIME", "Main Effect of TIME:")
domain_emmeans <- report_main_effect(aov_model, "DOMAIN", "\nMain Effect of DOMAIN:")
interval_emmeans <- report_main_effect(aov_model, "INTERVAL", "\nMain Effect of INTERVAL:")

# Between-subjects main effects: contrasts only (as in the original)
temporal_emmeans <- report_main_effect(aov_model, "TEMPORAL_DO",
                                       "\nMain Effect of TEMPORAL_DO:",
                                       show_means = FALSE)
interval_do_emmeans <- report_main_effect(aov_model, "INTERVAL_DO",
                                          "\nMain Effect of INTERVAL_DO:",
                                          show_means = FALSE)
# =============================================================================
# COHEN'S D FOR MAIN EFFECTS
# =============================================================================
print("\n=== COHEN'S D FOR MAIN EFFECTS ===")
# Main Effect of TIME (if significant)
print("\n=== COHEN'S D FOR TIME MAIN EFFECT ===")
# Unadjusted contrast: TIME has only 2 levels, so only one comparison exists
time_main_contrast <- pairs(time_emmeans, adjust = "none")
time_main_df <- as.data.frame(time_main_contrast)
print("TIME main effect contrast:")
print(time_main_df)
# Calculate Cohen's d for TIME main effect
# NOTE(review): cohen.d() here treats the pooled Past and Future observations
# as two independent groups, ignoring that they are repeated measures from the
# same participants — confirm an independent-groups d is intended, or use a
# paired/within-subject effect size instead.
if(nrow(time_main_df) > 0) {
cat("\nCohen's d for TIME main effect:\n")
time_past_data <- long_data_clean$MEAN_DIFFERENCE[long_data_clean$TIME == "Past"]
time_future_data <- long_data_clean$MEAN_DIFFERENCE[long_data_clean$TIME == "Future"]
time_cohens_d <- cohen.d(time_past_data, time_future_data)
cat(sprintf("Past vs Future: n1 = %d, n2 = %d\n", length(time_past_data), length(time_future_data)))
cat(sprintf("Cohen's d: %.5f\n", time_cohens_d$estimate))
cat(sprintf("Effect size interpretation: %s\n", time_cohens_d$magnitude))
cat(sprintf("p-value: %.5f\n", time_main_df$p.value[1]))
}
# Cohen's d for every significant (p < .05) pairwise contrast of one factor.
# One helper replaces two duplicated 30-line loops (DRY).
#   contrast_df : data.frame from as.data.frame(pairs(...)) with `contrast`
#                 and `p.value` columns; labels must look like "LevelA - LevelB"
#   factor_name : column of long_data_clean holding the factor levels
# NOTE(review): as in the TIME block above, cohen.d() treats the pooled
# observations of each level as independent groups despite the repeated
# measures — confirm that is intended.
report_significant_cohens_d <- function(contrast_df, factor_name) {
  significant <- contrast_df[contrast_df$p.value < 0.05, ]
  if (nrow(significant) == 0) {
    return(invisible(NULL))
  }
  cat(sprintf("\nCohen's d for significant %s contrasts:\n", factor_name))
  for (i in seq_len(nrow(significant))) {
    contrast_name <- as.character(significant$contrast[i])
    contrast_parts <- strsplit(contrast_name, " - ", fixed = TRUE)[[1]]
    if (length(contrast_parts) != 2) next
    level1 <- trimws(contrast_parts[1])
    level2 <- trimws(contrast_parts[2])
    data1 <- long_data_clean$MEAN_DIFFERENCE[long_data_clean[[factor_name]] == level1]
    data2 <- long_data_clean$MEAN_DIFFERENCE[long_data_clean[[factor_name]] == level2]
    if (length(data1) > 0 && length(data2) > 0) {
      d_fit <- cohen.d(data1, data2)
      cat(sprintf("Comparison: %s\n", contrast_name))
      cat(sprintf("  n1 = %d, n2 = %d\n", length(data1), length(data2)))
      cat(sprintf("  Cohen's d: %.5f\n", d_fit$estimate))
      cat(sprintf("  Effect size interpretation: %s\n", d_fit$magnitude))
      cat(sprintf("  p-value: %.5f\n", significant$p.value[i]))
      cat("\n")
    }
  }
  invisible(significant)
}

# Main Effect of DOMAIN (if significant)
print("\n=== COHEN'S D FOR DOMAIN MAIN EFFECT ===")
domain_main_contrast <- pairs(domain_emmeans, adjust = "bonferroni")
domain_main_df <- as.data.frame(domain_main_contrast)
print("DOMAIN main effect contrasts:")
print(domain_main_df)
report_significant_cohens_d(domain_main_df, "DOMAIN")

# Main Effect of INTERVAL (if significant)
print("\n=== COHEN'S D FOR INTERVAL MAIN EFFECT ===")
interval_main_contrast <- pairs(interval_emmeans, adjust = "bonferroni")
interval_main_df <- as.data.frame(interval_main_contrast)
print("INTERVAL main effect contrasts:")
print(interval_main_df)
report_significant_cohens_d(interval_main_df, "INTERVAL")
# =============================================================================
# INTERACTION EXPLORATIONS (if significant)
# =============================================================================
# Note: Detailed interaction analyses would be added here if significant interactions are found
# For now, we'll provide a framework for the most common interactions
print("\n=== INTERACTION EXPLORATIONS ===")
print("Note: Detailed interaction analyses will be performed for significant interactions")
print("Check the ANOVA results above to identify which interactions are significant")
# Example framework for TIME × DOMAIN interaction (if significant)
# (Commented template: relies on anova_output and aov_model defined above.
# Uncomment and adapt per significant interaction; `by =` in pairs() gives
# simple effects of one factor within each level of the other.)
# if("TIME:DOMAIN" %in% anova_output$Effect && anova_output$p[anova_output$Effect == "TIME:DOMAIN"] < 0.05) {
# print("\n=== TIME × DOMAIN INTERACTION (SIGNIFICANT) ===")
# time_domain_emmeans <- emmeans(aov_model, ~ TIME * DOMAIN)
# print("Estimated Marginal Means:")
# print(time_domain_emmeans)
#
# print("\nSimple Effects of DOMAIN within each TIME:")
# time_domain_simple <- pairs(time_domain_emmeans, by = "TIME", adjust = "bonferroni")
# print(time_domain_simple)
#
# print("\nSimple Effects of TIME within each DOMAIN:")
# time_domain_simple2 <- pairs(time_domain_emmeans, by = "DOMAIN", adjust = "bonferroni")
# print(time_domain_simple2)
# }
print("\n=== ANALYSIS COMPLETE ===")
print("Mixed ANOVA analysis with three within-subjects factors (TIME, DOMAIN, INTERVAL)")
print("and two between-subjects factors (TEMPORAL_DO, INTERVAL_DO) completed.")
print("Check the results above for significant effects and perform additional")
print("interaction analyses as needed based on the significance patterns.")