Overview

This script works through the calculation of fear sensitivity scores using a Hyperbolic Tangent Growth Model. The objective is to obtain a measure of between-person differences in children’s fear sensitivity from data on children’s expression of fearful behavior during a laboratory visit in which they completed six novelty tasks (e.g., novel-stranger tasks). We use the method described in
Buss, K., Davis, E., Ram, N., & Coccia, M. (in press). Dysregulated fear, social inhibition, and respiratory sinus arrhythmia: A replication and extension. Child Development.

The original implementation was done in SAS. Here, we work out the details of the R-based implementation, and extend the calculations to a new sample.

Outline

This script covers …

A. Describing the repeated measures data
B. Fitting the hyperbolic tangent growth model
C. Calculating fear sensitivity scores

Preliminaries

Loading libraries used in this script.

library(reshape2)
library(car)
library(psych)
library(ggplot2)
library(nlstools)
library(nlme)
library(lme4)

Reading in the data

#Loading the data
#read in the .csv file 
orig <- read.csv(file="lpadata_052917.csv",header=TRUE)

#replacing missing values (coded as -99) with NA
orig[orig == -99] <- NA

#fixing names
names(orig)[1] <- c("id")

#Looking at the top few rows of the wide data.
head(orig,6)
##   id     clown       ps       sa       sw    robot   spider
## 1  1  2.255639 20.39216  0.00000 26.66667 23.18681 14.47368
## 2  4 17.209302 37.31884  0.00000 31.33858 34.59459 48.72340
## 3  5 94.434783 74.78261 57.24138 39.04000 40.34783 21.90476
## 4  6  4.758065 43.75940       NA 43.69748 65.56701 12.26415
## 5  7  4.814815 69.84000 63.02326       NA 55.23810 31.57895
## 6  8 19.726962 20.30000  0.00000 29.28571 64.84848 27.40741
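
As a quick sanity check on the -99 recoding (a small addition, not part of the original script), we can count the missing values per task; these NAs reappear later as the “Removed 16 rows” ggplot warnings.

#counting missing values per task after recoding -99 to NA
colSums(is.na(orig))
sum(is.na(orig))  #total NAs across the six tasks (16 here, matching the ggplot warnings below)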

Reshaping the data

Reshape from wide to long for plotting etc.

#melting to long 
data_long <- melt(orig,
                 id.vars="id", 
                 variable.name="task",
                 na.rm=FALSE,
                 value.name = "fear")

#sorting for easy viewing
# order by id and task
data_long <- data_long[order(data_long$id,data_long$task), ]

Creating threat level variables (numeric variables)

#threat level ordering based on Kristin's theoretical ordering
data_long$threatlevel_theory <- recode(data_long$task, "'clown'=1; 'ps'=2; 'sw'=3; 'sa'=4; 'robot'=5; 'spider'=6", as.factor.result=FALSE)
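
#alternative (sketch, not part of the original script): if your version of car
#does not accept the as.factor.result argument, the same theoretical ordering
#can be built in base R; run it here, before the relabeling step below, to
#reproduce the same values
theory_map <- c(clown = 1, ps = 2, sw = 3, sa = 4, robot = 5, spider = 6)
data_long$threatlevel_theory <- unname(theory_map[as.character(data_long$task)])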

#relabeling the levels of the factor variable
#note: levels()<- renames levels in place (it does not reorder them), so this
#step swaps the "sa" and "sw" labels relative to the columns of the wide file;
#use factor(x, levels = ...) instead if reordering without relabeling is intended
levels(data_long$task) <- c("clown","ps","sw","sa","robot","spider")

#threat level ordering based on empirical ordering (from original analysis)
data_long$threatlevel_empirical <- recode(data_long$task, "'clown'=3; 'ps'=4; 'sw'=1; 'sa'=2;  'robot'=5; 'spider'=6", as.factor.result=FALSE)

#creating new empirical order task variable  
data_long$taskempirical <- factor(data_long$task, 
                                  levels = c("sw","sa","clown","ps","robot","spider"))
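
#quick check (sketch, not part of the original script): factor(..., levels=)
#used above reorders the level order only; each row keeps its label, unlike
#the levels()<- relabeling earlier
identical(as.character(data_long$task), as.character(data_long$taskempirical))  #should be TRUE
levels(data_long$taskempirical)  #only the ordering of the levels differs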

#looking at the top few rows of the long data
head(data_long,18)
##      id   task      fear threatlevel_theory threatlevel_empirical
## 1     1  clown  2.255639                  1                     3
## 236   1     ps 20.392157                  2                     4
## 471   1     sw  0.000000                  4                     1
## 706   1     sa 26.666667                  3                     2
## 941   1  robot 23.186813                  5                     5
## 1176  1 spider 14.473684                  6                     6
## 2     4  clown 17.209302                  1                     3
## 237   4     ps 37.318841                  2                     4
## 472   4     sw  0.000000                  4                     1
## 707   4     sa 31.338583                  3                     2
## 942   4  robot 34.594595                  5                     5
## 1177  4 spider 48.723404                  6                     6
## 3     5  clown 94.434783                  1                     3
## 238   5     ps 74.782609                  2                     4
## 473   5     sw 57.241379                  4                     1
## 708   5     sa 39.040000                  3                     2
## 943   5  robot 40.347826                  5                     5
## 1178  5 spider 21.904762                  6                     6
##      taskempirical
## 1            clown
## 236             ps
## 471             sw
## 706             sa
## 941          robot
## 1176        spider
## 2            clown
## 237             ps
## 472             sw
## 707             sa
## 942          robot
## 1177        spider
## 3            clown
## 238             ps
## 473             sw
## 708             sa
## 943          robot
## 1178        spider
#reshaping from long to wide
data_wide <- dcast(data_long, id ~ task, value.var="fear")

A. Describing the data

Descriptives

Basic descriptives of the 6-task data.

#checking data frame formats
str(data_long)
## 'data.frame':    1410 obs. of  6 variables:
##  $ id                   : int  1 1 1 1 1 1 4 4 4 4 ...
##  $ task                 : Factor w/ 6 levels "clown","ps","sw",..: 1 2 3 4 5 6 1 2 3 4 ...
##  $ fear                 : num  2.26 20.39 0 26.67 23.19 ...
##  $ threatlevel_theory   : num  1 2 4 3 5 6 1 2 4 3 ...
##  $ threatlevel_empirical: num  3 4 1 2 5 6 3 4 1 2 ...
##  $ taskempirical        : Factor w/ 6 levels "sw","sa","clown",..: 3 4 1 2 5 6 3 4 1 2 ...
str(data_wide)
## 'data.frame':    235 obs. of  7 variables:
##  $ id    : int  1 4 5 6 7 8 9 10 11 12 ...
##  $ clown : num  2.26 17.21 94.43 4.76 4.81 ...
##  $ ps    : num  20.4 37.3 74.8 43.8 69.8 ...
##  $ sw    : num  0 0 57.2 NA 63 ...
##  $ sa    : num  26.7 31.3 39 43.7 NA ...
##  $ robot : num  23.2 34.6 40.3 65.6 55.2 ...
##  $ spider: num  14.5 48.7 21.9 12.3 31.6 ...
#means, sd, etc.
describe(data_wide)
##        vars   n    mean      sd  median trimmed     mad  min     max
## id        1 235 3330.85 3101.59 6035.00 3339.56 1031.89 1.00 6820.00
## clown     2 235   25.90   23.29   22.12   23.07   27.50 0.22   98.86
## ps        3 234   37.32   21.71   37.36   36.45   23.89 0.09   99.79
## sw        4 229   27.67   21.06   26.12   26.00   23.72 0.00   97.47
## sa        5 230   25.21   17.53   26.43   24.06   16.43 0.00   80.68
## robot     6 233   51.27   25.92   51.80   51.96   29.43 0.00   99.68
## spider    7 233   51.05   22.39   50.75   51.52   26.03 0.00   99.69
##          range  skew kurtosis     se
## id     6819.00 -0.10    -1.99 202.33
## clown    98.64  0.84    -0.11   1.52
## ps       99.70  0.40    -0.35   1.42
## sw       97.47  0.59    -0.23   1.39
## sa       80.68  0.53     0.33   1.16
## robot    99.68 -0.18    -0.85   1.70
## spider   99.69 -0.14    -0.74   1.47
#boxplot by task
ggplot(data=data_long[which(data_long$id > 0), ], aes(x=task, y=fear)) + 
  geom_boxplot(notch = TRUE) +
  stat_summary(fun.y="mean", geom="point", shape=23, size=3, fill="white") +
  labs(x = "Task", y = "Fear")
## Warning: Removed 16 rows containing non-finite values (stat_boxplot).
## Warning: Removed 16 rows containing non-finite values (stat_summary).

#boxplot by task empirical
ggplot(data=data_long[which(data_long$id > 0), ], aes(x=taskempirical, y=fear)) + 
  geom_boxplot(notch = TRUE) +
  stat_summary(fun.y="mean", geom="point", shape=23, size=3, fill="white") +
  labs(x = "Task", y = "Fear")
## Warning: Removed 16 rows containing non-finite values (stat_boxplot).

## Warning: Removed 16 rows containing non-finite values (stat_summary).

#correlations
round(cor(data_wide[,c("clown","ps","sa","sw","robot","spider")], 
          use="complete.obs",method="spearman"),2)
##        clown   ps   sa   sw robot spider
## clown   1.00 0.29 0.34 0.30  0.28   0.37
## ps      0.29 1.00 0.25 0.15  0.12   0.07
## sa      0.34 0.25 1.00 0.19  0.16   0.08
## sw      0.30 0.15 0.19 1.00  0.17   0.11
## robot   0.28 0.12 0.16 0.17  1.00   0.52
## spider  0.37 0.07 0.08 0.11  0.52   1.00
#pairs plot from the psych library
pairs.panels(data_wide[,c("clown","ps","sa","sw","robot","spider")])

Plot of sample-level change in the fear distribution across tasks

#Density distribution by task
ggplot(data=data_long, aes(x=fear)) + 
  geom_density(aes(group=task, colour=task, fill=task), alpha=0.3)
## Warning: Removed 16 rows containing non-finite values (stat_density).

Plot of individual-level trajectories

#intraindividual change trajectories
ggplot(data = data_long, aes(x = task, y = fear, group = id)) +
  geom_point(color="black") + 
  geom_line(color="black") +
  xlab("Task") + 
  ylab("Fear") + ylim(0,100) 
## Warning: Removed 16 rows containing missing values (geom_point).
## Warning: Removed 2 rows containing missing values (geom_path).

#intraindividual change trajectories
ggplot(data = data_long, aes(x = threatlevel_empirical, y = fear, group = id)) +
  geom_point(color="black") + 
  geom_line(color="black") +
  xlab("Threatlevel (Empirical Ordering)") + 
  ylab("Fear") + ylim(0,100) 
## Warning: Removed 16 rows containing missing values (geom_point).
## Warning: Removed 8 rows containing missing values (geom_path).

Yay!

B. Fitting the growth models

As explained in Buss et al. (in press), fear was measured as the percentage of each episode spent engaging in fearful behavior, scored in each of the six novel fear contexts …

  1. clown
  2. puppet show
  3. stranger working
  4. stranger approach
  5. robot
  6. spider

Individual propensity to exhibit fear across the six threat episodes was derived using a measurement model based on a nonlinear growth model (Ram & Grimm, 2015). Each child’s six fearful behavior scores, \(fear_{it}\), were modeled as a nonlinear function of the putative level of situational threat, \(threatlevel_{t}\), given by the ordinal ranking (1–6) shown above (note: this ordering was stated incorrectly in the paper). The patterning of fear behavior across tasks was modeled using a hyperbolic tangent function, tanh (see, e.g., Liebovitch, Vallacher, & Michaels, 2010). Specifically,

\[fear_{it} = \beta_{0} + \beta_{1} [\tanh (threatlevel_{t} - \lambda_{i})]+ e_{it}\]

where \(fear_{it}\) across tasks is modeled as a sigmoidal trajectory defined (by fixing \(\beta_{0} = \beta_{1} = 50\)) to travel from 0% fear behavior at very low levels of threat to 100% fear behavior at very high levels of threat, with the inflection point given by \(\lambda_{i}\). The person-specific inflection points, \(\lambda_{i}\), thus locate the specific level of threat at which each child would, based on their behavior in all six tasks, display fear for 50% of task time. The \(\lambda_{i}\) parameter estimates (subsequently referred to as fear sensitivity) therefore provide a measure of a child’s propensity to display fear relative to the threat/intensity of the tasks (normalized to the sample and the specific tasks to which the child was exposed in this study). In the original study, models were estimated separately for each child in SAS 9.2 (proc nlin), with the child-specific fear sensitivity scores collected into a new variable for use in subsequent analyses. Here we do the analysis in R using nls.
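
To see why \(\lambda_{i}\) has this interpretation, note that the hyperbolic tangent term vanishes when the threat level equals the inflection point:

\[E[fear_{it} \mid threatlevel_{t} = \lambda_{i}] = \beta_{0} + \beta_{1}\tanh(0) = 50 + 50 \cdot 0 = 50.\]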

Hyperbolic Tangent Model

First let’s generate a bit of data to see what trajectories from this model look like. Set up a function to hold the mathematical growth function …
We make the function object and insert a priori values for \(b_{0}\) and \(b_{1}\).

growth.fun <- function(x, u0 = 0, u1 = 0, error = 0) {
  #a priori fixed values for b0 and b1 (see text above)
  b0 = 50
  b1 = 50
  #inflection point (fear sensitivity) used for the prototype curve
  lambda = 4.2
  #note: the u0 and u1 arguments are accepted but not used in this prototype version
  y = b0 + b1*(tanh(x-lambda)) + error
  return(y)
}

Generate a single prototype curve. Set the number of persons and occasions.

num.persons <- 1
num.occasions <- 6

Generate empty data:

proto.data <- data.frame(
                  id = sort(rep(c(1:num.persons),num.occasions)), 
                  time = rep(c(1:(num.occasions)),num.persons), 
                  outcome = NA)
head(proto.data)
##   id time outcome
## 1  1    1      NA
## 2  1    2      NA
## 3  1    3      NA
## 4  1    4      NA
## 5  1    5      NA
## 6  1    6      NA

Fill in ‘outcome’ with the prototype curve from the function (plugging the time variable in as x).

proto.data$outcome <- growth.fun(x=proto.data$time,
                                 u0 = 0,
                                 u1 = 0,
                                 error = 0)
proto.data
##   id time    outcome
## 1  1    1  0.1658801
## 2  1    2  1.2128435
## 3  1    3  8.3172696
## 4  1    4 40.1312340
## 5  1    5 83.2018385
## 6  1    6 97.3403006

Plot the prototype curve

ggplot(data = proto.data, aes(x = time, y = outcome, group = id)) +
  ggtitle("Simulated Tanh Trajectory") +
  #geom_point() + 
  geom_line(color="red", size = 2) +
  geom_hline(aes(yintercept = 50), size = .5, color="black") +
  geom_vline(aes(xintercept = 4.2), size = .5, color="black") +
  xlab("Time") + 
  ylab("Outcome") + ylim(0,100) +
  scale_x_continuous(breaks=seq(1,6,by=1))

Simulation looks good.
This individual’s fear sensitivity score would be 4.2 (the time at which the trajectory reaches the inflection point, at outcome = 50).
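
As an optional check (a small sketch, not part of the original script; check.data and check.fit are names introduced here for illustration), we can verify that nls recovers the \(\lambda\) used to generate the prototype. A little noise is added because nls is not intended for artificial zero-residual data (see ?nls).

#sanity check: recover lambda = 4.2 from the simulated curve
set.seed(1234)                                  #reproducible noise
check.data <- proto.data
check.data$outcome <- check.data$outcome + rnorm(num.occasions, sd = 2)
check.fit <- nls(outcome ~ 50 + 50*(tanh(time - lambda)),
                 data = check.data,
                 start = list(lambda = 5))
coef(check.fit)  #should be close to 4.2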

Empirical Data

Following the procedures described in Buss et al. (in press), we fit the model to each individual’s data separately (person-specific) to obtain a fear sensitivity score, \(\lambda_{i}\).

We loop over children, fit the model using the theoretical ordering of tasks, fit the model again using the empirical ordering of tasks, and collect the parameter estimates into a data set.

#creating id list
idlist <- unique(data_long$id)

#Creating a data frame to hold the person-specific results  
modelout <- data.frame(id=numeric(),
                    lambda_theory=numeric(),
                    sigma_theory=numeric(),
                    pred_theory_1=numeric(),
                    pred_theory_2=numeric(),
                    pred_theory_3=numeric(),
                    pred_theory_4=numeric(),
                    pred_theory_5=numeric(),
                    pred_theory_6=numeric(),
                    lambda_empirical=numeric(),
                    sigma_empirical=numeric(),
                    pred_empirical_1=numeric(),
                    pred_empirical_2=numeric(),
                    pred_empirical_3=numeric(),
                    pred_empirical_4=numeric(),
                    pred_empirical_5=numeric(),
                    pred_empirical_6=numeric())
#create new data for obtaining predicted scores
newdata1 <- data.frame(matrix(c(1,2,3,4,5,6),nrow = 6,ncol=1))
names(newdata1) <- c("threatlevel_theory")
newdata2 <- data.frame(matrix(c(1,2,3,4,5,6),nrow = 6,ncol=1))
names(newdata2) <- c("threatlevel_empirical")

#Looping through all participants 
for(i in seq_along(idlist)){
  #i <- 6  #for testing
  datasub <- data_long[which(data_long$id == idlist[i]),]
  #model using the theoretical ordering of tasks
  model1_i <- nls(fear ~ 50 + 50*(tanh(threatlevel_theory - lambda)), 
                  data=datasub,
                  start=list(lambda=5.2),
                  control = nls.control(maxiter = 100))
  summary(model1_i)
  #model using the empirical ordering of tasks
  model2_i <- nls(fear ~ 50 + 50*(tanh(threatlevel_empirical - lambda)), 
                  data=datasub,
                  start=list(lambda=5.2),
                  control = nls.control(maxiter = 100))
  summary(model2_i)
  
  #pulling info together into a vector
  info_i <- cbind(idlist[i], t(coef(model1_i)), summary(model1_i)$sigma, 
                  t(predict(model1_i, newdata1)), 
                  t(coef(model2_i)), summary(model2_i)$sigma, 
                  t(predict(model2_i, newdata2)))
  #print(info_i)
  #binding person info into concatenated list of all persons
  modelout <- rbind(modelout,info_i)
}
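
#optional robustness sketch (not part of the original analysis): if nls fails
#to converge for a child, the loop above stops with an error; a variant that
#wraps the fit in tryCatch() returns NULL for that child instead, which can be
#checked with is.null() before calling coef() and predict()
#(fit_one is a name introduced here for illustration)
fit_one <- function(datasub, start_lambda = 5.2) {
  tryCatch(
    nls(fear ~ 50 + 50*(tanh(threatlevel_theory - lambda)), 
        data = datasub,
        start = list(lambda = start_lambda),
        control = nls.control(maxiter = 100)),
    error = function(e) NULL)
}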

names(modelout) <- c("id","lambda_theory","sigma_theory",
                     "predtheory_1", "predtheory_2", "predtheory_3", "predtheory_4", "predtheory_5", "predtheory_6",
                     "lambda_empirical","sigma_empirical",
                     "predempirical_1", "predempirical_2", "predempirical_3", "predempirical_4", "predempirical_5", "predempirical_6")

Describing the obtained fear sensitivity scores.

#describing newly calculated scores on previous sample
describe(modelout[which(modelout$id > 6000), ])
##                  vars   n    mean     sd  median trimmed    mad     min
## id                  1 124 6256.02 193.73 6194.50 6233.23 146.78 6001.00
## lambda_theory       2 124    4.76   1.27    4.66    4.81   0.86    0.16
## sigma_theory        3 124   29.42  10.74   30.27   29.11  11.92    2.92
## predtheory_1        4 124    2.80  12.37    0.07    0.09   0.09    0.00
## predtheory_2        5 124    5.06  19.19    0.49    0.63   0.64    0.00
## predtheory_3        6 124    9.50  21.02    3.48    4.35   4.54    0.00
## predtheory_4        7 124   26.19  25.32   21.02   22.21  23.54    0.02
## predtheory_5        8 124   56.56  30.95   66.29   58.08  30.91    0.18
## predtheory_6        9 124   80.98  25.23   93.56   86.25   7.00    1.30
## lambda_empirical   10 124    4.50   1.36    4.48    4.52   1.14    0.86
## sigma_empirical    11 124   26.76   8.76   26.86   26.48   9.06    3.09
## predempirical_1    12 124    1.97   6.95    0.09    0.38   0.13    0.00
## predempirical_2    13 124    6.93  16.71    0.69    2.45   0.95    0.00
## predempirical_3    14 124   17.22  27.41    4.89   11.09   6.69    0.00
## predempirical_4    15 124   35.94  33.07   27.54   32.80  33.83    0.02
## predempirical_5    16 124   62.37  32.99   73.74   64.95  34.91    0.16
## predempirical_6    17 124   83.04  25.15   95.40   88.66   6.55    1.17
##                      max  range  skew kurtosis    se
## id               6820.00 819.00  1.07     0.50 17.40
## lambda_theory       8.17   8.00 -0.86     2.83  0.11
## sigma_theory       58.84  55.91  0.21    -0.18  0.96
## predtheory_1       84.16  84.16  4.62    21.30  1.11
## predtheory_2       97.52  97.52  4.15    15.48  1.72
## predtheory_3       99.66  99.65  3.66    12.54  1.89
## predtheory_4       99.95  99.93  1.29     1.32  2.27
## predtheory_5       99.99  99.82 -0.45    -1.18  2.78
## predtheory_6      100.00  98.70 -1.66     1.74  2.27
## lambda_empirical    8.22   7.35 -0.14     0.15  0.12
## sigma_empirical    54.02  50.93  0.23    -0.05  0.79
## predempirical_1    56.71  56.71  5.49    34.37  0.62
## predempirical_2    90.64  90.64  3.16     9.98  1.50
## predempirical_3    98.62  98.62  1.78     1.75  2.46
## predempirical_4    99.81  99.79  0.72    -0.84  2.97
## predempirical_5    99.97  99.81 -0.57    -1.11  2.96
## predempirical_6   100.00  98.82 -1.79     2.22  2.26
#on new sample 
describe(modelout[which(modelout$id < 6000), ])
##                  vars   n  mean    sd median trimmed   mad  min    max
## id                  1 111 63.10 35.34  63.00   63.26 45.96 1.00 122.00
## lambda_theory       2 111  5.58  1.14   5.89    5.76  0.64 0.83   7.32
## sigma_theory        3 111 31.14 10.77  31.02   30.81  9.08 9.25  67.10
## predtheory_1        4 111  1.52  8.84   0.01    0.02  0.00 0.00  58.42
## predtheory_2        5 111  2.85 14.61   0.04    0.12  0.04 0.00  91.21
## predtheory_3        6 111  5.04 17.19   0.31    0.84  0.27 0.02  98.71
## predtheory_4        7 111 11.58 22.89   2.23    5.24  1.94 0.13  99.82
## predtheory_5        8 111 27.46 28.95  14.41   22.22 11.86 0.95  99.98
## predtheory_6        9 111 58.54 25.50  55.44   58.51 29.79 6.62 100.00
## lambda_empirical   10 111  5.49  1.13   5.83    5.63  0.75 0.94   7.36
## sigma_empirical    11 111 30.33 10.54  29.75   30.10  8.46 8.32  66.97
## predempirical_1    12 111  0.88  5.52   0.01    0.03  0.01 0.00  53.18
## predempirical_2    13 111  2.51 11.84   0.05    0.21  0.05 0.00  89.35
## predempirical_3    14 111  5.60 17.14   0.35    1.45  0.37 0.02  98.41
## predempirical_4    15 111 13.87 24.11   2.53    7.93  2.65 0.12  99.78
## predempirical_5    16 111 31.28 31.43  16.09   26.91 16.08 0.88  99.97
## predempirical_6    17 111 61.02 26.73  58.62   61.57 35.08 6.12 100.00
##                   range  skew kurtosis   se
## id               121.00 -0.01    -1.27 3.35
## lambda_theory      6.49 -2.12     5.71 0.11
## sigma_theory      57.85  0.49     0.96 1.02
## predtheory_1      58.42  5.82    32.41 0.84
## predtheory_2      91.21  5.66    30.67 1.39
## predtheory_3      98.70  4.60    21.25 1.63
## predtheory_4      99.69  2.63     6.13 2.17
## predtheory_5      99.03  1.37     0.53 2.75
## predtheory_6      93.37  0.12    -1.04 2.42
## lambda_empirical   6.43 -1.53     2.81 0.11
## sigma_empirical   58.66  0.48     1.03 1.00
## predempirical_1   53.18  8.12    71.03 0.52
## predempirical_2   89.35  5.72    33.43 1.12
## predempirical_3   98.40  4.34    18.66 1.63
## predempirical_4   99.66  2.16     3.89 2.29
## predempirical_5   99.10  1.04    -0.44 2.98
## predempirical_6   93.87  0.02    -1.21 2.54
#on total set
describe(modelout)
##                  vars   n    mean      sd  median trimmed     mad  min
## id                  1 235 3330.85 3101.59 6035.00 3339.56 1031.89 1.00
## lambda_theory       2 235    5.15    1.27    5.37    5.26    1.16 0.16
## sigma_theory        3 235   30.23   10.77   30.69   29.92   10.75 2.92
## predtheory_1        4 235    2.20   10.84    0.02    0.05    0.02 0.00
## predtheory_2        5 235    4.02   17.18    0.12    0.38    0.16 0.00
## predtheory_3        6 235    7.39   19.40    0.87    2.64    1.18 0.00
## predtheory_4        7 235   19.29   25.23    6.09   14.08    8.17 0.02
## predtheory_5        8 235   42.82   33.31   32.40   41.16   39.98 0.18
## predtheory_6        9 235   70.38   27.68   77.98   73.28   28.89 1.30
## lambda_empirical   10 235    4.97    1.34    5.15    5.08    1.33 0.86
## sigma_empirical    11 235   28.45    9.78   28.68   28.13    9.12 3.09
## predempirical_1    12 235    1.46    6.32    0.02    0.11    0.03 0.00
## predempirical_2    13 235    4.84   14.75    0.18    0.81    0.25 0.00
## predempirical_3    14 235   11.73   23.81    1.34    4.95    1.86 0.00
## predempirical_4    15 235   25.52   31.14    9.11   19.98   12.45 0.02
## predempirical_5    16 235   47.68   35.75   42.54   46.83   49.46 0.16
## predempirical_6    17 235   72.64   28.11   84.54   76.02   22.54 1.17
##                      max   range  skew kurtosis     se
## id               6820.00 6819.00 -0.10    -1.99 202.33
## lambda_theory       8.17    8.00 -1.25     2.79   0.08
## sigma_theory       67.10   64.18  0.35     0.43   0.70
## predtheory_1       84.16   84.16  5.16    26.56   0.71
## predtheory_2       97.52   97.52  4.75    20.82   1.12
## predtheory_3       99.66   99.65  4.04    15.79   1.27
## predtheory_4       99.95   99.93  1.69     2.31   1.65
## predtheory_5       99.99   99.82  0.30    -1.49   2.17
## predtheory_6      100.00   98.70 -0.63    -0.85   1.81
## lambda_empirical    8.22    7.35 -0.67     0.22   0.09
## sigma_empirical    66.97   63.88  0.47     0.92   0.64
## predempirical_1    56.71   56.71  6.45    46.39   0.41
## predempirical_2    90.64   90.64  3.97    16.04   0.96
## predempirical_3    98.62   98.62  2.49     5.00   1.55
## predempirical_4    99.81   99.79  1.20     0.16   2.03
## predempirical_5    99.97   99.81  0.14    -1.61   2.33
## predempirical_6   100.00   98.82 -0.74    -0.75   1.83
# Correlations 
round(cor(modelout[,c("lambda_theory","lambda_empirical")], 
          use="complete.obs",method="spearman"),2)
##                  lambda_theory lambda_empirical
## lambda_theory             1.00             0.89
## lambda_empirical          0.89             1.00
#pairs plot from the psych library
pairs.panels(modelout[,c("lambda_theory","lambda_empirical")])

Plotting the predicted trajectories

#variable names in a form needed for input into R
dput(as.character(names(modelout)))
## c("id", "lambda_theory", "sigma_theory", "predtheory_1", "predtheory_2", 
## "predtheory_3", "predtheory_4", "predtheory_5", "predtheory_6", 
## "lambda_empirical", "sigma_empirical", "predempirical_1", "predempirical_2", 
## "predempirical_3", "predempirical_4", "predempirical_5", "predempirical_6"
## )
#reshaping wide to long
modelout_long <- reshape(data=modelout, 
                         timevar=c("threatlevel"), 
                         idvar=c("id", "lambda_theory", "sigma_theory",
                                 "lambda_empirical", "sigma_empirical"),
                         varying=c("predtheory_1", "predtheory_2", "predtheory_3", "predtheory_4", "predtheory_5", "predtheory_6", 
"predempirical_1", "predempirical_2", "predempirical_3", "predempirical_4", "predempirical_5", "predempirical_6"),
                         direction="long", sep="_")
#sorting for easy viewing
# order by id and threatlevel
modelout_long <- modelout_long[order(modelout_long$id,modelout_long$threatlevel), ]

#plotting intraindividual trajectories
ggplot(data = modelout_long, aes(x = threatlevel, y = predtheory, group = id)) +
  ggtitle("Predicted Trajectories: Theoretical Ordering of Tasks") +
  #geom_point() + 
  geom_line(color="black", size = 1) +
  geom_hline(aes(yintercept = 50), size = .5, color="red") +
  geom_vline(aes(xintercept = 5.15), size = .5, color="red") +
  xlab("Threat Level (Theory Order)") + 
  ylab("Outcome") + ylim(0,100) +
  scale_x_continuous(breaks=seq(1,6,by=1))

#plotting intraindividual trajectories
ggplot(data = modelout_long, aes(x = threatlevel, y = predempirical, group = id)) +
  ggtitle("Predicted Trajectories: Empirical Ordering of Tasks") +
  #geom_point() + 
  geom_line(color="blue", size = 1) +
  geom_hline(aes(yintercept = 50), size = .5, color="red") +
  geom_vline(aes(xintercept = 4.97), size = .5, color="red") +
  xlab("Threat Level (Empirical Order)") + 
  ylab("Outcome") + ylim(0,100) +
  scale_x_continuous(breaks=seq(1,6,by=1))

Between-person differences in fear sensitivity captured by the estimated \(\lambda_{i}\) are visualized as the threat level at which each trajectory crosses 50. The average score is marked by the vertical red line.

Writing out a csv file with the original data and the estimated lambda scores for use in other analyses. Note that the fear sensitivity scores are treated here as calculated values (although they are estimated scores and carry uncertainty).

dataout <- merge(orig,modelout[ ,c("id","lambda_theory","lambda_empirical")], by="id")
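
#quick check (sketch, not part of the original script): the merge should keep
#one row per child, with both fear sensitivity scores present
nrow(dataout)  #expect 235, matching the describe() output above
colSums(is.na(dataout[,c("lambda_theory","lambda_empirical")]))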

write.csv(dataout,file="lpadata_052917_withlambdaestimates_050918.csv", row.names=FALSE)

Thanks for playing!