diff --git a/DESCRIPTION b/DESCRIPTION index 5726d44..7b27790 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,12 +1,13 @@ Package: timevarcorr Title: Time Varying Correlation -Version: 0.1.0 +Version: 0.1.1 Authors@R: c(person("Alexandre", "Courtiol", email = "alexandre.courtiol@gmail.com", role = c("aut", "cre", "cph"), comment = c(ORCID = "0000-0003-0637-2959")), person("François", "Rousset", role = "aut", comment = c(ORCID = "0000-0003-4670-0371"))) Description: Computes how the correlation between 2 time-series changes over time. - To do so, the package performs a non-parametric kernel smoothing (using a common bandwidth) of all underlying components required for the computation of a correlation coefficient (i.e., x, y, x^2, y^2, xy). + To do so, the package follows the method from Choi & Shin (2021). + It performs a non-parametric kernel smoothing (using a common bandwidth) of all underlying components required for the computation of a correlation coefficient (i.e., x, y, x^2, y^2, xy). An automatic selection procedure for the bandwidth parameter is implemented. Alternative kernels can be used (Epanechnikov, box and normal). Both Pearson and Spearman correlation coefficients can be estimated and change in correlation over time can be tested. diff --git a/NEWS.md b/NEWS.md index 698bdfd..50b5efc 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,7 +1,14 @@ +# timevarcorr 0.1.1 + +New submission to CRAN after revisions. + +* Add Choi & Shin (2021) in DESCRIPTION. +* Improve documentation (more detailed return values and use of LaTeX). + # timevarcorr 0.1.0 -* Initial CRAN submission. +Initial CRAN submission (rejected). # timevarcorr 0.0.0.9006 and earlier -* Prepare package for CRAN release. +Prepare package for CRAN release. 
diff --git a/R/CI.R b/R/CI.R index 4d73fb7..e231923 100644 --- a/R/CI.R +++ b/R/CI.R @@ -1,9 +1,9 @@ #' Internal functions for the computation of confidence intervals #' -#' These functions compute the different terms required to compute the confidence -#' interval around the time-varying correlation coefficient. +#' These functions compute the different terms required for [`tcor()`] to compute the confidence +#' interval around the time-varying correlation coefficient. These terms are defined in Choi & Shin (2021). #' -#' @seealso [`tcor`] +#' @seealso [`tcor()`] #' @name CI #' #' @references @@ -13,6 +13,15 @@ #' Andrews, D. W. K. Heteroskedasticity and autocorrelation consistent covariance matrix estimation. #' Econometrica: Journal of the Econometric Society, 817-858 (1991). #' +#' @return +#' - `calc_H()` returns a 5 x 5 x \eqn{t} array of elements of class numeric, which corresponds to \eqn{\hat{H_t}} in Choi & Shin (2021). +#' - `calc_e()` returns a \eqn{t} x 5 matrix of elements of class numeric storing the residuals, which corresponds to \eqn{\hat{e}_t} in Choi & Shin (2021). +#' - `calc_Gamma()` returns a 5 x 5 matrix of elements of class numeric, which corresponds to \eqn{\hat{\Gamma}_l} in Choi & Shin (2021). +#' - `calc_GammaINF()` returns a 5 x 5 matrix of elements of class numeric, which corresponds to \eqn{\hat{\Gamma}^\infty} in Choi & Shin (2021). +#' - `calc_L_And()` returns a scalar of class numeric, which corresponds to \eqn{L_{And}} in Choi & Shin (2021). +#' - `calc_D()` returns a \eqn{t} x 5 matrix of elements of class numeric storing the residuals, which corresponds to \eqn{D_t} in Choi & Shin (2021). +#' - `calc_SE()` returns a vector of length \eqn{t} of elements of class numeric, which corresponds to \eqn{se(\hat{\rho}_t(h))} in Choi & Shin (2021). 
+#' #' @examples #' rho_obj <- with(na.omit(stockprice), #' calc_rho(x = SP500, y = FTSE100, t = DateID, h = 20, kernel = "box")) @@ -21,18 +30,16 @@ NULL -#' @describeIn CI Internal function computing the `$\hat{H_t}$` array. +#' @describeIn CI computes the \eqn{\hat{H_t}} array. #' -#' `$\hat{H_t}$` is a component needed to compute confidence intervals; -#' `$H_t$` is defined in eq. 6 from Choi & Shin, 2021. -#' The function returns a 5 x 5 x `t` array. +#' \eqn{\hat{H_t}} is a component needed to compute confidence intervals; +#' \eqn{H_t} is defined in eq. 6 from Choi & Shin (2021). #' #' @export #' @param smoothed_obj an object created with [`calc_rho`]. #' #' @examples -#' -#' ## Computing `$\hat{H_t}$` +#' ## Computing \eqn{\hat{H_t}} #' #' H <- calc_H(smoothed_obj = rho_obj) #' H[, , 1:2] # H array for the first two time points @@ -63,18 +70,15 @@ calc_H <- function(smoothed_obj) { } -#' @describeIn CI Internal function computing `$\hat{e}_t$`. +#' @describeIn CI computes \eqn{\hat{e}_t}. #' -#' `$\hat{e}_t$` is a component needed to compute confidence intervals; -#' it is defined in eq. 9 from Choi & Shin, 2021. -#' The function returns a `t` x 5 matrix storing the residuals. +#' \eqn{\hat{e}_t} is defined in eq. 9 from Choi & Shin (2021). #' #' @export -#' @param H an object created with [`calc_H`]. +#' @param H an object created with `calc_H`. #' #' @examples -#' -#' ## Computing `$\hat{e}_t$` +#' ## Computing \eqn{\hat{e}_t} #' #' e <- calc_e(smoothed_obj = rho_obj, H = H) #' head(e) # e matrix for the first six time points @@ -91,19 +95,16 @@ calc_e <- function(smoothed_obj, H) { } -#' @describeIn CI Internal function computing `$\hat{\Gamma}_l$`. +#' @describeIn CI computes \eqn{\hat{\Gamma}_l}. #' -#' `$\hat{\Gamma}_l$` is a component needed to compute confidence intervals; -#' it is defined in eq. 9 from Choi & Shin, 2021. -#' The function returns a 5 x 5 matrix. +#' \eqn{\hat{\Gamma}_l} is defined in eq. 9 from Choi & Shin (2021). 
#' #' @export -#' @param e an object created with [`calc_e`]. +#' @param e an object created with `calc_e`. #' @param l a scalar indicating a number of time points. #' #' @examples -#' -#' ## Computing `$\hat{\Gamma}_l$` +#' ## Computing \eqn{\hat{\Gamma}_l} #' #' calc_Gamma(e = e, l = 3) #' @@ -116,18 +117,15 @@ calc_Gamma <- function(e, l) { } -#' @describeIn CI Internal function computing `$\hat{\Gamma}^\Inf$`. +#' @describeIn CI computes \eqn{\hat{\Gamma}^\infty}. #' -#' `$\hat{\Gamma}^\Inf$` is a component needed to compute confidence intervals (the long run variance estimator); -#' it is defined in eq. 9 from Choi & Shin, 2021. -#' The function returns a 5 x 5 matrix. +#' \eqn{\hat{\Gamma}^\infty} is the long run variance estimator, defined in eq. 9 from Choi & Shin (2021). #' #' @export #' @param L a scalar indicating a bandwidth parameter. #' #' @examples -#' -#' ## Computing `$\hat{\Gamma}^\Inf$` +#' ## Computing \eqn{\hat{\Gamma}^\infty} #' #' calc_GammaINF(e = e, L = 2) #' @@ -145,19 +143,16 @@ calc_GammaINF <- function(e, L) { } -#' @describeIn CI Internal function computing `$L_{And}$`. +#' @describeIn CI computes \eqn{L_{And}}. #' -#' `$L_{And}$` is a component needed to compute confidence intervals; -#' it is defined in Choi & Shin, 2021, p 342. -#' It also corresponds to `$S_T^*$`, eq 5.3 in Andrews 1991. -#' The function returns a scalar which should be used as an input for `L` in [`calc_GammaINF`]. +#' \eqn{L_{And}} is defined in Choi & Shin (2021, p 342). +#' It also corresponds to \eqn{S_T^*}, eq 5.3 in Andrews (1991). #' #' @export -#' @param AR.method character string specifying the method to fit the autoregressive model used to compute `$\hat{\gamma}_1$` in `$L_{And}$` (see [`stats::ar`] for details). +#' @param AR.method character string specifying the method to fit the autoregressive model used to compute \eqn{\hat{\gamma}_1} in \eqn{L_{And}} (see [`stats::ar`] for details). 
#' #' @examples -#' -#' ## Computing `$L_{And}$` +#' ## Computing \eqn{L_{And}} #' #' calc_L_And(e = e) #' sapply(c("yule-walker", "burg", "ols", "mle", "yw"), @@ -171,17 +166,14 @@ calc_L_And <- function(e, AR.method = c("yule-walker", "burg", "ols", "mle", "yw } -#' @describeIn CI Internal function computing `$D_t$`. +#' @describeIn CI computes \eqn{D_t}. #' -#' `$D_t$` is a component needed to compute confidence intervals; -#' it is defined in Choi & Shin, 2021, p 338. -#' The function returns a `t` x 5 matrix storing the residuals. +#' \eqn{D_t} is defined in Choi & Shin (2021, p 338). #' #' @export #' #' @examples -#' -#' ## Computing `$D_t$` +#' ## Computing \eqn{D_t} #' #' D <- calc_D(smoothed_obj = rho_obj) #' head(D) # D matrix for the first six time points @@ -197,24 +189,22 @@ calc_D <- function(smoothed_obj) { } -#' @describeIn CI Internal function computing `$se(\hat{rho}_t(h))$`. +#' @describeIn CI computes \eqn{se(\hat{\rho}_t(h))}. #' -#' The standard deviation of the time-varying correlation (`$se(\hat{rho}_t(h))$`) is defined in eq. 8 from Choi & Shin, 2021. -#' It depends on `$D_{Lt}$`, `$D_{Mt}$` & `$D_{Ut}$`, themselves defined in Choi & Shin, 2021, p 337 & 339. -#' The `$D_{Xt}$` terms are all computed within the function since they all rely on the same components. -#' The function returns a vector of length `t`. +#' The standard deviation of the time-varying correlation (\eqn{se(\hat{\rho}_t(h))}) is defined in eq. 8 from Choi & Shin (2021). +#' It depends on \eqn{D_{Lt}}, \eqn{D_{Mt}} & \eqn{D_{Ut}}, themselves defined in Choi & Shin (2021, p 337 & 339). +#' The \eqn{D_{Xt}} terms are all computed within the function since they all rely on the same components. 
#' #' @export #' @inheritParams kern_smooth #' #' @examples +#' ## Computing \eqn{se(\hat{\rho}_t(h))} +#' # nb: takes a few seconds to run #' #' run <- FALSE ## change to TRUE to run the example #' if (in_pkgdown() || run) { #' -#' ## Computing `$se(\hat{rho}_t(h))$` -#' # nb: takes a few seconds to run -#' #' SE <- calc_SE(smoothed_obj = rho_obj, h = 50) #' head(SE) # SE vector for the first six time points #' diff --git a/R/data.R b/R/data.R index cf3b709..acc7c2b 100644 --- a/R/data.R +++ b/R/data.R @@ -1,7 +1,7 @@ #' Daily Closing Prices of Major European Stock Indices, April 2000--December 2017 #' #' A dataset containing the stockmarket returns between 2000-04-03 and 2017-12-05. -#' This dataset is very close to the one used by Choi & Shin 2021, although not +#' This dataset is very close to the one used by Choi & Shin (2021), although not #' strictly identical. It has been produced by the Oxford-Man Institute of Quantitative Finance. #' #' @format A data frame with 4618 rows and 7 variables: @@ -12,7 +12,7 @@ #' \item{Nikkei}{a numeric vector of the stockmarket return for the Nikkei 225.} #' \item{DAX}{a numeric vector of the stockmarket return for the German stock index.} #' \item{NASDAQ}{a numeric vector of the stockmarket return for the Nasdaq Stock Market.} -#' \item{Event}{a character string of particular events that have impacted the stockmarket, as in Choi & Shin 2021.} +#' \item{Event}{a character string of particular events that have impacted the stockmarket, as in Choi & Shin (2021).} #' } #' #' @source diff --git a/R/smoothers.R b/R/smoothers.R index d85f837..5c1ea87 100644 --- a/R/smoothers.R +++ b/R/smoothers.R @@ -4,8 +4,8 @@ #' #' The function is essentially a wrapper that calls different underlying #' functions depending on the kernel that is selected: -#' - [`lpridge::lpepa`] for "epanechnikov". -#' - [`stats::ksmooth`] for "normal" and "box". +#' - [`lpridge::lpepa()`] for "epanechnikov". +#' - [`stats::ksmooth()`] for "normal" and "box". 
#' The argument `param_smoother` can be used to pass additional arguments to #' these functions. #' diff --git a/R/tcor.R b/R/tcor.R index 8791501..52adc7e 100644 --- a/R/tcor.R +++ b/R/tcor.R @@ -1,23 +1,24 @@ #' Compute time varying correlation coefficients #' -#' The function `tcor` implements (together with its helper function -#' `calc_rho`) the nonparametric estimation of the time varying correlation -#' coefficient proposed by Choi & Shin, 2021. The general idea is to compute a -#' (Pearson) correlation coefficient (`r(x,y) = (mean(xy) - mean(x)*mean(y)) / -#' (sqrt(mean(x^2)-mean(x)^2) * sqrt(mean(y^2)-mean(y)^2))`), but instead of -#' using the means required for such a computation, each component (i.e., `x`, -#' `y`, `x^2`, `y^2`, `x*y`) is smoothed and the smoothed terms are considered -#' in place the original means. The intensity of the smoothing depends on a -#' unique parameter: the bandwidth (`h`). If `h = Inf`, the method produces the -#' original (i.e., time-invariant) correlation value. The smaller the parameter -#' `h`, the more variation in time is being captured. The parameter `h` can be -#' provided by the user; otherwise it is automatically estimated by the internal -#' helper functions `select_h` and `calc_RMSE` (see **Details**). +#' The function `tcor()` implements (together with its helper function +#' `calc_rho()`) the nonparametric estimation of the time varying correlation +#' coefficient proposed by Choi & Shin (2021). The general idea is to compute a +#' (Pearson) correlation coefficient (\eqn{r(x,y) = \frac{\hat{xy} - \hat{x}\times\hat{y}}{ +#' \sqrt{\hat{x^2}-\hat{x}^2} \times \sqrt{\hat{y^2}-\hat{y}^2}}}), but instead of +#' using the means required for such a computation, each component (i.e., +#' \eqn{x}, \eqn{y}, \eqn{x^2}, \eqn{y^2}, \eqn{x \times y}) is smoothed and the +#' smoothed terms are considered in place of the original means. The intensity of +#' the smoothing depends on a unique parameter: the bandwidth (`h`). 
If `h = +#' Inf`, the method produces the original (i.e., time-invariant) correlation +#' value. The smaller the parameter `h`, the more variation in time is being +#' captured. The parameter `h` can be provided by the user; otherwise it is +#' automatically estimated by the internal helper functions `select_h()` and +#' `calc_RMSE()` (see **Details**). #' #' - **Smoothing**: the smoothing of each component is performed by kernel #' regression. The default is to use the Epanechnikov kernel following Choi & -#' Shin 2021, but other kernels have also been implemented and can thus -#' alternatively be used (see [`kern_smooth`] for details). The normal kernel +#' Shin (2021), but other kernels have also been implemented and can thus +#' alternatively be used (see [`kern_smooth()`] for details). The normal kernel #' seems to sometimes lead to very small bandwidth being selected, but the #' default kernel can lead to numerical issues (see next point). We thus #' recommend always comparing the results from different kernel methods. @@ -25,25 +26,25 @@ #' - **Numerical issues**: some numerical issues can happen because the smoothing #' is performed independently on each component of the correlation coefficient. #' As a consequence, some relationship between components may become violated -#' for some time points. For instance, if the square of the smoothed $x$ term -#' gets larger than the smoothed $x^2$ term, the variance of $x$ would become +#' for some time points. For instance, if the square of the smoothed \eqn{x} term +#' gets larger than the smoothed \eqn{x^2} term, the variance of \eqn{x} would become #' negative. In such cases, coefficient values returned are `NA`. #' #' - **Bandwidth selection**: when the value used to define the bandwidth (`h`) -#' in `tcor` is set to `NULL` (the default), the internal function `select_h` +#' in `tcor()` is set to `NULL` (the default), the internal function `select_h()` #' is used to to select the optimal value for `h`. 
It is first estimated by -#' leave-one-out cross validation (using internally `calc_RMSE`). If the cross +#' leave-one-out cross validation (using internally `calc_RMSE()`). If the cross #' validation error (RMSE) is minimal for the maximal value of `h` considered -#' (`8*sqrt(N)`), rather than taking this as the optimal `h` value, the +#' (\eqn{8\sqrt{N}}), rather than taking this as the optimal `h` value, the #' bandwidth becomes estimated using the so-called elbow criterion. This latter #' method identifies the value `h` after which the cross validation error #' decreasing very little. The procedure is detailed in section 2.1 in Choi & -#' Shin, 2021. +#' Shin (2021). #' #' - **Parallel computation**: if `h` is not provided, an automatic bandwidth #' selection occurs (see above). For large datasets, this step can be #' computationally demanding. The current implementation thus relies on -#' [`parallel::mclapply`] and is thus only effective for Linux and MacOS. +#' [`parallel::mclapply()`] and is thus only effective for Linux and MacOS. #' Relying on parallel processing also implies that you call `options("mc.cores" #' = XX)` beforehand, replacing `XX` by the relevant number of CPU cores you #' want to use (see **Examples**). For debugging, do use `options("mc.cores" = @@ -51,8 +52,8 @@ #' child nodes. #' #' - **Confidence interval**: if `CI` is set to `TRUE`, a confidence interval is -#' calculated as described in Choi & Shin, 2021. This is also necessary for using -#' [`test_equality`] to test differences between correlations at two time points. +#' calculated as described in Choi & Shin (2021). This is also necessary for using +#' [`test_equality()`] to test differences between correlations at two time points. #' The computation of the confidence intervals involves multiple internal #' functions (see [`CI`] for details). 
#' @@ -68,21 +69,6 @@ #' @param verbose a logical specifying if information should be displayed to #' monitor the progress of the cross validation (default = `FALSE`). #' -#' @return A dataframe containing: -#' - the time points (`t`). -#' - the estimates of the correlation value (`r`). -#' -#' as well as, if `CI = TRUE`: -#' - the Standard Error (`SE`). -#' - the lower boundary of the confidence intervals (`lwr`). -#' - the upper boundary of the confidence intervals (`upr`). -#' -#' Some metadata are also attached to the dataframe (as attributes): -#' - `h` the bandwidth parameter. -#' - `RMSE` the minimal root mean square error when `h` is selected by cross validation. -#' - `h_selection` the method used to select `h`. -#' - `h_select_duration` the computing time spent to select the bandwidth parameter. -#' #' @name tcor #' @rdname tcor #' @@ -97,7 +83,29 @@ NULL -#' @describeIn tcor **The user-level function to be used**. +#' @describeIn tcor **the user-level function to be used**. +#' +#' @return +#' **---Output for `tcor()`---** +#' +#' A 2 x \eqn{t} dataframe containing: +#' - the time points (`t`). +#' - the estimates of the correlation value (`r`). +#' +#' Or, if `CI = TRUE`, a 5 x \eqn{t} dataframe containing: +#' - the time points (`t`). +#' - the estimates of the correlation value (`r`). +#' - the Standard Error (`SE`). +#' - the lower boundary of the confidence intervals (`lwr`). +#' - the upper boundary of the confidence intervals (`upr`). +#' +#' Some metadata are also attached to the dataframe (as attributes): +#' - the call to the function (`call`). +#' - the argument `CI`. +#' - the bandwidth parameter (`h`). +#' - the method used to select `h` (`h_selection`). +#' - the minimal root mean square error when `h` is selected (`RMSE`). +#' - the computing time (in seconds) spent to select the bandwidth parameter (`h_selection_duration`) if `h` automatically selected. 
#' #' @order 1 #' @@ -347,12 +355,20 @@ tcor <- function(x, y, t = seq_along(x), h = NULL, cor.method = c("pearson", "sp } -#' @describeIn tcor Internal function computing the correlation for a given bandwidth. +#' @describeIn tcor computes the correlation for a given bandwidth. #' #' The function calls the kernel smoothing procedure on each component required -#' to compute the time-varying correlation. It returns a dataframe with the time, -#' the correlation value and the underlying components used for the computation. +#' to compute the time-varying correlation. +#' +#' @return +#' **---Output for `calc_rho()`---** #' +#' A 14 x \eqn{t} dataframe with: +#' - the five raw components of correlation (`x`, `y`, `x2`, `y2`, `xy`). +#' - the time points (`t`). +#' - the five raw components of correlation after smoothing (`x_smoothed`, `y_smoothed`, `x2_smoothed`, `y2_smoothed`, `xy_smoothed`). +#' - the standard deviation around \eqn{x} and \eqn{y} (`sd_x_smoothed`, `sd_y_smoothed`). +#' - the smoothed correlation coefficient (`rho_smoothed`). #' @order 2 #' #' @export @@ -450,8 +466,12 @@ calc_rho <- function(x, y, t = seq_along(x), t.for.pred = t, h, cor.method = c(" #' at the missing time point based on the other time points. It then computes #' and returns the RMSE between this predicted correlation and the one predicted #' using the full dataset. See also *Bandwidth selection* and *Parallel -#' computation* in -#' **Details**. +#' computation* in **Details**. +#' +#' @return +#' **---Output for `calc_RMSE()`---** +#' +#' A scalar of class numeric corresponding to the RMSE. #' #' @order 3 #' @@ -505,6 +525,15 @@ calc_RMSE <- function(h, x, y, t = seq_along(x), cor.method = c("pearson", "spea #' #' @order 4 #' +#' @return +#' **---Output for `select_h()`---** +#' +#' A list with the following components: +#' - the selected bandwidth parameter (`h`). +#' - the method used to select `h` (`h_selection`). 
+#' - the minimal root mean square error when `h` is selected (`RMSE`). +#' - the computing time (in seconds) spent to select the bandwidth parameter (`time`). +#' #' @export #' #' @examples diff --git a/R/ttest.R b/R/ttest.R index d366fff..491ce6f 100644 --- a/R/ttest.R +++ b/R/ttest.R @@ -1,12 +1,12 @@ #' Compute equality test between correlation coefficient estimates at two time points #' #' This function tests whether smoothed correlation values at two time points are equal (H0) or not. -#' The test is described page 341 in Choi & Shin, 2021. +#' The test is described page 341 in Choi & Shin (2021). #' #' Two different test statistics can be used, one is asymptotically Student-t distributed under H0 and one is chi-square distributed. #' In practice, it seems to give very similar results. #' -#' @param tcor_obj the output of a call to [`tcor`] with `CI = TRUE`. +#' @param tcor_obj the output of a call to [`tcor()`] with `CI = TRUE`. #' @param t1 the first time point used by the test (by default, the first time point in the time series). #' @param t2 the second time point used by the test (by default, the last time point in the time series). #' @param test a character string indicating which test to use ("student", the default; or "chi2"). @@ -15,7 +15,7 @@ #' #' @export #' -#' @seealso [`test_ref`], [`tcor`] +#' @seealso [`test_ref()`], [`tcor()`] #' #' @examples #' ## Simple example @@ -110,7 +110,7 @@ test_equality <- function(tcor_obj, t1 = 1, t2 = nrow(tcor_obj), test = c("stude #' Test difference between correlation coefficient estimates and a value of reference #' #' This function tests whether smoothed correlation values are equal (H0) or not to a reference value (default = `0`). -#' The test is not described in Choi & Shin, 2021, but it is based on the idea behind [`test_equality`]. +#' The test is not described in Choi & Shin (2021), but it is based on the idea behind [`test_equality()`]. 
#' #' Two different test statistics can be used, one is asymptotically Student-t distributed under H0 and one is chi-square distributed. #' In practice, it seems to give very similar results. @@ -118,13 +118,13 @@ test_equality <- function(tcor_obj, t1 = 1, t2 = nrow(tcor_obj), test = c("stude #' @inheritParams test_equality #' @param t a vector of time point(s) used by the test (by default, all time points are considered). #' @param r_ref a scalar indicating the reference value for the correlation coefficient to be used in the test (default = `0`). -#' @param p.adjust.methods a character string indicating the method used to adjust p-values for multiple testing (see [`p.adjust`]; default = "none"). +#' @param p.adjust.methods a character string indicating the method used to adjust p-values for multiple testing (see [`p.adjust()`]; default = "none"). #' #' @return a data.frame with the result of the test, including the effect size (`delta_r = r[t] - r_ref`). #' #' @export #' -#' @seealso [`test_equality`], [`tcor`] +#' @seealso [`test_equality()`], [`tcor()`] #' #' @examples #' ## Comparison of all correlation values to reference of 0.5 diff --git a/README.md b/README.md index 05c6c92..d8302a1 100644 --- a/README.md +++ b/README.md @@ -86,7 +86,7 @@ example1 <- with(d, tcor(x = SP500, y = FTSE100, t = DateID, kernel = "normal")) #> #> You may use several CPU cores for faster computation by calling `options('mc.cores' = XX)` with `XX` corresponding to the number of CPU cores to be used. #> h selected using LOO-CV = 60.9 -#> Bandwidth automatic selection completed in 8.1 seconds +#> Bandwidth automatic selection completed in 9.1 seconds plot(example1, type = "l") ``` @@ -128,7 +128,7 @@ d |> #> #> You may use several CPU cores for faster computation by calling `options('mc.cores' = XX)` with `XX` corresponding to the number of CPU cores to be used. 
#> h selected using LOO-CV = 60.9 -#> Bandwidth automatic selection completed in 9.8 seconds +#> Bandwidth automatic selection completed in 8.7 seconds ``` @@ -153,7 +153,7 @@ example3 <- with(d, tcor(x = SP500, y = FTSE100, t = DateID, kernel = "normal", #> #> You may use several CPU cores for faster computation by calling `options('mc.cores' = XX)` with `XX` corresponding to the number of CPU cores to be used. #> h selected using LOO-CV = 60.9 -#> Bandwidth automatic selection completed in 9.5 seconds +#> Bandwidth automatic selection completed in 8.9 seconds test_equality(example3, t1 = "2000-05-02", t2 = "2001-05-02") #> t1 r1 t2 r2 delta_r SE_delta_r T_stat df #> 1 2000-05-02 0.4354492 2001-05-02 0.5722 0.1367509 0.1224746 1.116565 910 @@ -191,7 +191,7 @@ devtools::session_info() #> collate en_US.UTF-8 #> ctype en_US.UTF-8 #> tz Europe/Berlin -#> date 2023-11-05 +#> date 2023-11-06 #> pandoc 3.1.8 @ /usr/lib/rstudio/resources/app/bin/quarto/bin/tools/x86_64/ (via rmarkdown) #> #> ─ Packages ─────────────────────────────────────────────────────────────────── @@ -250,7 +250,7 @@ devtools::session_info() #> stringr 1.5.0 2022-12-02 [4] RSPM (R 4.2.0) #> tibble 3.2.1 2023-03-20 [4] RSPM (R 4.3.0) #> tidyselect 1.2.0 2022-10-10 [4] RSPM (R 4.2.0) -#> timevarcorr * 0.1.0 2023-11-05 [1] local +#> timevarcorr * 0.1.1 2023-11-06 [1] local #> urlchecker 1.0.1 2021-11-30 [4] RSPM (R 4.2.0) #> usethis 2.2.2 2023-07-06 [4] RSPM (R 4.2.0) #> utf8 1.2.4 2023-10-22 [4] RSPM (R 4.3.0) @@ -260,7 +260,7 @@ devtools::session_info() #> xtable 1.8-4 2019-04-21 [4] RSPM (R 4.2.0) #> yaml 2.3.7 2023-01-23 [4] RSPM (R 4.2.0) #> -#> [1] /tmp/Rtmpc9Ly9p/temp_libpathfaae13b7a4154 +#> [1] /tmp/RtmpZf1Gw1/temp_libpath1049fd45c346be #> [2] /home/courtiol/R/x86_64-pc-linux-gnu-library/4.3 #> [3] /usr/local/lib/R/site-library #> [4] /usr/lib/R/site-library diff --git a/cran-comments.md b/cran-comments.md index 858617d..aa5a22b 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,5 +1,7 
@@ -## R CMD check results -0 errors | 0 warnings | 1 note +Resubmission in response to review from Victoria Wimmer: -* This is a new release. +* Add Choi & Shin (2021) in DESCRIPTION. +* Improve documentation (returns more details and use of LaTeX). + +V.W. also mentioned "Please ensure that you do not use more than 2 cores in your examples, vignettes, etc.", but to the best of my knowledge, I never use more than 2 cores in the package. diff --git a/man/CI.Rd b/man/CI.Rd index 0a69140..5ef76ae 100644 --- a/man/CI.Rd +++ b/man/CI.Rd @@ -32,67 +32,66 @@ calc_SE( \arguments{ \item{smoothed_obj}{an object created with \code{\link{calc_rho}}.} -\item{H}{an object created with \code{\link{calc_H}}.} +\item{H}{an object created with \code{calc_H}.} -\item{e}{an object created with \code{\link{calc_e}}.} +\item{e}{an object created with \code{calc_e}.} \item{l}{a scalar indicating a number of time points.} \item{L}{a scalar indicating a bandwidth parameter.} -\item{AR.method}{character string specifying the method to fit the autoregressive model used to compute \verb{$\\hat\{\\gamma\}_1$} in \verb{$L_\{And\}$} (see \code{\link[stats:ar]{stats::ar}} for details).} +\item{AR.method}{character string specifying the method to fit the autoregressive model used to compute \eqn{\hat{\gamma}_1} in \eqn{L_{And}} (see \code{\link[stats:ar]{stats::ar}} for details).} \item{h}{a scalar indicating the bandwidth used by the smoothing function.} } +\value{ +\itemize{ +\item \code{calc_H()} returns a 5 x 5 x \eqn{t} array of elements of class numeric, which corresponds to \eqn{\hat{H_t}} in Choi & Shin (2021). +\item \code{calc_e()} returns a \eqn{t} x 5 matrix of elements of class numeric storing the residuals, which corresponds to \eqn{\hat{e}_t} in Choi & Shin (2021). +\item \code{calc_Gamma()} returns a 5 x 5 matrix of elements of class numeric, which corresponds to \eqn{\hat{\Gamma}_l} in Choi & Shin (2021). 
+\item \code{calc_GammaINF()} returns a 5 x 5 matrix of elements of class numeric, which corresponds to \eqn{\hat{\Gamma}^\infty} in Choi & Shin (2021). +\item \code{calc_L_And()} returns a scalar of class numeric, which corresponds to \eqn{L_{And}} in Choi & Shin (2021). +\item \code{calc_D()} returns a \eqn{t} x 5 matrix of elements of class numeric storing the residuals, which corresponds to \eqn{D_t} in Choi & Shin (2021). +\item \code{calc_SE()} returns a vector of length \eqn{t} of elements of class numeric, which corresponds to \eqn{se(\hat{\rho}_t(h))} in Choi & Shin (2021). +} +} \description{ -These functions compute the different terms required to compute the confidence -interval around the time-varying correlation coefficient. +These functions compute the different terms required for \code{\link[=tcor]{tcor()}} to compute the confidence +interval around the time-varying correlation coefficient. These terms are defined in Choi & Shin (2021). } \section{Functions}{ \itemize{ -\item \code{calc_H()}: Internal function computing the \verb{$\\hat\{H_t\}$} array. +\item \code{calc_H()}: computes the \eqn{\hat{H_t}} array. -\verb{$\\hat\{H_t\}$} is a component needed to compute confidence intervals; -\verb{$H_t$} is defined in eq. 6 from Choi & Shin, 2021. -The function returns a 5 x 5 x \code{t} array. +\eqn{\hat{H_t}} is a component needed to compute confidence intervals; +\eqn{H_t} is defined in eq. 6 from Choi & Shin (2021). -\item \code{calc_e()}: Internal function computing \verb{$\\hat\{e\}_t$}. +\item \code{calc_e()}: computes \eqn{\hat{e}_t}. -\verb{$\\hat\{e\}_t$} is a component needed to compute confidence intervals; -it is defined in eq. 9 from Choi & Shin, 2021. -The function returns a \code{t} x 5 matrix storing the residuals. +\eqn{\hat{e}_t} is defined in eq. 9 from Choi & Shin (2021). -\item \code{calc_Gamma()}: Internal function computing \verb{$\\hat\{\\Gamma\}_l$}. +\item \code{calc_Gamma()}: computes \eqn{\hat{\Gamma}_l}. 
-\verb{$\\hat\{\\Gamma\}_l$} is a component needed to compute confidence intervals; -it is defined in eq. 9 from Choi & Shin, 2021. -The function returns a 5 x 5 matrix. +\eqn{\hat{\Gamma}_l} is defined in eq. 9 from Choi & Shin (2021). -\item \code{calc_GammaINF()}: Internal function computing \verb{$\\hat\{\\Gamma\}^\\Inf$}. +\item \code{calc_GammaINF()}: computes \eqn{\hat{\Gamma}^\infty}. -\verb{$\\hat\{\\Gamma\}^\\Inf$} is a component needed to compute confidence intervals (the long run variance estimator); -it is defined in eq. 9 from Choi & Shin, 2021. -The function returns a 5 x 5 matrix. +\eqn{\hat{\Gamma}^\infty} is the long run variance estimator, defined in eq. 9 from Choi & Shin (2021). -\item \code{calc_L_And()}: Internal function computing \verb{$L_\{And\}$}. +\item \code{calc_L_And()}: computes \eqn{L_{And}}. -\verb{$L_\{And\}$} is a component needed to compute confidence intervals; -it is defined in Choi & Shin, 2021, p 342. -It also corresponds to \verb{$S_T^*$}, eq 5.3 in Andrews 1991. -The function returns a scalar which should be used as an input for \code{L} in \code{\link{calc_GammaINF}}. +\eqn{L_{And}} is defined in Choi & Shin (2021, p 342). +It also corresponds to \eqn{S_T^*}, eq 5.3 in Andrews (1991). -\item \code{calc_D()}: Internal function computing \verb{$D_t$}. +\item \code{calc_D()}: computes \eqn{D_t}. -\verb{$D_t$} is a component needed to compute confidence intervals; -it is defined in Choi & Shin, 2021, p 338. -The function returns a \code{t} x 5 matrix storing the residuals. +\eqn{D_t} is defined in Choi & Shin (2021, p 338). -\item \code{calc_SE()}: Internal function computing \verb{$se(\\hat\{rho\}_t(h))$}. +\item \code{calc_SE()}: computes \eqn{se(\hat{\rho}_t(h))}. -The standard deviation of the time-varying correlation (\verb{$se(\\hat\{rho\}_t(h))$}) is defined in eq. 8 from Choi & Shin, 2021. -It depends on \verb{$D_\{Lt\}$}, \verb{$D_\{Mt\}$} & \verb{$D_\{Ut\}$}, themselves defined in Choi & Shin, 2021, p 337 & 339. 
-The \verb{$D_\{Xt\}$} terms are all computed within the function since they all rely on the same components. -The function returns a vector of length \code{t}. +The standard deviation of the time-varying correlation (\eqn{se(\hat{\rho}_t(h))}) is defined in eq. 8 from Choi & Shin (2021). +It depends on \eqn{D_{Lt}}, \eqn{D_{Mt}} & \eqn{D_{Ut}}, themselves defined in Choi & Shin (2021, p 337 & 339). +The \eqn{D_{Xt}} terms are all computed within the function since they all rely on the same components. }} \examples{ @@ -100,48 +99,41 @@ rho_obj <- with(na.omit(stockprice), calc_rho(x = SP500, y = FTSE100, t = DateID, h = 20, kernel = "box")) head(rho_obj) - -## Computing `$\hat{H_t}$` +## Computing \eqn{\hat{H_t}} H <- calc_H(smoothed_obj = rho_obj) H[, , 1:2] # H array for the first two time points - -## Computing `$\hat{e}_t$` +## Computing \eqn{\hat{e}_t} e <- calc_e(smoothed_obj = rho_obj, H = H) head(e) # e matrix for the first six time points - -## Computing `$\hat{\Gamma}_l$` +## Computing \eqn{\hat{\Gamma}_l} calc_Gamma(e = e, l = 3) - -## Computing `$\hat{\Gamma}^\Inf$` +## Computing \eqn{\hat{\Gamma}^\infty} calc_GammaINF(e = e, L = 2) - -## Computing `$L_{And}$` +## Computing \eqn{L_{And}} calc_L_And(e = e) sapply(c("yule-walker", "burg", "ols", "mle", "yw"), function(m) calc_L_And(e = e, AR.method = m)) ## comparing AR.methods - -## Computing `$D_t$` +## Computing \eqn{D_t} D <- calc_D(smoothed_obj = rho_obj) head(D) # D matrix for the first six time points +## Computing \eqn{se(\hat{\rho}_t(h))} +# nb: takes a few seconds to run run <- FALSE ## change to TRUE to run the example if (in_pkgdown() || run) { -## Computing `$se(\hat{rho}_t(h))$` -# nb: takes a few seconds to run - SE <- calc_SE(smoothed_obj = rho_obj, h = 50) head(SE) # SE vector for the first six time points @@ -157,5 +149,5 @@ Andrews, D. W. K. Heteroskedasticity and autocorrelation consistent covariance m Econometrica: Journal of the Econometric Society, 817-858 (1991). 
} \seealso{ -\code{\link{tcor}} +\code{\link[=tcor]{tcor()}} } diff --git a/man/kern_smooth.Rd b/man/kern_smooth.Rd index 673af8a..042652a 100644 --- a/man/kern_smooth.Rd +++ b/man/kern_smooth.Rd @@ -45,8 +45,8 @@ The function perform the smoothing of a time-series by non-parametric kernel reg The function is essentially a wrapper that calls different underlying functions depending on the kernel that is selected: \itemize{ -\item \code{\link[lpridge:lpepa]{lpridge::lpepa}} for "epanechnikov". -\item \code{\link[stats:ksmooth]{stats::ksmooth}} for "normal" and "box". +\item \code{\link[lpridge:lpepa]{lpridge::lpepa()}} for "epanechnikov". +\item \code{\link[stats:ksmooth]{stats::ksmooth()}} for "normal" and "box". The argument \code{param_smoother} can be used to pass additional arguments to these functions. } diff --git a/man/stockprice.Rd b/man/stockprice.Rd index 6f52333..c15a76e 100644 --- a/man/stockprice.Rd +++ b/man/stockprice.Rd @@ -13,7 +13,7 @@ A data frame with 4618 rows and 7 variables: \item{Nikkei}{a numeric vector of the stockmarket return for the Nikkei 225.} \item{DAX}{a numeric vector of the stockmarket return for the German stock index.} \item{NASDAQ}{a numeric vector of the stockmarket return for the Nasdaq Stock Market.} -\item{Event}{a character string of particular events that have impacted the stockmarket, as in Choi & Shin 2021.} +\item{Event}{a character string of particular events that have impacted the stockmarket, as in Choi & Shin (2021).} } } \source{ @@ -25,7 +25,7 @@ stockprice } \description{ A dataset containing the stockmarket returns between 2000-04-03 and 2017-12-05. -This dataset is very close to the one used by Choi & Shin 2021, although not +This dataset is very close to the one used by Choi & Shin (2021), although not strictly identical. It has been produced by the Oxford-Man Institute of Quantitative Finance. 
 }
 \references{
diff --git a/man/tcor.Rd b/man/tcor.Rd
index 79240b5..d516ba1 100644
--- a/man/tcor.Rd
+++ b/man/tcor.Rd
@@ -86,14 +86,18 @@ monitor the progress of the cross validation (default = \code{FALSE}).}
 evaluate the smoothed fit. If missing, \code{t} is used.}
 }
 \value{
-A dataframe containing:
+\strong{---Output for \code{tcor()}---}
+
+A 2 x \eqn{t} dataframe containing:
 \itemize{
 \item the time points (\code{t}).
 \item the estimates of the correlation value (\code{r}).
 }
 
-as well as, if \code{CI = TRUE}:
+Or, if \code{CI = TRUE}, a 5 x \eqn{t} dataframe containing:
 \itemize{
+\item the time points (\code{t}).
+\item the estimates of the correlation value (\code{r}).
 \item the Standard Error (\code{SE}).
 \item the lower boundary of the confidence intervals (\code{lwr}).
 \item the upper boundary of the confidence intervals (\code{upr}).
@@ -101,74 +105,101 @@ as well as, if \code{CI = TRUE}:
 Some metadata are also attached to the dataframe (as attributes):
 \itemize{
-\item \code{h} the bandwidth parameter.
-\item \code{RMSE} the minimal root mean square error when \code{h} is selected by cross validation.
-\item \code{h_selection} the method used to select \code{h}.
-\item \code{h_select_duration} the computing time spent to select the bandwidth parameter.
+\item the call to the function (\code{call}).
+\item the argument \code{CI}.
+\item the bandwidth parameter (\code{h}).
+\item the method used to select \code{h} (\code{h_selection}).
+\item the minimal root mean square error when \code{h} is selected (\code{RMSE}).
+\item the computing time (in seconds) spent to select the bandwidth parameter (\code{h_selection_duration}) if \code{h} automatically selected.
+}
+
+\strong{---Output for \code{calc_rho()}---}
+
+A 14 x \eqn{t} dataframe with:
+\itemize{
+\item the five raw components of correlation (\code{x}, \code{y}, \code{x2}, \code{y2}, \code{xy}).
+\item the time points (\code{t}).
+\item the five raw components of correlation after smoothing (\code{x_smoothed}, \code{y_smoothed}, \code{x2_smoothed}, \code{y2_smoothed}, \code{xy_smoothed}).
+\item the standard deviation around \eqn{x} and \eqn{y} (\code{sd_x_smoothed}, \code{sd_y_smoothed}).
+\item the smoothed correlation coefficient (\code{rho_smoothed}).
+}
+
+\strong{---Output for \code{calc_RMSE()}---}
+
+A scalar of class numeric corresponding to the RMSE.
+
+\strong{---Output for \code{select_h()}---}
+
+A list with the following components:
+\itemize{
+\item the selected bandwidth parameter (\code{h}).
+\item the method used to select \code{h} (\code{h_selection}).
+\item the minimal root mean square error when \code{h} is selected (\code{RMSE}).
+\item the computing time (in seconds) spent to select the bandwidth parameter (\code{time}).
 }
 }
 \description{
-The function \code{tcor} implements (together with its helper function
-\code{calc_rho}) the nonparametric estimation of the time varying correlation
-coefficient proposed by Choi & Shin, 2021. The general idea is to compute a
-(Pearson) correlation coefficient (\code{r(x,y) = (mean(xy) - mean(x)*mean(y)) / (sqrt(mean(x^2)-mean(x)^2) * sqrt(mean(y^2)-mean(y)^2))}), but instead of
-using the means required for such a computation, each component (i.e., \code{x},
-\code{y}, \code{x^2}, \code{y^2}, \code{x*y}) is smoothed and the smoothed terms are considered
-in place the original means. The intensity of the smoothing depends on a
-unique parameter: the bandwidth (\code{h}). If \code{h = Inf}, the method produces the
-original (i.e., time-invariant) correlation value. The smaller the parameter
-\code{h}, the more variation in time is being captured. The parameter \code{h} can be
-provided by the user; otherwise it is automatically estimated by the internal
-helper functions \code{select_h} and \code{calc_RMSE} (see \strong{Details}).
+The function \code{tcor()} implements (together with its helper function
+\code{calc_rho()}) the nonparametric estimation of the time varying correlation
+coefficient proposed by Choi & Shin (2021). The general idea is to compute a
+(Pearson) correlation coefficient (\eqn{r(x,y) = \frac{\hat{xy} - \hat{x}\times\hat{y}}{
+\sqrt{\hat{x^2}-\hat{x}^2} \times \sqrt{\hat{y^2}-\hat{y}^2}}}), but instead of
+using the means required for such a computation, each component (i.e.,
+\eqn{x}, \eqn{y}, \eqn{x^2}, \eqn{y^2}, \eqn{x \times y}) is smoothed and the
+smoothed terms are considered in place of the original means. The intensity of
+the smoothing depends on a unique parameter: the bandwidth (\code{h}). If \code{h = Inf}, the method produces the original (i.e., time-invariant) correlation
+value. The smaller the parameter \code{h}, the more variation in time is being
+captured. The parameter \code{h} can be provided by the user; otherwise it is
+automatically estimated by the internal helper functions \code{select_h()} and
+\code{calc_RMSE()} (see \strong{Details}).
 }
 \details{
 \itemize{
 \item \strong{Smoothing}: the smoothing of each component is performed by kernel
 regression. The default is to use the Epanechnikov kernel following Choi &
-Shin 2021, but other kernels have also been implemented and can thus
-alternatively be used (see \code{\link{kern_smooth}} for details). The normal kernel
+Shin (2021), but other kernels have also been implemented and can thus
+alternatively be used (see \code{\link[=kern_smooth]{kern_smooth()}} for details). The normal kernel
 seems to sometimes lead to very small bandwidth being selected, but the
 default kernel can lead to numerical issues (see next point). We thus
 recommend always comparing the results from different kernel methods.
 \item \strong{Numerical issues}: some numerical issues can happen because the smoothing
 is performed independently on each component of the correlation coefficient.
As a consequence, some relationship between components may become violated -for some time points. For instance, if the square of the smoothed $x$ term -gets larger than the smoothed $x^2$ term, the variance of $x$ would become +for some time points. For instance, if the square of the smoothed \eqn{x} term +gets larger than the smoothed \eqn{x^2} term, the variance of \eqn{x} would become negative. In such cases, coefficient values returned are \code{NA}. \item \strong{Bandwidth selection}: when the value used to define the bandwidth (\code{h}) -in \code{tcor} is set to \code{NULL} (the default), the internal function \code{select_h} +in \code{tcor()} is set to \code{NULL} (the default), the internal function \code{select_h()} is used to to select the optimal value for \code{h}. It is first estimated by -leave-one-out cross validation (using internally \code{calc_RMSE}). If the cross +leave-one-out cross validation (using internally \code{calc_RMSE()}). If the cross validation error (RMSE) is minimal for the maximal value of \code{h} considered -(\code{8*sqrt(N)}), rather than taking this as the optimal \code{h} value, the +(\eqn{8\sqrt{N}}), rather than taking this as the optimal \code{h} value, the bandwidth becomes estimated using the so-called elbow criterion. This latter method identifies the value \code{h} after which the cross validation error decreasing very little. The procedure is detailed in section 2.1 in Choi & -Shin, 2021. +Shin (2021). \item \strong{Parallel computation}: if \code{h} is not provided, an automatic bandwidth selection occurs (see above). For large datasets, this step can be computationally demanding. The current implementation thus relies on -\code{\link[parallel:mclapply]{parallel::mclapply}} and is thus only effective for Linux and MacOS. +\code{\link[parallel:mclapply]{parallel::mclapply()}} and is thus only effective for Linux and MacOS. 
Relying on parallel processing also implies that you call \code{options("mc.cores" = XX)} beforehand, replacing \code{XX} by the relevant number of CPU cores you want to use (see \strong{Examples}). For debugging, do use \code{options("mc.cores" = 1)}, otherwise you may not be able to see the error messages generated in child nodes. \item \strong{Confidence interval}: if \code{CI} is set to \code{TRUE}, a confidence interval is -calculated as described in Choi & Shin, 2021. This is also necessary for using -\code{\link{test_equality}} to test differences between correlations at two time points. +calculated as described in Choi & Shin (2021). This is also necessary for using +\code{\link[=test_equality]{test_equality()}} to test differences between correlations at two time points. The computation of the confidence intervals involves multiple internal functions (see \code{\link{CI}} for details). } } \section{Functions}{ \itemize{ -\item \code{tcor()}: \strong{The user-level function to be used}. +\item \code{tcor()}: \strong{the user-level function to be used}. -\item \code{calc_rho()}: Internal function computing the correlation for a given bandwidth. +\item \code{calc_rho()}: computes the correlation for a given bandwidth. The function calls the kernel smoothing procedure on each component required -to compute the time-varying correlation. It returns a dataframe with the time, -the correlation value and the underlying components used for the computation. +to compute the time-varying correlation. \item \code{calc_RMSE()}: Internal function computing the root mean square error (RMSE) for a given bandwidth. @@ -176,8 +207,7 @@ The function removes each time point one by one and predicts the correlation at the missing time point based on the other time points. It then computes and returns the RMSE between this predicted correlation and the one predicted using the full dataset. See also \emph{Bandwidth selection} and \emph{Parallel -computation} in -\strong{Details}. 
+computation} in \strong{Details}. \item \code{select_h()}: Internal function selecting the optimal bandwidth parameter \code{h}. diff --git a/man/test_equality.Rd b/man/test_equality.Rd index 8d5859a..4c36bda 100644 --- a/man/test_equality.Rd +++ b/man/test_equality.Rd @@ -12,7 +12,7 @@ test_equality( ) } \arguments{ -\item{tcor_obj}{the output of a call to \code{\link{tcor}} with \code{CI = TRUE}.} +\item{tcor_obj}{the output of a call to \code{\link[=tcor]{tcor()}} with \code{CI = TRUE}.} \item{t1}{the first time point used by the test (by default, the first time point in the time series).} @@ -25,7 +25,7 @@ a data.frame with the result of the test, including the effect size (\code{delta } \description{ This function tests whether smoothed correlation values at two time points are equal (H0) or not. -The test is described page 341 in Choi & Shin, 2021. +The test is described page 341 in Choi & Shin (2021). } \details{ Two different test statistics can be used, one is asymptotically Student-t distributed under H0 and one is chi-square distributed. 
@@ -59,5 +59,5 @@ stockprice[1000, "DateID"] ## t2 does match with date `stockprice` despite missi
 }
 \seealso{
-\code{\link{test_ref}}, \code{\link{tcor}}
+\code{\link[=test_ref]{test_ref()}}, \code{\link[=tcor]{tcor()}}
 }
diff --git a/man/test_ref.Rd b/man/test_ref.Rd
index aca211b..2b41425 100644
--- a/man/test_ref.Rd
+++ b/man/test_ref.Rd
@@ -14,7 +14,7 @@ test_ref(
 )
 }
 \arguments{
-\item{tcor_obj}{the output of a call to \code{\link{tcor}} with \code{CI = TRUE}.}
+\item{tcor_obj}{the output of a call to \code{\link[=tcor]{tcor()}} with \code{CI = TRUE}.}
 
 \item{t}{a vector of time point(s) used by the test (by default, all time
 points are considered).}
@@ -22,14 +22,14 @@ test_ref(
 \item{test}{a character string indicating which test to use ("student", the
 default; or "chi2").}
 
-\item{p.adjust.methods}{a character string indicating the method used to adjust p-values for multiple testing (see \code{\link{p.adjust}}; default = "none").}
+\item{p.adjust.methods}{a character string indicating the method used to adjust p-values for multiple testing (see \code{\link[=p.adjust]{p.adjust()}}; default = "none").}
 }
 \value{
 a data.frame with the result of the test, including the effect size (\code{delta_r = r[t] - r_ref}).
 }
 \description{
 This function tests whether smoothed correlation values are equal (H0) or not to a reference value (default = \code{0}).
-The test is not described in Choi & Shin, 2021, but it is based on the idea behind \code{\link{test_equality}}.
+The test is not described in Choi & Shin (2021), but it is based on the idea behind \code{\link[=test_equality]{test_equality()}}.
 }
 \details{
 Two different test statistics can be used, one is asymptotically Student-t distributed under H0 and one is chi-square distributed.
@@ -62,5 +62,5 @@ test_ref(res, t = c("2000-08-18", "2000-10-27"))
 }
 \seealso{
-\code{\link{test_equality}}, \code{\link{tcor}}
+\code{\link[=test_equality]{test_equality()}}, \code{\link[=tcor]{tcor()}}
 }