Coverage for src/ts_stat_tests/correlation/algorithms.py: 100% (32 statements)
# ============================================================================ #
#                                                                              #
#     Title: Correlation Algorithms                                            #
#     Purpose: Algorithms for Correlation Measures in Time Series Analysis     #
#                                                                              #
# ============================================================================ #


# ---------------------------------------------------------------------------- #
#                                                                              #
#     Overview                                                              ####
#                                                                              #
# ---------------------------------------------------------------------------- #


# ---------------------------------------------------------------------------- #
#  Description                                                              ####
# ---------------------------------------------------------------------------- #
"""
!!! note "Summary"
    The correlation algorithms module provides functions to compute correlation measures for time series data, including the autocorrelation function (ACF), partial autocorrelation function (PACF), and cross-correlation function (CCF). These measures help identify relationships and dependencies between time series variables, which are essential for time series analysis and forecasting.

    This module leverages the `statsmodels` library to implement these correlation measures, ensuring robust and efficient computations. The functions are designed to handle various input scenarios and provide options for customization, such as specifying the number of lags, confidence intervals, and handling missing data.

    By using these correlation algorithms, users can gain insights into the temporal dependencies within their time series data, aiding in model selection and improving forecasting accuracy.
"""
# ---------------------------------------------------------------------------- #
#                                                                              #
#     Setup                                                                 ####
#                                                                              #
# ---------------------------------------------------------------------------- #


# ---------------------------------------------------------------------------- #
#  Imports                                                                  ####
# ---------------------------------------------------------------------------- #


# ## Python StdLib Imports ----
from typing import Literal, Optional, Union, overload

# ## Python Third Party Imports ----
import numpy as np
import pandas as pd
from numpy.typing import ArrayLike, NDArray
from statsmodels.regression.linear_model import (
    RegressionResults,
    RegressionResultsWrapper,
)
from statsmodels.stats.api import acorr_breusch_godfrey, acorr_ljungbox, acorr_lm
from statsmodels.stats.diagnostic import ResultsStore
from statsmodels.tsa.api import acf as st_acf, ccf as st_ccf, pacf as st_pacf
from statsmodels.tsa.stattools import ArrayLike1D
from typeguard import typechecked


# ---------------------------------------------------------------------------- #
#  Exports                                                                  ####
# ---------------------------------------------------------------------------- #


__all__: list[str] = ["acf", "pacf", "ccf", "lb", "lm", "bglm"]


## --------------------------------------------------------------------------- #
##  Constants                                                               ####
## --------------------------------------------------------------------------- #
VALID_ACF_MISSING_OPTIONS = Literal["none", "raise", "conservative", "drop"]


VALID_PACF_METHOD_OPTIONS = Literal[
    "yw",
    "ywadjusted",
    "ols",
    "ols-inefficient",
    "ols-adjusted",
    "ywm",
    "ywmle",
    "ld",
    "ldadjusted",
    "ldb",
    "ldbiased",
    "burg",
]


VALID_LM_COV_TYPE_OPTIONS = Literal["nonrobust", "HC0", "HC1", "HC2", "HC3"]
# ---------------------------------------------------------------------------- #
#                                                                              #
#     Algorithms                                                            ####
#                                                                              #
# ---------------------------------------------------------------------------- #
@overload
def acf(
    x: ArrayLike,
    adjusted: bool = False,
    nlags: Optional[int] = None,
    fft: bool = True,
    bartlett_confint: bool = True,
    missing: VALID_ACF_MISSING_OPTIONS = "none",
    *,
    qstat: Literal[False] = False,
    alpha: None = None,
) -> NDArray[np.float64]: ...
@overload
def acf(
    x: ArrayLike,
    adjusted: bool = False,
    nlags: Optional[int] = None,
    fft: bool = True,
    bartlett_confint: bool = True,
    missing: VALID_ACF_MISSING_OPTIONS = "none",
    *,
    qstat: Literal[False] = False,
    alpha: float,
) -> tuple[NDArray[np.float64], NDArray[np.float64]]: ...
@overload
def acf(
    x: ArrayLike,
    adjusted: bool = False,
    nlags: Optional[int] = None,
    fft: bool = True,
    bartlett_confint: bool = True,
    missing: VALID_ACF_MISSING_OPTIONS = "none",
    *,
    qstat: Literal[True],
    alpha: None = None,
) -> tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]: ...
@overload
def acf(
    x: ArrayLike,
    adjusted: bool = False,
    nlags: Optional[int] = None,
    fft: bool = True,
    bartlett_confint: bool = True,
    missing: VALID_ACF_MISSING_OPTIONS = "none",
    *,
    qstat: Literal[True],
    alpha: float,
) -> tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]: ...
@typechecked
def acf(
    x: ArrayLike,
    adjusted: bool = False,
    nlags: Optional[int] = None,
    fft: bool = True,
    bartlett_confint: bool = True,
    missing: VALID_ACF_MISSING_OPTIONS = "none",
    *,
    qstat: bool = False,
    alpha: Optional[float] = None,
) -> Union[
    NDArray[np.float64],
    tuple[NDArray[np.float64], NDArray[np.float64]],
    tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]],
    tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]],
]:
    r"""
    !!! note "Summary"
        The autocorrelation function (ACF) is a statistical tool used to study the correlation between a time series and its lagged values. In time series forecasting, the ACF is used to identify patterns and relationships between values in a time series at different lags, which can then be used to make predictions about future values.

        This function will implement the [`acf()`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.acf.html) function from the [`statsmodels`](https://www.statsmodels.org) library.

    ???+ abstract "Details"

        The ACF at lag `0` (i.e., `1`) is returned.

        For very long time series it is recommended to use `fft` convolution. When `fft` is `False`, a simple, direct estimator of the autocovariances is used that only computes the first $nlags + 1$ values. This can be much faster when the time series is long and only a small number of autocovariances are needed.

        If `adjusted` is `True`, the denominator for the autocovariance is adjusted for the loss of data.

        The ACF measures the correlation between a time series and its lagged values at different lags. The correlation is calculated as the ratio of the covariance between the series and its lagged values to the product of their standard deviations. The ACF is typically plotted as a graph, with the lag on the `x`-axis and the correlation coefficient on the `y`-axis.

        If the ACF shows a strong positive correlation at lag $k$, this means that values in the time series at time $t$ and time $t-k$ are strongly related. This can be useful in forecasting, as it suggests that past values can be used to predict future values. If the ACF shows a strong negative correlation at lag $k$, this means that values at time $t$ and time $t-k$ are strongly inversely related, which can also be useful in forecasting.

        The ACF can be used to identify the order of an autoregressive (AR) model, which is a type of model used in time series forecasting. The order of an AR model is the number of lags that are used to predict future values. The ACF can also be used to diagnose the presence of seasonality in a time series.

        Overall, the autocorrelation function is a valuable tool in time series forecasting, as it helps to identify patterns and relationships between values in a time series that can be used to make predictions about future values.

        The ACF can be calculated using the `acf()` function in the `statsmodels` package in Python. The function takes a time series array as input and returns an array of autocorrelation coefficients at different lags. The significance of the autocorrelation coefficients can be tested using the Ljung-Box test, which tests the null hypothesis that the autocorrelation coefficients are zero up to a certain lag. The Ljung-Box test can be performed using the `acorr_ljungbox()` function in the `statsmodels` package. If the p-value of the test is less than a certain significance level (e.g. $0.05$), then there is evidence of significant autocorrelation in the time series up to the specified lag.

    Params:
        x (ArrayLike):
            The time series data.
        adjusted (bool, optional):
            If `True`, then denominators for auto-covariance are $n-k$, otherwise $n$.<br>
            Defaults to `False`.
        nlags (Optional[int], optional):
            Number of lags to return autocorrelation for. If not provided, uses $\min(10 \times \log_{10}(n_{obs}), n_{obs}-1)$ (calculated with: `min(int(10 * np.log10(nobs)), nobs - 1)`). The returned value includes lag $0$ (i.e., $1$) so the size of the acf vector is $(nlags + 1,)$.<br>
            Defaults to `None`.
        fft (bool, optional):
            If `True`, computes the ACF via FFT.<br>
            Defaults to `True`.
        bartlett_confint (bool, optional):
            Confidence intervals for ACF values are generally placed at 2 standard errors around $r_k$. The formula used for standard error depends upon the situation. If the autocorrelations are being used to test for randomness of residuals as part of the ARIMA routine, the standard errors are determined assuming the residuals are white noise. The approximate formula for any lag is that standard error of each $r_k = \frac{1}{\sqrt{N}}$. See section 9.4 of [2] for more details on the $\frac{1}{\sqrt{N}}$ result. For more elementary discussion, see section 5.3.2 in [3]. For the ACF of raw data, the standard error at a lag $k$ is found as if the right model was an $\text{MA}(k-1)$. This allows the possible interpretation that if all autocorrelations past a certain lag are within the limits, the model might be an $\text{MA}$ of order defined by the last significant autocorrelation. In this case, a moving average model is assumed for the data and the standard errors for the confidence intervals should be generated using Bartlett's formula. For more details on the Bartlett formula result, see section 7.2 in [2].<br>
            Defaults to `True`.
        missing (VALID_ACF_MISSING_OPTIONS, optional):
            A string in `["none", "raise", "conservative", "drop"]` specifying how the `NaN`'s are to be treated.

            - `"none"` performs no checks.
            - `"raise"` raises an exception if `NaN` values are found.
            - `"drop"` removes the missing observations and then estimates the autocovariances treating the non-missing as contiguous.
            - `"conservative"` computes the autocovariance using nan-ops so that nans are removed when computing the mean and cross-products that are used to estimate the autocovariance.

            When using `"conservative"`, $n$ is set to the number of non-missing observations.<br>
            Defaults to `"none"`.
        qstat (bool, optional):
            If `True`, also returns the Ljung-Box $Q$ statistic and corresponding p-values for each autocorrelation coefficient; see the *Returns* section for details.<br>
            Defaults to `False`.
        alpha (Optional[float], optional):
            If a number is given, the confidence intervals for the given level are returned. For instance, if `alpha=0.05`, $95\%$ confidence intervals are returned, where the standard deviation is computed according to Bartlett's formula.<br>
            Defaults to `None`.

    Returns:
        (Union[NDArray[np.float64], tuple[NDArray[np.float64], NDArray[np.float64]], tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]], tuple[NDArray[np.float64], NDArray[np.float64], NDArray[np.float64], NDArray[np.float64]]]):
            Depending on `qstat` and `alpha`, returns the following values:
            - `acf` (NDArray[np.float64]): The autocorrelation function for lags `0, 1, ..., nlags`.
            - `confint` (NDArray[np.float64], optional): Confidence intervals for the ACF (returned if `alpha` is not `None`).
            - `qstat` (NDArray[np.float64], optional): The Ljung-Box Q-statistic (returned if `qstat` is `True`).
            - `pvalues` (NDArray[np.float64], optional): P-values associated with the Q-statistics (returned if `qstat` is `True`).

    ???+ example "Examples"

        ```pycon {.py .python linenums="1" title="Setup"}
        >>> from ts_stat_tests.correlation.algorithms import acf
        >>> from ts_stat_tests.utils.data import data_airline, data_macrodata
        >>> data_macro = data_macrodata.realgdp.values
        >>> data_airline = data_airline.values

        ```

        ```pycon {.py .python linenums="1" title="Example 1: Basic ACF"}
        >>> res_acf = acf(data_macro, nlags=5)
        >>> print(res_acf[1:6])
        [0.98685781 0.97371846 0.96014366 0.94568545 0.93054425]

        ```

        ```pycon {.py .python linenums="1" title="Example 2: ACF with Confidence Intervals and Q-Statistics"}
        >>> res_acf, res_confint, res_qstat, res_pvalues = acf(
        ...     data_macro, nlags=5, qstat=True, alpha=0.05
        ... )
        >>> print(res_acf[1:6])
        [0.98685781 0.97371846 0.96014366 0.94568545 0.93054425]
        >>> print(res_confint[1:6])
        [[0.84929531 1.12442032]
         [0.73753616 1.20990077]
         [0.65738012 1.2629072 ]
         [0.5899385  1.30143239]
         [0.53004062 1.33104787]]
        >>> print(res_qstat[:5])
        [200.63546275 396.93562234 588.75493948 775.77587865 957.77058934]
        >>> print(res_pvalues[:5])
        [1.51761209e-045 6.40508316e-087 2.76141970e-127 1.35591614e-166
         8.33354393e-205]

        ```

        ```pycon {.py .python linenums="1" title="Example 3: ACF without FFT"}
        >>> res_acf, res_confint, res_qstat, res_pvalues = acf(
        ...     data_macro, nlags=5, qstat=True, alpha=0.05, fft=False
        ... )
        >>> print(res_acf[1:6])
        [0.98685781 0.97371846 0.96014366 0.94568545 0.93054425]
        >>> print(res_qstat[:5])
        [200.63546275 396.93562234 588.75493948 775.77587865 957.77058934]

        ```

        ```pycon {.py .python linenums="1" title="Example 4: ACF with Adjusted Denominator"}
        >>> res_acf, res_confint = acf(data_macro, nlags=5, adjusted=True, alpha=0.05)
        >>> print(res_acf[1:6])
        [0.99174325 0.98340721 0.97454582 0.9646942  0.95404284]
        >>> print(res_confint[1:6])
        [[0.85418074 1.12930575]
         [0.74645168 1.22036273]
         [0.66999819 1.27909344]
         [0.60595482 1.32343358]
         [0.54917796 1.35890772]]

        ```

    ??? equation "Calculation"

        The ACF at lag $k$ is defined as:

        $$
        ACF(k) = \frac{ Cov(Y_t, Y_{t-k}) }{ \sqrt{Var(Y_t) \times Var(Y_{t-k})} }
        $$

        where:

        - $Y_t$ and $Y_{t-k}$ are the values of the time series at time $t$ and time $t-k$, respectively,
        - $Cov(Y_t, Y_{t-k})$ is the covariance between the two values, and
        - $Var(Y_t)$ and $Var(Y_{t-k})$ are the variances of the two values.

        For a stationary series, this simplifies to:

        $$
        ACF(k) = \frac{ Cov(Y_t, Y_{t-k}) }{ Var(Y_t) }
        $$
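        As a quick numerical illustration of this definition (a minimal sketch, assuming `data_macro` from the setup block above; the manual formula below is the standard sample estimator, not necessarily the exact internal `statsmodels` code path):

        ```pycon {.py .python linenums="1" title="Manual check of the lag-1 autocorrelation"}
        >>> import numpy as np
        >>> centered = data_macro - data_macro.mean()  # demean the series
        >>> manual_r1 = (centered[1:] * centered[:-1]).sum() / (centered**2).sum()
        >>> bool(np.isclose(manual_r1, acf(data_macro, nlags=1)[1]))
        True
        ```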
    ??? success "Credit"
        - All credit goes to the [`statsmodels`](https://www.statsmodels.org/) library.

    ??? question "References"
        1. Parzen, E., 1963. On spectral analysis with missing observations and amplitude modulation. Sankhya: The Indian Journal of Statistics, Series A, pp.383-392.
        2. Brockwell and Davis, 1987. Time Series Theory and Methods.
        3. Brockwell and Davis, 2010. Introduction to Time Series and Forecasting, 2nd edition.

    ??? tip "See Also"
        - [`statsmodels.tsa.stattools.acf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.acf.html): Estimate the autocorrelation function.
        - [`statsmodels.tsa.stattools.pacf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf.html): Partial autocorrelation estimation.
        - [`statsmodels.tsa.stattools.ccf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.ccf.html): The cross-correlation function.
        - [`ts_stat_tests.correlation.algorithms.acf`][ts_stat_tests.correlation.algorithms.acf]: Estimate the autocorrelation function.
        - [`ts_stat_tests.correlation.algorithms.pacf`][ts_stat_tests.correlation.algorithms.pacf]: Partial autocorrelation estimation.
        - [`ts_stat_tests.correlation.algorithms.ccf`][ts_stat_tests.correlation.algorithms.ccf]: The cross-correlation function.
    """
    return st_acf(
        x=x,
        adjusted=adjusted,
        nlags=nlags,
        qstat=qstat,
        fft=fft,
        alpha=alpha,
        bartlett_confint=bartlett_confint,
        missing=missing,
    )


@overload
def pacf(
    x: ArrayLike1D,
    nlags: Optional[int] = None,
    method: VALID_PACF_METHOD_OPTIONS = "ywadjusted",
    *,
    alpha: None = None,
) -> NDArray[np.float64]: ...
@overload
def pacf(
    x: ArrayLike1D,
    nlags: Optional[int] = None,
    method: VALID_PACF_METHOD_OPTIONS = "ywadjusted",
    *,
    alpha: float,
) -> tuple[NDArray[np.float64], NDArray[np.float64]]: ...
@typechecked
def pacf(
    x: ArrayLike1D,
    nlags: Optional[int] = None,
    method: VALID_PACF_METHOD_OPTIONS = "ywadjusted",
    *,
    alpha: Optional[float] = None,
) -> Union[NDArray[np.float64], tuple[NDArray[np.float64], NDArray[np.float64]]]:
    r"""
    !!! note "Summary"
        The partial autocorrelation function (PACF) is a statistical tool used in time series forecasting to identify the direct relationship between two variables, controlling for the effect of the other variables in the time series. In other words, the PACF measures the correlation between a time series and its lagged values, while controlling for the effects of other intermediate lags.

        This function will implement the [`pacf()`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf.html) function from the [`statsmodels`](https://www.statsmodels.org) library.

    ???+ abstract "Details"

        Based on simulation evidence across a range of low-order ARMA models, the best methods based on root MSE are Yule-Walker (MLE), Levinson-Durbin (MLE) and Burg, respectively. The estimators with the lowest bias included these three in addition to OLS and OLS-adjusted. Yule-Walker (adjusted) and Levinson-Durbin (adjusted) performed consistently worse than the other options.

        The PACF is a plot of the correlation between a time series and its lagged values, controlling for the effect of other lags. The PACF is useful for identifying the order of an autoregressive (AR) model, which is a type of model used in time series forecasting. The order of an AR model is the number of lags that are used to predict future values.

        By default, the PACF is calculated using the Yule-Walker equations, which are a set of linear equations that describe the relationship between a time series and its lagged values. The PACF at lag $k$ is the correlation between $Y_t$ and $Y_{t-k}$ that remains after removing the linear effects of the intermediate lags $Y_{t-1}, Y_{t-2}, \dots, Y_{t-k+1}$.

        The PACF is typically plotted as a graph, with the lag on the `x`-axis and the correlation coefficient on the `y`-axis. If the PACF shows a strong positive correlation at lag $k$, this means that values in the time series at time $t$ and time $t-k$ are strongly related, after controlling for the effects of intermediate lags. This suggests that past values can be used to predict future values using an AR model with an order of $k$.

        Overall, the partial autocorrelation function is a valuable tool in time series forecasting, as it helps to identify the order of an autoregressive model and to control for the effects of intermediate lags. By identifying the direct relationship between two variables, the PACF can help to improve the accuracy of time series forecasting models.

        The PACF can be calculated using the `pacf()` function in the `statsmodels` package in Python. The function takes a time series array as input and returns an array of partial autocorrelation coefficients at different lags. The significance of the partial autocorrelation coefficients can be tested using the same Ljung-Box test as for the ACF. If the p-value of the test is less than a certain significance level (e.g. $0.05$), then there is evidence of significant partial autocorrelation in the time series up to the specified lag.

    Params:
        x (ArrayLike1D):
            Observations of time series for which pacf is calculated.
        nlags (Optional[int], optional):
            Number of lags to return autocorrelation for. If not provided, uses $\min(10 \times \log_{10}(n_{obs}), \lfloor \frac{n_{obs}}{2} \rfloor - 1)$ (calculated with: `min(int(10 * np.log10(nobs)), nobs // 2 - 1)`). The returned value includes lag `0` (i.e., `1`) so the size of the pacf vector is $(nlags + 1,)$.<br>
            Defaults to `None`.
        method (VALID_PACF_METHOD_OPTIONS, optional):
            Specifies which method for the calculations to use.

            - `"yw"` or `"ywadjusted"`: Yule-Walker with sample-size adjustment in denominator for acovf. Default.
            - `"ywm"` or `"ywmle"`: Yule-Walker without adjustment.
            - `"ols"`: regression of time series on lags of it and on constant.
            - `"ols-inefficient"`: regression of time series on lags using a single common sample to estimate all pacf coefficients.
            - `"ols-adjusted"`: regression of time series on lags with a bias adjustment.
            - `"ld"` or `"ldadjusted"`: Levinson-Durbin recursion with bias correction.
            - `"ldb"` or `"ldbiased"`: Levinson-Durbin recursion without bias correction.
            - `"burg"`: Burg's partial autocorrelation estimator.<br>

            Defaults to `"ywadjusted"`.
        alpha (Optional[float], optional):
            If a number is given, the confidence intervals for the given level are returned. For instance, if `alpha=0.05`, $95\%$ confidence intervals are returned, where the standard deviation is computed according to $\frac{1}{\sqrt{\text{len}(x)}}$.<br>
            Defaults to `None`.

    Returns:
        (Union[NDArray[np.float64], tuple[NDArray[np.float64], NDArray[np.float64]]]):
            Depending on `alpha`, returns the following values:
            - `pacf` (NDArray[np.float64]): The partial autocorrelations for lags `0, 1, ..., nlags`.
            - `confint` (NDArray[np.float64], optional): Confidence intervals for the PACF (returned if `alpha` is not `None`).

    ???+ example "Examples"

        ```pycon {.py .python linenums="1" title="Setup"}
        >>> from ts_stat_tests.correlation.algorithms import pacf
        >>> from ts_stat_tests.utils.data import data_airline
        >>> data = data_airline.values

        ```

        ```pycon {.py .python linenums="1" title="Example 1: Basic PACF using Yule-Walker adjusted"}
        >>> res_pacf = pacf(data, nlags=5)
        >>> print(res_pacf[1:6])
        [ 0.95467704 -0.26527732  0.05546955  0.10885622  0.08112579]

        ```

        ```pycon {.py .python linenums="1" title="Example 2: PACF with confidence intervals"}
        >>> res_pacf, res_confint = pacf(data, nlags=5, alpha=0.05)
        >>> print(res_confint[1:3])
        [[ 0.79134671  1.11800737]
         [-0.42860765 -0.10194698]]

        ```

        ```pycon {.py .python linenums="1" title="Example 3: PACF using OLS method"}
        >>> res_pacf_ols = pacf(data, nlags=5, method="ols")
        >>> print(res_pacf_ols[1:6])
        [ 0.95893198 -0.32983096  0.2018249   0.14500798  0.25848232]

        ```

        ```pycon {.py .python linenums="1" title="Example 4: PACF using Levinson-Durbin recursion with bias correction"}
        >>> res_pacf_ld = pacf(data, nlags=5, method="ldadjusted")
        >>> print(res_pacf_ld[1:6])
        [ 0.95467704 -0.26527732  0.05546955  0.10885622  0.08112579]

        ```

    ??? equation "Calculation"

        The PACF at lag $k$ is defined as:

        $$
        PACF(k) = \text{Corr}\left( Y_t, Y_{t-k} \mid Y_{t-1}, Y_{t-2}, \dots, Y_{t-k+1} \right)
        $$

        where:

        - $Y_t$ and $Y_{t-k}$ are the values of the time series at time $t$ and time $t-k$, respectively,
        - $Y_{t-1}, Y_{t-2}, \dots, Y_{t-k+1}$ are the values of the time series at the intervening lags, and
        - $\text{Corr}()$ denotes the correlation coefficient between two variables.
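        At the first lag there are no intermediate lags to control for, so the lag-1 PACF reduces to the lag-1 autocorrelation. As a quick cross-check (a minimal sketch, assuming the airline setup above and the default `"ywadjusted"` method, which pairs with the `adjusted=True` autocorrelation estimator):

        ```pycon {.py .python linenums="1" title="Lag-1 PACF equals the lag-1 autocorrelation"}
        >>> import numpy as np
        >>> from ts_stat_tests.correlation.algorithms import acf
        >>> bool(np.isclose(pacf(data, nlags=1)[1], acf(data, nlags=1, adjusted=True)[1]))
        True
        ```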
    ??? success "Credit"
        - All credit goes to the [`statsmodels`](https://www.statsmodels.org/) library.

    ??? question "References"
        1. Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015). Time series analysis: forecasting and control. John Wiley & Sons, p. 66.
        2. Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series and forecasting. Springer.

    ??? tip "See Also"
        - [`statsmodels.tsa.stattools.acf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.acf.html): Estimate the autocorrelation function.
        - [`statsmodels.tsa.stattools.pacf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf.html): Partial autocorrelation estimation.
        - [`statsmodels.tsa.stattools.ccf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.ccf.html): The cross-correlation function.
        - [`statsmodels.tsa.stattools.pacf_yw`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf_yw.html): Partial autocorrelation estimation using Yule-Walker.
        - [`statsmodels.tsa.stattools.pacf_ols`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf_ols.html): Partial autocorrelation estimation using OLS.
        - [`statsmodels.tsa.stattools.pacf_burg`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf_burg.html): Partial autocorrelation estimation using Burg's method.
        - [`ts_stat_tests.correlation.algorithms.acf`][ts_stat_tests.correlation.algorithms.acf]: Estimate the autocorrelation function.
        - [`ts_stat_tests.correlation.algorithms.pacf`][ts_stat_tests.correlation.algorithms.pacf]: Partial autocorrelation estimation.
        - [`ts_stat_tests.correlation.algorithms.ccf`][ts_stat_tests.correlation.algorithms.ccf]: The cross-correlation function.
    """
    return st_pacf(
        x=x,
        nlags=nlags,
        method=method,
        alpha=alpha,
    )


@overload
def ccf(
    x: ArrayLike,
    y: ArrayLike,
    adjusted: bool = True,
    fft: bool = True,
    *,
    nlags: Optional[int] = None,
    alpha: None = None,
) -> NDArray[np.float64]: ...
@overload
def ccf(
    x: ArrayLike,
    y: ArrayLike,
    adjusted: bool = True,
    fft: bool = True,
    *,
    nlags: Optional[int] = None,
    alpha: float,
) -> tuple[NDArray[np.float64], NDArray[np.float64]]: ...
@typechecked
def ccf(
    x: ArrayLike,
    y: ArrayLike,
    adjusted: bool = True,
    fft: bool = True,
    *,
    nlags: Optional[int] = None,
    alpha: Optional[float] = None,
) -> Union[NDArray[np.float64], tuple[NDArray[np.float64], NDArray[np.float64]]]:
    r"""
    !!! note "Summary"
        The cross-correlation function (CCF) is a statistical tool used in time series forecasting to measure the correlation between two time series at different lags. It is used to study the relationship between two time series, and can help to identify lead-lag relationships and causal effects.

        This function will implement the [`ccf()`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.ccf.html) function from the [`statsmodels`](https://www.statsmodels.org) library.

    ???+ abstract "Details"

        If `adjusted` is `True`, the denominator for the cross-covariance is adjusted.

        The CCF measures the correlation between two time series at different lags. It is calculated as the ratio of the covariance between the two series at lag $k$ to the product of their standard deviations. The CCF is typically plotted as a graph, with the lag on the `x`-axis and the correlation coefficient on the `y`-axis.

        If the CCF shows a strong positive correlation at lag $k$, this means that changes in one time series at time $t$ are strongly related to changes in the other time series at time $t-k$. This suggests a lead-lag relationship between the two time series, where changes in one series lead changes in the other series by a certain number of periods. The CCF can be used to estimate the time lag between the two time series.

        The CCF can also help to identify causal relationships between two time series. If the CCF shows a strong positive correlation at lag $k$, and the lag is consistent with a causal relationship between the two time series, this suggests that changes in one time series are causing changes in the other time series.

        Overall, the cross-correlation function is a valuable tool in time series forecasting, as it helps to study the relationship between two time series and to identify lead-lag relationships and causal effects. By identifying the relationship between two time series, the CCF can help to improve the accuracy of time series forecasting models.

        The CCF can be calculated using the `ccf()` function in the `statsmodels` package in Python. The function takes two time series arrays as input and returns an array of cross-correlation coefficients at different lags. The significance of the cross-correlation coefficients can be tested with portmanteau-style tests similar to the Ljung-Box test, such as the Box-Pierce test or the Breusch-Godfrey test. These can be performed using the `acorr_ljungbox()` function (with `boxpierce=True`) and the `acorr_breusch_godfrey()` function in the `statsmodels` package, respectively. If the p-value of the test is less than a certain significance level (e.g. $0.05$), then there is evidence of significant cross-correlation between the two time series at the specified lag.

    Params:
        x (ArrayLike):
            The time series data to use in the calculation.
        y (ArrayLike):
            The time series data to use in the calculation.
        adjusted (bool, optional):
            If `True`, then denominators for cross-correlation are $n-k$, otherwise $n$.<br>
            Defaults to `True`.
        fft (bool, optional):
            If `True`, use FFT convolution. This method should be preferred for long time series.<br>
            Defaults to `True`.
        nlags (Optional[int], optional):
            Number of lags to return cross-correlations for. If not provided, the number of lags equals `len(x)`.<br>
            Defaults to `None`.
        alpha (Optional[float], optional):
            If a number is given, the confidence intervals for the given level are returned. For instance, if `alpha=0.05`, $95\%$ confidence intervals are returned, where the standard deviation is computed according to `1/sqrt(len(x))`.<br>
            Defaults to `None`.

    Returns:
        (Union[NDArray[np.float64], tuple[NDArray[np.float64], NDArray[np.float64]]]):
            Depending on `alpha`, returns the following values:
            - `ccf` (NDArray[np.float64]): The cross-correlation function of `x` and `y` for lags `0, 1, ..., nlags`.
            - `confint` (NDArray[np.float64], optional): Confidence intervals for the CCF (returned if `alpha` is not `None`).

    ???+ example "Examples"

        ```pycon {.py .python linenums="1" title="Setup"}
        >>> from ts_stat_tests.correlation.algorithms import ccf
        >>> from ts_stat_tests.utils.data import data_airline
        >>> data = data_airline.values

        ```

        ```pycon {.py .python linenums="1" title="Example 1: Basic CCF"}
        >>> res = ccf(data, data + 1, adjusted=True)
        >>> print(res[:5])
        [1.         0.95467704 0.88790688 0.82384458 0.774129  ]

        ```

        ```pycon {.py .python linenums="1" title="Example 2: CCF with confidence intervals"}
        >>> res_ccf, res_conf = ccf(data, data + 1, alpha=0.05)
        >>> print(res_ccf[:5])
        [1.         0.95467704 0.88790688 0.82384458 0.774129  ]
        >>> print(res_conf[:5])
        [[0.83666967 1.16333033]
         [0.79134671 1.11800737]
         [0.72457654 1.05123721]
         [0.66051425 0.98717492]
         [0.61079867 0.93745933]]

        ```

        ```pycon {.py .python linenums="1" title="Example 3: CCF without adjustment"}
        >>> res_ccf_no_adj = ccf(data, data + 1, adjusted=False)
        >>> print(res_ccf_no_adj[:5])
        [1.         0.94804734 0.87557484 0.80668116 0.75262542]

        ```

        ```pycon {.py .python linenums="1" title="Example 4: CCF without FFT"}
        >>> res_ccf_no_fft = ccf(data, data + 1, fft=False)
        >>> print(res_ccf_no_fft[:5])
        [1.         0.95467704 0.88790688 0.82384458 0.774129  ]

        ```

    ??? equation "Calculation"

        The CCF at lag $k$ is defined as:

        $$
        CCF(k) = \text{Corr}(X_t, Y_{t-k})
        $$

        where:

        - $X_t$ and $Y_{t-k}$ are the values of the two time series at time $t$ and time $t-k$, respectively, and
        - $\text{Corr}()$ denotes the correlation coefficient between two variables.
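        As a quick numerical illustration of this definition (a minimal sketch, assuming the airline setup above; the manual formula below is the standard adjusted sample estimator, not necessarily the exact internal `statsmodels` code path):

        ```pycon {.py .python linenums="1" title="Manual check of the lag-1 cross-correlation"}
        >>> import numpy as np
        >>> x, y = np.asarray(data, dtype=float), np.asarray(data, dtype=float) + 1
        >>> xo, yo = x - x.mean(), y - y.mean()  # demean both series
        >>> manual_r1 = (xo[1:] * yo[:-1]).sum() / (len(x) - 1) / (x.std() * y.std())
        >>> bool(np.isclose(manual_r1, ccf(x, y, adjusted=True)[1]))
        True
        ```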
    ??? success "Credit"
        - All credit goes to the [`statsmodels`](https://www.statsmodels.org/) library.

    ??? tip "See Also"
        - [`statsmodels.tsa.stattools.acf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.acf.html): Estimate the autocorrelation function.
        - [`statsmodels.tsa.stattools.pacf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.pacf.html): Partial autocorrelation estimation.
        - [`statsmodels.tsa.stattools.ccf`](https://www.statsmodels.org/stable/generated/statsmodels.tsa.stattools.ccf.html): The cross-correlation function.
        - [`ts_stat_tests.correlation.algorithms.acf`][ts_stat_tests.correlation.algorithms.acf]: Estimate the autocorrelation function.
        - [`ts_stat_tests.correlation.algorithms.pacf`][ts_stat_tests.correlation.algorithms.pacf]: Partial autocorrelation estimation.
        - [`ts_stat_tests.correlation.algorithms.ccf`][ts_stat_tests.correlation.algorithms.ccf]: The cross-correlation function.
    """
    return st_ccf(
        x=x,
        y=y,
        adjusted=adjusted,
        fft=fft,
        nlags=nlags,
        alpha=alpha,
    )


@typechecked
def lb(
    x: ArrayLike,
    lags: Optional[Union[int, ArrayLike]] = None,
    boxpierce: bool = False,
    model_df: int = 0,
    period: Optional[int] = None,
    return_df: bool = True,
    auto_lag: bool = False,
) -> pd.DataFrame:
    r"""
    !!! note "Summary"
        The Ljung-Box test is a statistical test used in time series forecasting to test for the presence of autocorrelation in the residuals of a model. The test is based on the autocorrelation function (ACF) of the residuals, and can be used to assess the adequacy of a time series model and to identify areas for improvement.

        This function will implement the [`acorr_ljungbox()`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_ljungbox.html) function from the [`statsmodels`](https://www.statsmodels.org) library.

    ???+ abstract "Details"

        The Ljung-Box and Box-Pierce statistics differ in how they scale the autocorrelation function; the Ljung-Box test has better finite-sample properties.

        Under the null hypothesis, the test statistic follows a chi-squared distribution with degrees of freedom equal to $m-p$, where $p$ is the number of parameters estimated in fitting the time series model.

        The Ljung-Box test is performed by calculating the autocorrelation function (ACF) of the residuals from a time series model, and then comparing the ACF values to the expected values under the null hypothesis of no autocorrelation. The test statistic is calculated as the sum of the squared autocorrelations up to a given lag, and is compared to a chi-squared distribution with degrees of freedom equal to the number of lags tested.

        If the test statistic is greater than the critical value from the chi-squared distribution, then the null hypothesis of no autocorrelation is rejected, indicating that there is evidence of autocorrelation in the residuals. This suggests that the time series model is inadequate, and that additional terms may need to be added to the model to account for the remaining autocorrelation.

        If the test statistic is less than the critical value from the chi-squared distribution, then the null hypothesis of no autocorrelation is not rejected, indicating that there is no evidence of autocorrelation in the residuals. This suggests that the time series model is adequate, and that no further improvements are needed.

        Overall, the Ljung-Box test is a valuable tool in time series forecasting, as it helps to assess the adequacy of a time series model and to identify areas for improvement. By testing for autocorrelation in the residuals, the test helps to ensure that the model is accurately capturing the underlying patterns in the time series data.

        The Ljung-Box test can be calculated using the `acorr_ljungbox()` function in the `statsmodels` package in Python. The function takes a time series array and the maximum lag $m$ as input, and returns an array of $Q$-statistics and associated p-values for each lag up to $m$. If the p-value of the test is less than a certain significance level (e.g. $0.05$), then there is evidence of significant autocorrelation in the time series up to the specified lag.

    Params:
        x (ArrayLike):
            The data series. The data is demeaned before the test statistic is computed.
        lags (Optional[Union[int, ArrayLike]], optional):
            If `lags` is an integer (`int`), then it is taken to be the largest lag that is included, and the test results are reported for all smaller lag lengths. If `lags` is a list or array, then all lags are included up to the largest lag in the list, however only the tests for the lags in the list are reported. If `lags` is `None`, then the default maxlag is currently $\min(\lfloor \frac{n_{obs}}{2} \rfloor - 2, 40)$ (calculated with: `min(nobs // 2 - 2, 40)`). The default number of `lags` changes if `period` is set.
            !!! deprecation "Deprecation"
                After `statsmodels` version `0.12`, this calculation will change from

                $$
                \min\left(\lfloor \frac{n_{obs}}{2} \rfloor - 2, 40\right)
                $$

                to

                $$
                \min\left(10, \lfloor \frac{n_{obs}}{5} \rfloor\right)
                $$

            Defaults to `None`.
        boxpierce (bool, optional):
            If `True`, then in addition to the results of the Ljung-Box test, the Box-Pierce test results are also returned.<br>
            Defaults to `False`.
        model_df (int, optional):
            Number of degrees of freedom consumed by the model. In an ARMA model, this value is usually $p+q$ where $p$ is the AR order and $q$ is the MA order. This value is subtracted from the degrees-of-freedom used in the test so that the adjusted dof for the statistics are $\text{lags} - \text{model\_df}$. If $\text{lags} - \text{model\_df} \le 0$, then `NaN` is returned.<br>
            Defaults to `0`.
        period (Optional[int], optional):
            The period of a seasonal time series. Used to compute the max lag for seasonal data, which uses $\min(2 \times period, \lfloor \frac{n_{obs}}{5} \rfloor)$ (calculated with: `min(2 * period, nobs // 5)`) if set. If `None`, then the default rule is used to set the number of lags. When set, must be $\ge 2$.<br>
            Defaults to `None`.
        return_df (bool, optional):
            Flag indicating whether to return the result as a single DataFrame with columns `lb_stat`, `lb_pvalue`, and optionally `bp_stat` and `bp_pvalue`. Set to `True` to return the DataFrame, or `False` to return the 2-4 output arrays instead.
            !!! deprecation "Deprecation"
                After `statsmodels` version `0.12`, this will become the only return method.

            Defaults to `True`.
        auto_lag (bool, optional):
            Flag indicating whether to automatically determine the optimal lag length based on a threshold of the maximum correlation value.<br>
            Defaults to `False`.

    Returns:
        (pd.DataFrame):
            A DataFrame indexed by lag, containing the following columns (depending on `boxpierce`):
            - `lb_stat` (float): The Ljung-Box test statistic.
            - `lb_pvalue` (float): The p-value for the Ljung-Box test.
            - `bp_stat` (float, optional): The Box-Pierce test statistic (included if `boxpierce` is `True`).
            - `bp_pvalue` (float, optional): The p-value for the Box-Pierce test (included if `boxpierce` is `True`).

            If `return_df=False` is passed through to `statsmodels`, the same values are instead returned as a tuple of arrays.

    ???+ example "Examples"

        ```pycon {.py .python linenums="1" title="Setup"}
        >>> from statsmodels import api as sm
        >>> from ts_stat_tests.correlation.algorithms import lb
        >>> from ts_stat_tests.utils.data import data_airline
        >>> data = data_airline.values
        >>> res = sm.tsa.ARIMA(data, order=(1, 0, 1)).fit()

        ```

        ```pycon {.py .python linenums="1" title="Example 1: Ljung-Box test on ARIMA residuals"}
        >>> results = lb(res.resid, lags=[10], return_df=True)
        >>> print(results)
              lb_stat  lb_pvalue
        10  13.844361    0.18021

        ```

        ```pycon {.py .python linenums="1" title="Example 2: Ljung-Box and Box-Pierce tests with multiple lags"}
        >>> results = lb(res.resid, lags=[5, 10, 15], boxpierce=True, return_df=True)
        >>> print(results)
              lb_stat     lb_pvalue    bp_stat     bp_pvalue
        5    6.274986  2.803736e-01   6.019794  3.042976e-01
        10  13.844361  1.802099e-01  13.080554  2.192028e-01
        15  86.182531  5.083111e-12  78.463124  1.332482e-10

        ```

        ```pycon {.py .python linenums="1" title="Example 3: Ljung-Box test with specific lag"}
        >>> results = lb(res.resid, lags=[5], return_df=True)
        >>> print(results)
            lb_stat  lb_pvalue
        5  6.274986   0.280374

        ```

    ??? equation "Calculation"

        The Ljung-Box test statistic is calculated as:

        $$
        Q(m) = n(n+2) \sum_{k=1}^m \frac{r_k^2}{n-k}
        $$

        where:

        - $n$ is the sample size,
        - $m$ is the maximum lag being tested,
        - $r_k$ is the sample autocorrelation at lag $k$, and
        - $\sum$ denotes the sum over $k$ from $1$ to $m$.
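        As a quick numerical illustration of this formula (a minimal sketch, assuming the fitted ARIMA residuals `res.resid` from the setup block above; the manual computation mirrors the definition, not necessarily the exact internal `statsmodels` code path):

        ```pycon {.py .python linenums="1" title="Manual check of the Q-statistic at lag 5"}
        >>> import numpy as np
        >>> from ts_stat_tests.correlation.algorithms import acf
        >>> r = acf(res.resid, nlags=5)[1:]  # sample autocorrelations r_1..r_5
        >>> n = len(res.resid)
        >>> manual_q = n * (n + 2) * np.sum(r**2 / (n - np.arange(1, 6)))
        >>> bool(np.isclose(manual_q, lb(res.resid, lags=[5]).lb_stat.iloc[0]))
        True
        ```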
    ??? success "Credit"
        - All credit goes to the [`statsmodels`](https://www.statsmodels.org/) library.

    ??? question "References"
        1. Greene, W. H. "Econometric Analysis," 5th ed., Pearson, 2003.
        2. Escanciano, J. Carlos and Lobato, Ignacio N. "An automatic Portmanteau test for serial correlation". Journal of Econometrics, Volume 151, 2009.

    ??? tip "See Also"
        - [`statsmodels.regression.linear_model.OLS.fit`](https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.OLS.fit.html): Fit a linear model.
        - [`statsmodels.regression.linear_model.RegressionResults`](https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.RegressionResults.html): The output results of a linear regression model.
        - [`statsmodels.stats.diagnostic.acorr_ljungbox`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_ljungbox.html): Ljung-Box test for serial correlation.
        - [`statsmodels.stats.diagnostic.acorr_lm`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_lm.html): Lagrange Multiplier tests for autocorrelation.
        - [`statsmodels.stats.diagnostic.acorr_breusch_godfrey`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_breusch_godfrey.html): Breusch-Godfrey test for serial correlation.
        - [`ts_stat_tests.correlation.algorithms.lb`][ts_stat_tests.correlation.algorithms.lb]: Ljung-Box test of autocorrelation in residuals.
        - [`ts_stat_tests.correlation.algorithms.lm`][ts_stat_tests.correlation.algorithms.lm]: Lagrange Multiplier tests for autocorrelation.
        - [`ts_stat_tests.correlation.algorithms.bglm`][ts_stat_tests.correlation.algorithms.bglm]: Breusch-Godfrey Lagrange Multiplier tests for residual autocorrelation.
    """
    return acorr_ljungbox(
        x=x,
        lags=lags,
        boxpierce=boxpierce,
        model_df=model_df,
        period=period,
        return_df=return_df,
        auto_lag=auto_lag,
    )


@overload
def lm(
    resid: ArrayLike,
    nlags: Optional[int] = None,
    *,
    store: Literal[False] = False,
    period: Optional[int] = None,
    ddof: int = 0,
    cov_type: VALID_LM_COV_TYPE_OPTIONS = "nonrobust",
    cov_kwargs: Optional[dict] = None,
) -> tuple[float, Union[float, NDArray[np.float64]], float, float]: ...
@overload
def lm(
    resid: ArrayLike,
    nlags: Optional[int] = None,
    *,
    store: Literal[True],
    period: Optional[int] = None,
    ddof: int = 0,
    cov_type: VALID_LM_COV_TYPE_OPTIONS = "nonrobust",
    cov_kwargs: Optional[dict] = None,
) -> tuple[float, Union[float, NDArray[np.float64]], float, float, ResultsStore]: ...
@typechecked
def lm(
    resid: ArrayLike,
    nlags: Optional[int] = None,
    *,
    store: bool = False,
    period: Optional[int] = None,
    ddof: int = 0,
    cov_type: VALID_LM_COV_TYPE_OPTIONS = "nonrobust",
    cov_kwargs: Optional[dict] = None,
) -> Union[
    tuple[float, Union[float, NDArray[np.float64]], float, float],
    tuple[float, Union[float, NDArray[np.float64]], float, float, ResultsStore],
]:
    r"""
    !!! note "Summary"
        The Lagrange Multiplier (LM) test is a statistical test used in time series forecasting to test for the presence of autocorrelation in a model. The test is based on an auxiliary regression of the model residuals on their own lagged values, and can be used to assess the adequacy of the model and to identify areas for improvement.

        This function implements the [`acorr_lm()`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_lm.html) function from the [`statsmodels`](https://www.statsmodels.org) library.

    ???+ abstract "Details"

        This is a generic Lagrange Multiplier (LM) test for autocorrelation. It returns Engle's ARCH test if `resid` is the squared residual array. The Breusch-Godfrey test is a variation on this LM test with additional exogenous variables in the auxiliary regression.

        The LM test proceeds by:

        - Fitting a time series model to the data and obtaining the residuals.
        - Running an auxiliary regression of these residuals on their past `nlags` values (and any relevant exogenous variables).
        - Computing the LM statistic as $(n_{obs} - ddof) \times R^2$ from this auxiliary regression.

        Under the null hypothesis that the autocorrelations up to the specified lag are zero (no serial correlation in the residuals), the LM statistic is asymptotically distributed as a chi-squared random variable with degrees of freedom equal to the number of lagged residual terms included in the auxiliary regression (i.e. the number of lags being tested, adjusted for any restrictions implied by the model).

        If the test statistic is greater than the critical value from the chi-squared distribution (or equivalently, if the p-value is less than a chosen significance level such as $0.05$), then the null hypothesis of no autocorrelation is rejected, indicating that there is evidence of autocorrelation in the residuals.

        The LM test is a generalization of the Durbin-Watson test, which is a simpler test that only tests for first-order autocorrelation.

    Params:
        resid (ArrayLike):
            Time series to test.
        nlags (Optional[int], optional):
            Highest lag to use. Defaults to `None`.
            !!! deprecation "Deprecation"
                The behavior of this parameter will change after `statsmodels` version `0.12`.
        store (bool, optional):
            If `True`, then the intermediate results are also returned. Defaults to `False`.
        period (Optional[int], optional):
            The period of a seasonal time series. Used to compute the max lag for seasonal data, which uses $\min(2 \times period, \lfloor \frac{n_{obs}}{5} \rfloor)$ (calculated with: `min(2 * period, nobs // 5)`) if set. If `None`, then the default rule is used to set the number of lags. When set, must be $\ge 2$. Defaults to `None`.
        ddof (int, optional):
            The number of degrees of freedom consumed by the model used to produce `resid`. Defaults to `0`.
        cov_type (VALID_LM_COV_TYPE_OPTIONS, optional):
            Covariance type. The default is `"nonrobust"`, which uses the classic OLS covariance estimator. Specify one of `"HC0"`, `"HC1"`, `"HC2"`, `"HC3"` to use White's covariance estimator. All covariance types supported by `OLS.fit` are accepted. Defaults to `"nonrobust"`.
        cov_kwargs (Optional[dict], optional):
            Dictionary of covariance options passed to `OLS.fit`. See [`OLS.fit`](https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.OLS.fit.html) for more details. Defaults to `None`.

    Returns:
        (Union[tuple[float, Union[float, NDArray[np.float64]], float, float], tuple[float, Union[float, NDArray[np.float64]], float, float, ResultsStore]]):
            Returns the following values:
            - `lm` (float): Lagrange multiplier test statistic.
            - `lmpval` (Union[float, NDArray[np.float64]]): The p-value for the Lagrange multiplier test.
            - `fval` (float): The f-statistic of the F test.
            - `fpval` (float): The p-value of the F test.
            - `res_store` (ResultsStore, optional): Intermediate results (returned if `store` is `True`).

    ???+ example "Examples"

        ```pycon {.py .python linenums="1" title="Setup"}
        >>> from ts_stat_tests.correlation.algorithms import lm
        >>> from ts_stat_tests.utils.data import data_airline
        >>> data = data_airline.values

        ```

        ```pycon {.py .python linenums="1" title="Example 1: Lagrange Multiplier test"}
        >>> res_lm, res_p, res_f, res_fp = lm(data)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 128.0966
        >>> print(f"p-value: {res_p:.4e}")
        p-value: 1.1417e-22

        ```

        ```pycon {.py .python linenums="1" title="Example 2: Lagrange Multiplier test with intermediate results"}
        >>> res_lm, res_p, res_f, res_fp, res_store = lm(data, store=True)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 128.0966
        >>> print(f"p-value: {res_p:.4e}")
        p-value: 1.1417e-22

        ```

        ```pycon {.py .python linenums="1" title="Example 3: Lagrange Multiplier test with robust covariance"}
        >>> res_lm, res_p, res_f, res_fp = lm(data, cov_type="HC3")
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 2063.3981
        >>> print(f"p-value: {res_p:.1f}")
        p-value: 0.0

        ```

        ```pycon {.py .python linenums="1" title="Example 4: Lagrange Multiplier test with seasonal period"}
        >>> res_lm, res_p, res_f, res_fp = lm(data, period=12)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 119.1109
        >>> print(f"p-value: {res_p:.4e}")
        p-value: 1.3968e-14

        ```

        ```pycon {.py .python linenums="1" title="Example 5: Lagrange Multiplier test with specified degrees of freedom"}
        >>> res_lm, res_p, res_f, res_fp = lm(data, ddof=2)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 126.1847
        >>> print(f"p-value: {res_p:.4e}")
        p-value: 2.7990e-22

        ```

    ??? equation "Calculation"

        The LM test statistic is computed as:

        $$
        LM = (n_{obs} - ddof) \times R^2
        $$

        where:

        - $R^2$ is the coefficient of determination from the auxiliary regression of the residuals on their own `nlags` lags,
        - $n_{obs}$ is the number of observations, and
        - $ddof$ is the model degrees of freedom lost due to parameter estimation.
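        As an illustrative sketch of the auxiliary regression (assuming the airline setup above; the lag order `4` is an arbitrary illustrative choice, and the code below mirrors the textbook construction rather than the exact internal `statsmodels` code path):

        ```pycon {.py .python linenums="1" title="Manual sketch of the LM statistic"}
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> from statsmodels.tsa.tsatools import lagmat
        >>> series = np.asarray(data, dtype=float)
        >>> lagged = lagmat(series[:, None], 4, trim="both")  # columns: lags 1..4
        >>> aux = sm.OLS(series[4:], sm.add_constant(lagged)).fit()
        >>> manual_lm = len(series[4:]) * aux.rsquared  # (n_obs - ddof) * R^2, with ddof=0
        >>> res_lm, res_p, res_f, res_fp = lm(data, nlags=4)
        >>> bool(np.isclose(manual_lm, res_lm))
        True
        ```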
    ??? success "Credit"
        - All credit goes to the [`statsmodels`](https://www.statsmodels.org/) library.

    ??? tip "See Also"
        - [`statsmodels.stats.diagnostic.acorr_lm`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_lm.html): Lagrange Multiplier tests for autocorrelation.
        - [`ts_stat_tests.correlation.algorithms.lb`][ts_stat_tests.correlation.algorithms.lb]: Ljung-Box test of autocorrelation in residuals.
        - [`ts_stat_tests.correlation.algorithms.bglm`][ts_stat_tests.correlation.algorithms.bglm]: Breusch-Godfrey Lagrange Multiplier tests for residual autocorrelation.
    """
966 """
967 return acorr_lm(
968 resid=resid,
969 nlags=nlags,
970 store=store,
971 period=period,
972 ddof=ddof,
973 cov_type=cov_type,
974 cov_kwargs=cov_kwargs,
975 )


@overload
def bglm(
    res: Union[RegressionResults, RegressionResultsWrapper],
    nlags: Optional[int] = None,
    *,
    store: Literal[False] = False,
) -> tuple[float, Union[float, NDArray[np.float64]], float, float]: ...
@overload
def bglm(
    res: Union[RegressionResults, RegressionResultsWrapper],
    nlags: Optional[int] = None,
    *,
    store: Literal[True],
) -> tuple[float, Union[float, NDArray[np.float64]], float, float, ResultsStore]: ...
@typechecked
def bglm(
    res: Union[RegressionResults, RegressionResultsWrapper],
    nlags: Optional[int] = None,
    *,
    store: bool = False,
) -> Union[
    tuple[float, Union[float, NDArray[np.float64]], float, float],
    tuple[float, Union[float, NDArray[np.float64]], float, float, ResultsStore],
]:
    r"""
    !!! note "Summary"
        The Breusch-Godfrey Lagrange Multiplier (BGLM) test is a statistical test used in time series forecasting to test for the presence of autocorrelation in the residuals of a model. The test is a generalization of the LM test and can be used to test for autocorrelation up to a specified order.

        This function implements the [`acorr_breusch_godfrey()`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_breusch_godfrey.html) function from the [`statsmodels`](https://www.statsmodels.org) library.

    ???+ abstract "Details"

        The BG test adds lags of the residual to the exogenous regressors in the design matrix for the auxiliary regression, with the residuals as the endogenous variable. See Greene (2002), section 12.7.1.

        The BGLM test is performed by first fitting a regression model to the data and then obtaining the residuals from the model. The residuals are then regressed on their own lagged values, up to the specified order, together with the original regressors.

        Under the null hypothesis that there is no autocorrelation in the residuals of the regression model, the BGLM test statistic follows a chi-squared distribution with degrees of freedom equal to the number of lags included in the auxiliary regression.

        If the test statistic is greater than the critical value from the chi-squared distribution, then the null hypothesis of no autocorrelation is rejected, indicating that there is evidence of autocorrelation in the residuals.

    Params:
        res (Union[RegressionResults, RegressionResultsWrapper]):
            Estimation results for which the residuals are tested for serial correlation.
        nlags (Optional[int], optional):
            Number of lags to include in the auxiliary regression (`nlags` is the highest lag). Defaults to `None`.
        store (bool, optional):
            If `store` is `True`, then an additional class instance that contains intermediate results is returned. Defaults to `False`.

    Returns:
        (Union[tuple[float, Union[float, NDArray[np.float64]], float, float], tuple[float, Union[float, NDArray[np.float64]], float, float, ResultsStore]]):
            Returns the following values:
            - `lm` (float): Lagrange multiplier test statistic.
            - `lmpval` (Union[float, NDArray[np.float64]]): The p-value for the Lagrange multiplier test.
            - `fval` (float): The value of the f-statistic for the F test.
            - `fpval` (float): The p-value of the F test.
            - `res_store` (ResultsStore, optional): Intermediate results (returned if `store` is `True`).

    ???+ example "Examples"

        ```pycon {.py .python linenums="1" title="Setup"}
        >>> from statsmodels import api as sm
        >>> from ts_stat_tests.correlation.algorithms import bglm
        >>> y = sm.datasets.longley.load_pandas().endog
        >>> X = sm.datasets.longley.load_pandas().exog
        >>> X = sm.add_constant(X)
        >>> model = sm.OLS(y, X).fit()

        ```

        ```pycon {.py .python linenums="1" title="Example 1: Breusch-Godfrey test"}
        >>> res_lm, res_p, res_f, res_fp = bglm(model)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 5.1409
        >>> print(f"p-value: {res_p:.4f}")
        p-value: 0.1618

        ```

        ```pycon {.py .python linenums="1" title="Example 2: Breusch-Godfrey test with intermediate results"}
        >>> res_lm, res_p, res_f, res_fp, res_store = bglm(model, store=True)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 5.1409
        >>> print(f"p-value: {res_p:.4f}")
        p-value: 0.1618

        ```

        ```pycon {.py .python linenums="1" title="Example 3: Breusch-Godfrey test with specified lags"}
        >>> res_lm, res_p, res_f, res_fp = bglm(model, nlags=2)
        >>> print(f"LM statistic: {res_lm:.4f}")
        LM statistic: 2.8762
        >>> print(f"p-value: {res_p:.4f}")
        p-value: 0.2374

        ```

    ??? equation "Calculation"

        The BGLM test statistic is calculated as:

        $$
        BGLM = n \times R^2
        $$

        where:

        - $n$ is the sample size, and
        - $R^2$ is the coefficient of determination from a regression of the residuals on their own lagged values and the original regressors.
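        As an illustrative sketch of this auxiliary regression (assuming the `longley` OLS model from the setup block above and `nlags=2`; pre-sample residuals are treated as zero here, which matches the usual textbook construction, though the exact internal `statsmodels` code path may differ):

        ```pycon {.py .python linenums="1" title="Manual sketch of the BG auxiliary regression"}
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> from statsmodels.tsa.tsatools import lagmat
        >>> resid = np.asarray(model.resid)
        >>> padded = np.concatenate([np.zeros(2), resid])  # zeros for pre-sample lags
        >>> lagged = lagmat(padded[:, None], 2, trim="both")  # columns: lags 1..2
        >>> aux = sm.OLS(resid, np.column_stack([model.model.exog, lagged])).fit()
        >>> manual_bg = len(resid) * aux.rsquared  # n * R^2
        >>> res_lm, res_p, res_f, res_fp = bglm(model, nlags=2)
        >>> bool(np.isclose(manual_bg, res_lm))
        True
        ```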
    ??? success "Credit"
        - All credit goes to the [`statsmodels`](https://www.statsmodels.org/) library.

    ??? question "References"
        1. Greene, W. H. Econometric Analysis. New Jersey. Prentice Hall; 5th edition. (2002).

    ??? tip "See Also"
        - [`statsmodels.stats.diagnostic.acorr_breusch_godfrey`](https://www.statsmodels.org/stable/generated/statsmodels.stats.diagnostic.acorr_breusch_godfrey.html): Breusch-Godfrey test for serial correlation.
        - [`ts_stat_tests.correlation.algorithms.lb`][ts_stat_tests.correlation.algorithms.lb]: Ljung-Box test of autocorrelation in residuals.
        - [`ts_stat_tests.correlation.algorithms.lm`][ts_stat_tests.correlation.algorithms.lm]: Lagrange Multiplier tests for autocorrelation.
    """
    return acorr_breusch_godfrey(
        res=res,
        nlags=nlags,
        store=store,
    )