Title: Fully Flexible Probabilities for Stress Testing and Portfolio Construction
Description: Implements numerical entropy-pooling for portfolio construction and scenario analysis as described in Meucci, Attilio (2008) and Meucci, Attilio (2010) <doi:10.2139/ssrn.1696802>.
Authors: Bernardo Reckziegel [aut, cre]
Maintainer: Bernardo Reckziegel <[email protected]>
License: MIT + file LICENSE
Version: 0.2.2.9000
Built: 2025-02-15 04:40:33 UTC
Source: https://github.com/reckziegel/ffp
Extends the autoplot method for the ffp class, so ffp objects can be charted with ggplot2.
## S3 method for class 'ffp'
autoplot(object, color = TRUE, ...)

## S3 method for class 'ffp'
plot(object, ...)
object: An object of the ffp class.
color: A logical flag (the default is TRUE).
...: Additional arguments to be passed on to other methods.
A ggplot2 object.
library(ggplot2)

x <- exp_decay(EuStockMarkets, 0.001)
y <- exp_decay(EuStockMarkets, 0.01)

autoplot(x) + scale_color_viridis_c()
autoplot(y) + scale_color_viridis_c()
This function mimics the dplyr bind family. It is useful when you have different ffp objects and want to stack them in the tidy (long) format.
bind_probs(...)
...: ffp objects to bind (stack).
A tidy tibble. The output adds two new columns: rowid (an integer) with the row number of each realization, and key (a factor) that keeps track of the ffp inputs as separate objects.
See also: crisp, exp_decay, kernel_normal, kernel_entropy, double_decay.
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)

x <- exp_decay(EuStockMarkets, lambda = 0.001)
y <- exp_decay(EuStockMarkets, lambda = 0.002)

bind_probs(x, y)

bind_probs(x, y) %>%
  ggplot(aes(x = rowid, y = probs, color = fn)) +
  geom_line() +
  scale_color_viridis_d() +
  theme(legend.position = "bottom")
Bind views for entropy programming.
bind_views(...)
...: Objects of the view class.
A list of the view class.
library(ggplot2)

# Invariant
ret <- diff(log(EuStockMarkets))
n <- nrow(ret)

# Prior probabilities (usually the equal-weight scheme)
prior <- rep(1 / n, n)

# Prior belief for expected returns (here, 0% for each asset)
view_mean <- view_on_mean(x = ret, mean = rep(0, 4))

# View on volatility: 10% higher than the historical average
vol <- apply(ret, 2, stats::sd) * 1.1
view_volatility <- view_on_volatility(x = ret, vol = vol)

views_comb <- bind_views(view_mean, view_volatility)
views_comb

ep <- entropy_pooling(p = prior, Aeq = views_comb$Aeq, beq = views_comb$beq,
                      A = views_comb$A, b = views_comb$b, solver = "nlminb")
autoplot(ep)
Resamples historical scenarios with flexible probabilities.
bootstrap_scenarios(x, p, n)

S3 methods are provided for the numeric, matrix, ts, xts, tbl and data.frame classes.
x: A time series defining the scenario-probability distribution.
p: An object of the ffp class.
n: An integer with the number of scenarios to be sampled.
The argument x is expected to have the same size as p.
A tibble with the number of rows equal to n.
set.seed(123)

ret <- diff(log(EuStockMarkets))
ew <- rep(1 / nrow(ret), nrow(ret))

bootstrap_scenarios(x = ret, p = as_ffp(ew), n = 10)
Gives full weight to occurrences that satisfy a logical condition.
crisp(x, lgl)

S3 methods are provided for the numeric, matrix, ts, xts, data.frame and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
lgl: A logical vector (or condition) flagging the scenarios that should receive full weight.
A numerical vector of class ffp with the new probability distribution.
library(ggplot2)

# Invariance (stationarity)
ret <- diff(log(EuStockMarkets))

# Full weight on scenarios where CAC returns were above 2%
market_condition <- crisp(x = ret, ret[ , 3] > 0.02)
market_condition

autoplot(market_condition) + scale_color_viridis_c()
db: Dataset used in "Historical Scenarios with Fully Flexible Probabilities" (matrix format). An object of class matrix (inherits from array) with 1083 rows and 9 columns.
db_tbl: Dataset used in "Historical Scenarios with Fully Flexible Probabilities" (tibble format). An object of class tbl_df (inherits from tbl and data.frame) with 1083 rows and 9 columns.
Match different decay-factors on the covariance matrix.
double_decay(x, slow, fast)

S3 methods are provided for the numeric, matrix, ts, xts, tbl and data.frame classes, plus a default method.
x: A univariate or a multivariate distribution.
slow: A double with the decay parameter of the slow-moving component.
fast: A double with the decay parameter of the fast-moving component.
A numerical vector of class ffp with the new probability distribution.
De Santis, G., R. Litterman, A. Vesval, and K. Winkelmann, 2003, Covariance matrix estimation, Modern investment management: an equilibrium approach, Wiley.
library(ggplot2)

slow <- 0.0055
fast <- 0.0166
ret <- diff(log(EuStockMarkets))

dd <- double_decay(ret, slow, fast)
dd

autoplot(dd) + scale_color_viridis_c()
Computes the mean, standard deviation, skewness, kurtosis, Value-at-Risk (VaR) and Conditional Value-at-Risk (CVaR) under flexible probabilities.
empirical_stats(x, p, level = 0.01)

S3 methods are provided for the numeric, matrix, xts, ts, data.frame and tbl_df classes, plus a default method.
x: A time series defining the scenario-probability distribution.
p: An object of the ffp class.
level: A number with the desired probability level. The default is level = 0.01.
The data in x and p are expected to have the same number of rows (size).
A tidy tibble with 3 columns:
stat: a column with Mu, Std, Skew, Kurt, VaR and CVaR;
name: the asset names;
value: the computed value for each statistic.
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)

ret <- diff(log(EuStockMarkets))

# With equal weights (standard scenario)
ew <- rep(1 / nrow(ret), nrow(ret))
empirical_stats(x = ret, p = as_ffp(ew)) %>%
  ggplot(aes(x = name, y = value)) +
  geom_col() +
  facet_wrap(~stat, scales = "free") +
  labs(x = NULL, y = NULL)

# With flexible probabilities (ffp)
exp_smooth <- exp_decay(ret, 0.015)
empirical_stats(ret, exp_smooth) %>%
  ggplot(aes(x = name, y = value)) +
  geom_col() +
  facet_wrap(~stat, scales = "free") +
  labs(x = NULL, y = NULL)
Shows how many scenarios are effectively being considered when using flexible probabilities.
ens(p)
p: An object of the ffp class.
A single double.
set.seed(123)
p <- exp_decay(stats::rnorm(100), 0.01)

# The effective number of scenarios is smaller than 100
ens(p)
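In Meucci's framework the effective number of scenarios is usually defined as the exponential of the negative entropy of the probability vector. The sketch below computes that quantity directly; it is a plausibility check under this assumption, not a statement about the internal implementation of ens():

# Toy probability vector (plain doubles)
p <- runif(100)
p <- p / sum(p)

# Assumed formula: effective number of scenarios = exp(-sum(p * log(p)))
exp(-sum(p * log(p)))

# Compare with the package function
ens(as_ffp(p))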
This function solves the entropy minimization problem with equality and inequality constraints. The solution is a vector of posterior probabilities that distorts the prior (equal-weight probabilities) as little as possible, given the constraints (views on the market).
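For reference, the description above corresponds to Meucci's relative-entropy program. Writing the prior as p and the posterior as q, the assumed formulation is:

\[
\min_{q}\; \sum_{t=1}^{T} q_t \left(\ln q_t - \ln p_t\right)
\quad \text{subject to} \quad A\,q \le b, \qquad A_{eq}\,q = b_{eq},
\]

with q non-negative (handled automatically, as noted in the Details below) and a normalization constraint so that the posterior probabilities sum to one (in practice, typically supplied through the equality constraints).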
entropy_pooling(
  p,
  A = NULL,
  b = NULL,
  Aeq = NULL,
  beq = NULL,
  solver = c("nlminb", "solnl", "nloptr"),
  ...
)
p: A vector of prior probabilities.
A: The linear inequality constraint (left-hand side).
b: The linear inequality constraint (right-hand side).
Aeq: The linear equality constraint (left-hand side).
beq: The linear equality constraint (right-hand side).
solver: A character: one of "nlminb", "solnl" or "nloptr".
...: Further arguments passed to one of the solvers.
When imposing view constraints there is no need to specify the non-negativity constraint on probabilities, which is handled automatically by entropy_pooling.
For the arguments accepted in ..., please see the documentation of nlminb, solnl and nloptr, and the examples below.
A vector of posterior probabilities.
# Setup
ret <- diff(log(EuStockMarkets))
n <- nrow(ret)

# View on expected returns (here, 2% for each asset)
mean <- rep(0.02, 4)

# Prior probabilities (usually the equal-weight scheme)
prior <- rep(1 / n, n)

# View
views <- view_on_mean(x = ret, mean = mean)

# Optimization
ep <- entropy_pooling(
  p = prior,
  Aeq = views$Aeq,
  beq = views$beq,
  solver = "nlminb"
)
ep

### Using the ... argument to control the optimization parameters

# nlminb
ep <- entropy_pooling(
  p = prior,
  Aeq = views$Aeq,
  beq = views$beq,
  solver = "nlminb",
  control = list(
    eval.max = 1000,
    iter.max = 1000,
    trace    = TRUE
  )
)
ep

# nloptr
ep <- entropy_pooling(
  p = prior,
  Aeq = views$Aeq,
  beq = views$beq,
  solver = "nloptr",
  control = list(
    xtol_rel = 1e-10,
    maxeval  = 1000,
    check_derivatives = TRUE
  )
)
ep
Exponential smoothing twists probabilities by giving relatively more weight to recent observations at an exponential rate.
exp_decay(x, lambda)

S3 methods are provided for the numeric, matrix, ts, xts, data.frame and tbl classes, plus a default method.
x: A univariate or a multivariate distribution.
lambda: A double with the decay parameter.
The half-life is linked to the lambda parameter as follows: HL = log(2) / lambda. For example, log(2) / 0.0166 is approximately 42, so a lambda of 0.0166 can be associated with a half-life of two months (21 * 2 trading days).
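A quick arithmetic check of the relation above (the half_life() helper documented later in this manual performs the same conversion):

# Half-life implied by lambda = 0.0166
log(2) / 0.0166   # ~ 41.8 days, i.e. roughly two trading months of 21 days each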
A numerical vector of class ffp with the new probability distribution.
library(ggplot2)

# Long half-life
long_hl <- exp_decay(EuStockMarkets, 0.001)
long_hl
autoplot(long_hl) + scale_color_viridis_c()

# Short half-life
short_hl <- exp_decay(EuStockMarkets, 0.015)
short_hl
autoplot(short_hl) + scale_color_viridis_c()
Helpers and constructors for the ffp class.
ffp(x = double(), ...)

is_ffp(x)

as_ffp(x)

## Default S3 method:
as_ffp(x)

## S3 method for class 'integer'
as_ffp(x)
x: A numeric (double) vector of probabilities.
...: Additional attributes to be passed to the constructor.
The ffp class is designed to interact with doubles, but the output of c(ffp, double) or c(double, ffp) will always return a double (not an ffp object), since there is no way to guarantee the interaction between a numeric vector and a probability will also be a probability.
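A minimal sketch of the coercion rule described above (assuming a valid probability vector built with as_ffp()):

p <- as_ffp(rep(0.2, 5))

is_ffp(p)          # TRUE
is_ffp(c(p, 0.1))  # FALSE: combining with a plain double falls back to a double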
ffp() and as_ffp() return an S3 vector of class ffp (built upon doubles); is_ffp() returns a logical object.
set.seed(123)

p <- runif(5)
p <- p / sum(p)

is_ffp(p)
as_ffp(p)
Computes the location and dispersion statistics under flexible probabilities.
ffp_moments(x, p = NULL)

S3 methods are provided for the numeric, matrix, xts, data.frame and tbl_df classes, plus a default method.
x: A tabular (non-tidy) data structure.
p: An object of the ffp class.
A list with 2 elements: mu and sigma.
x <- matrix(diff(log(EuStockMarkets)), ncol = 4)
colnames(x) <- colnames(EuStockMarkets)

p <- stats::runif(nrow(x))
p <- p / sum(p)

ffp_moments(x = x, p = p)

# Compare with the standard approach
colMeans(x)
cov(x)
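For intuition, the location and dispersion under flexible probabilities are usually the probability-weighted counterparts of the sample mean and covariance. The sketch below computes them directly under that assumption; it is meant as a reference point and may differ from the internal implementation in small details:

x <- matrix(diff(log(EuStockMarkets)), ncol = 4)
p <- rep(1 / nrow(x), nrow(x))   # equal weights, for illustration

mu    <- drop(crossprod(p, x))   # weighted mean: sum_t p_t * x_t
xc    <- sweep(x, 2, mu)         # center by the weighted mean
sigma <- crossprod(xc * p, xc)   # weighted covariance: sum_t p_t * (x_t - mu) (x_t - mu)'

mu
sigma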
Compute the implied half-life of a decay parameter.
half_life(lambda)
lambda: A number.
A single number with the half-life in days.
half_life(0.0166)
half_life(0.01)
Finds the probability distribution that constrains the first two moments while imposing minimal structure on the data.
kernel_entropy(x, mean, sigma = NULL)

S3 methods are provided for the numeric, matrix, ts, xts, tbl_df and data.frame classes, plus a default method.
x: A univariate or a multivariate distribution.
mean: A numeric vector on which the kernel should be centered.
sigma: The uncertainty (volatility) around the mean. When NULL (the default), only the first moment is constrained.
A numerical vector of class ffp with the new probability distribution.
library(ggplot2)

ret <- diff(log(EuStockMarkets[ , 1]))

mean <- -0.01            # scenarios centered around -1%
sigma <- var(diff(ret))

ke <- kernel_entropy(ret, mean, sigma)
ke

autoplot(ke) + scale_color_viridis_c()
Historical realizations receive a weight proportional to their distance from a target mean.
kernel_normal(x, mean, sigma)

S3 methods are provided for the numeric, matrix, ts, xts, tbl_df and data.frame classes, plus a default method.
x: A univariate or a multivariate distribution.
mean: A numeric vector on which the kernel should be centered.
sigma: The uncertainty (volatility) around the mean.
A numerical vector of class ffp with the new probability distribution.
library(ggplot2)

ret <- diff(log(EuStockMarkets[ , 1]))

mean <- -0.01            # scenarios centered around -1%
sigma <- var(diff(ret))

kn <- kernel_normal(ret, mean, sigma)
kn

autoplot(kn) + scale_color_viridis_c()

# A larger sigma spreads out the distribution
sigma <- var(diff(ret)) / 0.05
kn <- kernel_normal(ret, mean, sigma)

autoplot(kn) + scale_color_viridis_c()
Computes the relative entropy of two distributions.
relative_entropy(prior, posterior)
prior: A prior probability distribution.
posterior: A posterior probability distribution.
A double with the relative entropy.
set.seed(222)

prior <- rep(1 / 100, 100)
posterior <- runif(100)
posterior <- posterior / sum(posterior)

relative_entropy(prior, posterior)
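For intuition, the relative entropy used in entropy pooling is usually the Kullback-Leibler divergence of the posterior from the prior. The sketch below computes that quantity directly under this assumption, as a plausibility check rather than a statement about the internal implementation:

prior <- rep(1 / 100, 100)
posterior <- runif(100)
posterior <- posterior / sum(posterior)

# Kullback-Leibler divergence: sum_t q_t * (log(q_t) - log(p_t))
sum(posterior * (log(posterior) - log(prior)))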
These functions are designed to make it easier to visualize the impact of a view on the P&L distribution.
scenario_density(x, p, n = 10000)
scenario_histogram(x, p, n = 10000)
x: A univariate marginal distribution.
p: A probability from the ffp class.
n: An integer with the number of resamples (the default is n = 10000).
To generate a scenario-distribution, the margins are bootstrapped using bootstrap_scenarios. The number of resamples can be controlled with the n argument (the default is n = 10000).
A ggplot2 object.
x <- diff(log(EuStockMarkets))[, 1]
p <- exp_decay(x, 0.005)

scenario_density(x, p, 500)
scenario_histogram(x, p, 500)
Helper to construct constraints on copulas for entropy programming.
view_on_copula(x, simul, p)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A multivariate copula.
simul: A simulated target copula.
p: An object of the ffp class.
A list of the view class.
set.seed(1)
library(ggplot2)

# Invariants
ret <- diff(log(EuStockMarkets))
u <- apply(ret, 2, stats::pnorm)   # assuming a normal copula
n <- nrow(u)

# Prior probability distribution
prior <- rep(1 / n, n)

# Simulated marginals
simul_marg <- bootstrap_scenarios(ret, as_ffp(prior), as.double(n))

# Copulas derived from the simulated margins
simul_cop <- apply(simul_marg, 2, stats::pnorm)   # assuming a normal copula

views <- view_on_copula(x = u, simul = simul_cop, p = prior)
views

ep <- entropy_pooling(p = prior, Aeq = views$Aeq, beq = views$beq, solver = "nloptr")
autoplot(ep)
Helper to construct views on the correlation matrix.
view_on_correlation(x, cor)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
cor: A matrix with the target correlation structure.
A list of the view class.
library(ggplot2)

# Invariant
ret <- diff(log(EuStockMarkets))

# Assume that a panic event throws all correlations to the roof!
co <- matrix(0.95, 4, 4)
diag(co) <- 1
co

# Prior probability (usually the equal-weight setting)
prior <- rep(1 / nrow(ret), nrow(ret))

# View
views <- view_on_correlation(x = ret, cor = co)
views

# Optimization
ep <- entropy_pooling(p = prior, Aeq = views$Aeq, beq = views$beq, solver = "nlminb")
autoplot(ep)

# Prior correlation structure
stats::cor(ret)

# Posterior correlation structure matches the initial view very closely
stats::cov2cor(ffp_moments(x = ret, p = ep)$sigma)
Helper to construct constraints on the entire distribution.
view_on_joint_distribution(x, simul, p)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
simul: A univariate or multivariate simulated panel.
p: An object of the ffp class.
simul must have the same number of columns as x; p should have the same number of rows as simul.
A list of the view class.
set.seed(1)
library(ggplot2)

# Invariants
ret <- diff(log(EuStockMarkets))
n <- nrow(ret)

# Prior probability distribution
prior <- rep(1 / n, n)

# Simulated marginals
simul <- bootstrap_scenarios(ret, as_ffp(prior), as.double(n))

views <- view_on_joint_distribution(x = ret, simul = simul, p = prior)
views

ep <- entropy_pooling(p = prior, Aeq = views$Aeq, beq = views$beq, solver = "nlminb")
autoplot(ep)

# Location matches
colMeans(simul)
ffp_moments(x = ret, p = ep)$mu

# Dispersion matches
cov(simul)
ffp_moments(x = ret, p = ep)$sigma
Helper to construct constraints on the marginal distribution.
view_on_marginal_distribution(x, simul, p)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
simul: A univariate or multivariate simulated panel.
p: An object of the ffp class.
simul must have the same number of columns as x; p should have the same number of rows as simul.
A list of the view class.
set.seed(1)
library(ggplot2)

# Invariants
ret <- diff(log(EuStockMarkets))
n <- nrow(ret)

# Prior probability distribution
prior <- rep(1 / n, n)

# Simulated marginals
simul <- bootstrap_scenarios(ret, as_ffp(prior), as.double(n))

views <- view_on_marginal_distribution(x = ret, simul = simul, p = prior)
views

ep <- entropy_pooling(p = prior, Aeq = views$Aeq, beq = views$beq, solver = "nlminb")
autoplot(ep)

# Location matches
colMeans(simul)
ffp_moments(x = ret, p = ep)$mu

# Dispersion matches
cov(simul)
ffp_moments(x = ret, p = ep)$sigma
Helper to construct views on expected returns.
view_on_mean(x, mean)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
mean: A double vector with the target expected returns (one entry per asset).
A list of the view class.
library(ggplot2)

# Invariant
ret <- diff(log(EuStockMarkets))
n <- nrow(ret)

# View on expected returns (here, 2% for each asset)
mean <- rep(0.02, 4)

# Prior probabilities (usually the equal-weight scheme)
prior <- rep(1 / n, n)

# View
views <- view_on_mean(x = ret, mean = mean)
views

# Optimization
ep <- entropy_pooling(p = prior, Aeq = views$Aeq, beq = views$beq, solver = "nlminb")
autoplot(ep)

# Probabilities are twisted in such a way that the posterior
# `mu` matches exactly the previously stated beliefs
ffp_moments(x = ret, p = ep)$mu
Helper to construct views on relative performance of assets.
view_on_rank(x, rank)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
rank: An integer vector with the column indices of the assets, ordered so that later (right-most) entries are expected to outperform earlier (left-most) entries.
If rank = c(2, 1), it is implied that the asset in the first column will outperform the asset in the second column. For longer vectors the interpretation is the same: assets on the right (of the rank vector) will outperform assets on the left.
A list of the view class.
library(ggplot2)

# Invariants
x <- diff(log(EuStockMarkets))
prior <- rep(1 / nrow(x), nrow(x))

# The asset in the first column will outperform the asset in the second column
# (DAX will outperform SMI)
views <- view_on_rank(x = x, rank = c(2, 1))
views

ep <- entropy_pooling(p = prior, A = views$A, b = views$b, solver = "nloptr")
autoplot(ep)

# Prior returns (SMI > DAX)
colMeans(x)[1:2]

# Posterior returns (DAX > SMI)
ffp_moments(x, ep)$mu[1:2]
Helper to construct views on volatility.
view_on_volatility(x, vol)

S3 methods are provided for the matrix, xts and tbl_df classes, plus a default method.
x: A univariate or a multivariate distribution.
vol: A double vector with the target volatilities (one entry per asset).
A list of the view class.
library(ggplot2)

# Invariant
ret <- diff(log(EuStockMarkets))
n <- nrow(ret)

# Expect volatility 30% higher than the historical average
vol <- apply(ret, 2, stats::sd) * 1.3

# Prior probabilities
prior <- rep(1 / n, n)

# Views
views <- view_on_volatility(x = ret, vol = vol)
views

# Optimization
ep <- entropy_pooling(p = prior, Aeq = views$Aeq, beq = views$beq, solver = "nlminb")
autoplot(ep)

# Desired volatility
vol

# Posterior volatility matches very closely the desired volatility
sqrt(diag(ffp_moments(x = ret, p = ep)$sigma))