Skip to content

Commit

Permalink
Added testing cases and accessors
Browse files Browse the repository at this point in the history
  • Loading branch information
Divasco committed Oct 17, 2024
1 parent 1543f11 commit f99af64
Show file tree
Hide file tree
Showing 5 changed files with 93 additions and 26 deletions.
1 change: 1 addition & 0 deletions garpar/core/portfolio.py
Original file line number Diff line number Diff line change
Expand Up @@ -376,6 +376,7 @@ def shape(self):
return self._prices_df.shape

# UTILS ===================================================================
# TODO: Hacer que copy pueda elegir desde que dia hasta que dia copiar
def copy(
self,
*,
Expand Down
36 changes: 36 additions & 0 deletions garpar/core/prices_acc.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@

import numpy as np

import pandas as pd

from ..utils import accabc

# =============================================================================
Expand Down Expand Up @@ -162,3 +164,37 @@ def mad(self, skipna=True):
"""
df = self._pf._prices_df
return (df - df.mean(axis=0)).abs().mean(axis=0, skipna=skipna)

def mean_tendency_size(self):
    """Mean length of consecutive win/loss streaks, per stock.

    A "win" is a period whose return is strictly positive. For each
    stock, the sequence of returns is split into maximal runs of
    consecutive wins (or losses), and the mean run length is reported.

    Returns
    -------
    pandas.Series
        Mean streak length per stock, named ``mean_tendency_size``.
    """

    def count_consecutive(stock_groups):
        # Size of each consecutive group...
        stock_counts = stock_groups.groupby(stock_groups).size()
        # ...then the mean of those sizes.
        return stock_counts.mean()

    # Boolean frame: True if the period's return is positive,
    # False otherwise.
    wins = self._pf.as_returns() > 0

    # Detect changes in the win/loss sequence by comparing each row to
    # the previous one (the first row always differs from the NaN shift,
    # so it starts the first group).
    changes = wins != wins.shift()

    # Cumulative sum assigns a unique identifier to each consecutive
    # group: the id increments every time a change is detected.
    groups = changes.cumsum()

    # Mean size of the consecutive win/loss streaks, column by column.
    counts = groups.apply(count_consecutive)

    # Name the resulting Series for clarity.
    counts.name = "mean_tendency_size"

    return counts
54 changes: 41 additions & 13 deletions garpar/optimize/mean_variance.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,8 @@ class MVOptimizer(MeanVarianceFamilyMixin, OptimizerABC):

target_return = mabc.hparam(default=None)
target_risk = mabc.hparam(default=None)
risk_free_rate = mabc.hparam(default=0.02)
risk_free_rate = mabc.hparam(default=None)
risk_aversion = mabc.hparam(default=None)

def _get_optimizer(self, pf):
expected_returns = pf.ereturns(self.returns, **self.returns_kw)
Expand All @@ -50,20 +51,34 @@ def _get_optimizer(self, pf):
weight_bounds=self.weight_bounds,
)

def _coerce_risk_free_rate(self, pf):
if self.risk_free_rate is not None:
return self.risk_free_rate

expected_returns = list(pf.ereturns())
return np.median(expected_returns)

def _coerce_risk_aversion(self, pf):
return 1 # FIXME Implementar
if self.risk_aversion is not None:
return self.risk_aversion

expected_returns = list(pf.ereturns())
risk_aversion = 1 / np.var(expected_returns)
return risk_aversion

def _coerce_target_return(self, pf):
if self.target_return is None:
returns = pf.as_returns().to_numpy().flatten()
returns = returns[(returns != 0) & (~np.isnan(returns))]
return np.min(np.abs(returns))
return self.target_return
if self.target_return is not None:
return self.target_return

returns = pf.as_returns().to_numpy().flatten()
returns = returns[(returns != 0) & (~np.isnan(returns))]
return np.min(np.abs(returns))

def _coerce_target_volatility(self, pf):
if self.target_risk is None:
return np.min(np.std(pf.as_prices(), axis=0))
return self.target_risk
if self.target_risk is not None:
return self.target_risk

return np.min(np.std(pf.as_prices(), axis=0))

def _calculate_weights(self, pf):
optimizer = self._get_optimizer(pf)
Expand All @@ -86,40 +101,53 @@ def _calculate_weights(self, pf):
def _min_volatility(self, optimizer, pf):
weights_dict = optimizer.min_volatility()
weights = [weights_dict[stock] for stock in pf.stocks]

return weights, {"name": "min_volatility"}

def _max_sharpe(self, optimizer, pf):
weights_dict = optimizer.max_sharpe(risk_free_rate=self.risk_free_rate)
risk_free_rate = self._coerce_risk_free_rate(pf)

weights_dict = optimizer.max_sharpe(risk_free_rate=risk_free_rate)
weights = [weights_dict[stock] for stock in pf.stocks]
return weights, {"name": "max_sharpe", "risk_free_rate": self.risk_free_rate}

return weights, {"name": "max_sharpe", "risk_free_rate": risk_free_rate}

def _max_quadratic_utility(self, optimizer, pf):
risk_aversion = self._coerce_risk_aversion(pf)

weights_dict = optimizer.max_quadratic_utility(
risk_aversion, market_neutral=self.market_neutral
)
weights = [weights_dict[stock] for stock in pf.stocks]

return weights, {"name": "max_quadratic_utility", "risk_aversion": risk_aversion}

def _efficient_risk(self, optimizer, pf):
target_volatility = self._coerce_target_volatility(pf)

weights_dict = optimizer.efficient_risk(
target_volatility, market_neutral=self.market_neutral
)
weights = [weights_dict[stock] for stock in pf.stocks]

return weights, {"name": "efficient_risk", "target_volatility": target_volatility}

def _efficient_return(self, optimizer, pf):
target_return = self._coerce_target_return(pf)

weights_dict = optimizer.efficient_return(
target_return, market_neutral=self.market_neutral
)
weights = [weights_dict[stock] for stock in pf.stocks]

return weights, {"name": "efficient_return", "target_return": target_return}

def _portfolio_performance(self, optimizer, pf):
weigths_dict = optimizer.portfolio_performance(self.risk_free_rate)
risk_free_rate = self._coerce_risk_free_rate(pf)

weigths_dict = optimizer.portfolio_performance(risk_free_rate)
weights = [weigths_dict[stock] for stock in pf.stocks]

return weights, {"name": "portfolio_performance"}


Expand Down
9 changes: 9 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@
"uniform": gp.datasets.make_risso_uniform,
}

# Optimization method names exercised by the parametrized optimizer tests
# (exposed to test modules via pytest.METHODS in pytest_configure).
METHODS = [
    "min_volatility",
    "max_sharpe",
    "max_quadratic_utility",
    "efficient_risk",
    "efficient_return",
]

# =============================================================================
# FIXTURES
# =============================================================================
Expand Down Expand Up @@ -71,3 +79,4 @@ def make(*, distribution="normal", **kwargs):

def pytest_configure():
    """Expose shared parameter sources as attributes of the pytest module.

    Test modules reference these in ``@pytest.mark.parametrize`` without
    importing conftest directly.
    """
    pytest.DISTRIBUTIONS = DISTRIBUTIONS
    pytest.METHODS = METHODS
19 changes: 6 additions & 13 deletions tests/optimize/test_mean_variance.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,14 @@ def test_MVOptimizer_custom_initialization():
assert optimizer.method == "min_volatility"
assert optimizer.weight_bounds == (-1, 1)

@pytest.mark.parametrize("method", pytest.METHODS)
@pytest.mark.parametrize("price_distribution", pytest.DISTRIBUTIONS)
def test_MVOptimizer_calculate_weights_method_coerced(
    risso_portfolio, method, price_distribution
):
    """Each optimization method runs end to end with coerced hyperparameters."""
    # Duplicated old/new diff lines collapsed: a single def, a single
    # optimizer construction, and asserts consistent with the
    # parametrized method name.
    pf = risso_portfolio(random_state=42, distribution=price_distribution)
    optimizer = MVOptimizer(method=method)
    weights, meta = optimizer._calculate_weights(pf)
    assert len(weights) == len(pf.stocks)
    assert meta["name"] == method

@pytest.mark.parametrize("price_distribution", pytest.DISTRIBUTIONS)
def test_MVOptimizer_min_volatility(risso_portfolio, price_distribution):
Expand All @@ -64,13 +64,6 @@ def test_MVOptimizer_invalid_method(risso_portfolio, price_distribution):
with pytest.raises(ValueError):
optimizer._calculate_weights(pf)

@pytest.mark.parametrize("price_distribution", pytest.DISTRIBUTIONS)
def test_MVOptimizer_coerce_target_return(risso_portfolio, price_distribution):
    """Coercion falls back to the minimum absolute non-zero return."""
    pf = risso_portfolio(random_state=42, distribution=price_distribution)
    optimizer = MVOptimizer(target_return=None)
    coerced_return = optimizer._coerce_target_return(pf)
    # NOTE(review): 0.05 is tied to the fixture's random_state=42 data;
    # confirm it holds for every distribution in pytest.DISTRIBUTIONS.
    assert coerced_return == 0.05  # The minimum absolute return from mock portfolio

@pytest.mark.parametrize("price_distribution", pytest.DISTRIBUTIONS)
def test_MVOptimizer_get_optimizer(risso_portfolio, price_distribution):
pf = risso_portfolio(random_state=42, distribution=price_distribution)
Expand All @@ -85,11 +78,11 @@ def test_MVOptimizer_get_optimizer(risso_portfolio, price_distribution):
(13.383798092382262, pytest.DISTRIBUTIONS['uniform'])
]
)
def test_MVOptimizer_coerce_volatiliy(volatiliy, price_distribution):
def test_MVOptimizer_coerce_volatility(volatiliy, price_distribution):
pf = price_distribution(random_state=43)
optimizer = MVOptimizer(method="max-sharpe")
coerced_volatility = optimizer._coerce_target_volatility(pf)
assert coerced_volatility == pytest.approx(volatiliy, 1e-9)
np.testing.assert_almost_equal(coerced_volatility, volatiliy, decimal=9)

# =============================================================================
# MARKOWITZ TEST
Expand Down

0 comments on commit f99af64

Please sign in to comment.