diff --git a/.flake8 b/.flake8 deleted file mode 100644 index cb23f32..0000000 --- a/.flake8 +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 160 -extend-ignore = E203, E501 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8269e08..c1b201a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,6 @@ repos: rev: v4.6.0 hooks: - id: check-added-large-files - - id: check-ast - id: check-case-conflict - id: check-executables-have-shebangs - id: check-json @@ -38,17 +37,13 @@ repos: # - id: detect-aws-credentials - id: check-xml - id: check-yaml - - id: debug-statements - id: detect-private-key - id: end-of-file-fixer - - id: mixed-line-ending - args: ["--fix=lf"] - id: name-tests-test args: ["--pytest-test-first"] - id: no-commit-to-branch # - id: pretty-format-json - id: requirements-txt-fixer - - id: trailing-whitespace - repo: https://github.com/pre-commit/mirrors-prettier rev: v4.0.0-alpha.8 @@ -79,24 +74,11 @@ repos: hooks: - id: shellcheck - - repo: https://github.com/pycqa/pydocstyle.git - rev: 6.3.0 - hooks: - - id: pydocstyle - additional_dependencies: ["tomli"] - - repo: https://github.com/Mateusz-Grzelinski/actionlint-py rev: v1.7.1.15 hooks: - id: actionlint - - repo: https://github.com/pycqa/flake8 - rev: "7.1.0" - hooks: - - id: flake8 - additional_dependencies: - - pep8-naming - - repo: https://github.com/adrienverge/yamllint.git rev: v1.35.1 hooks: @@ -107,10 +89,8 @@ repos: rev: v0.5.1 hooks: - id: ruff - files: ^(scripts|tests|custom_components)/.+\.py$ - args: [--fix, --exit-non-zero-on-fix] + args: [--fix, --exit-non-zero-on-fix, --config=ruff.toml] - id: ruff-format - files: ^(scripts|tests|custom_components)/.+\.py$ - repo: local hooks: diff --git a/.readthedocs.yml b/.readthedocs.yml index a2bcab3..ef0e4c7 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -4,23 +4,20 @@ # Required version: 2 +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py -# Build documentation with MkDocs -#mkdocs: -# configuration: mkdocs.yml - # Optionally build your docs in additional formats such as PDF formats: - pdf -build: - os: ubuntu-22.04 - tools: - python: "3.11" - python: install: - requirements: docs/requirements.txt diff --git a/AUTHORS.rst b/AUTHORS.rst index 5281c92..8308d5e 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -2,4 +2,9 @@ Contributors ============ -* github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> +* Arfima Dev +* Gonzalo Álvarez +* Jonathan Yánez +* Víctor de Luna +* Virginia Morales +* Xavier Barrachina diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 226e6f5..6fcf78a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,4 @@ Changelog Version 0.1 =========== -- Feature A added -- FIX: nasty bug #1729 fixed -- add your changes here! +- Initial version! diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a4d1ae7..ec4b46f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,28 +1,3 @@ -.. todo:: THIS IS SUPPOSED TO BE AN EXAMPLE. MODIFY IT ACCORDING TO YOUR NEEDS! - - The document assumes you are using a source repository service that promotes a - contribution model similar to `GitHub's fork and pull request workflow`_. - While this is true for the majority of services (like GitHub, GitLab, - BitBucket), it might not be the case for private repositories (e.g., when - using Gerrit). 
- - Also notice that the code examples might refer to GitHub URLs or the text - might use GitHub specific terminology (e.g., *Pull Request* instead of *Merge - Request*). - - Please make sure to check the document having these assumptions in mind - and update things accordingly. - -.. todo:: Provide the correct links/replacements at the bottom of the document. - -.. todo:: You might want to have a look on `PyScaffold's contributor's guide`_, - - especially if your project is open source. The text should be very similar to - this template, but there are a few extra contents that you might decide to - also include, like mentioning labels of your issue tracker or automated - releases. - - ============ Contributing ============ @@ -72,25 +47,20 @@ by adding missing information and correcting mistakes. This means that the docs are kept in the same repository as the project code, and that any documentation update is done in the same way was a code contribution. -.. todo:: Don't forget to mention which markup language you are using. - - e.g., reStructuredText_ or CommonMark_ with MyST_ extensions. -.. todo:: If your project is hosted on GitHub, you can also mention the following tip: - - .. tip:: - Please notice that the `GitHub web interface`_ provides a quick way of - propose changes in ``osc-physrisk-financial``'s files. While this mechanism can - be tricky for normal code contributions, it works perfectly fine for - contributing to the docs, and can be quite handy. - - If you are interested in trying this method out, please navigate to - the ``docs`` folder in the source repository_, find which file you - would like to propose changes and click in the little pencil icon at the - top, to open `GitHub's code editor`_. Once you finish editing the file, - please write a message in the form at the bottom of the page describing - which changes have you made and what are the motivations behind them and - submit your proposal. +.. tip:: + Please notice that the `GitHub web interface`_ provides a quick way of + propose changes in ``osc-physrisk-financial``'s files. While this mechanism can + be tricky for normal code contributions, it works perfectly fine for + contributing to the docs, and can be quite handy. + + If you are interested in trying this method out, please navigate to + the ``docs`` folder in the source repository_, find which file you + would like to propose changes and click in the little pencil icon at the + top, to open `GitHub's code editor`_. Once you finish editing the file, + please write a message in the form at the bottom of the page describing + which changes have you made and what are the motivations behind them and + submit your proposal. When working on documentation changes in your local machine, you can compile them using |tox|_:: @@ -151,8 +121,6 @@ Clone the repository to be able to import the package under development in the Python REPL. - .. todo:: if you are not using pre-commit, please remove the following item: - #. Install |pre-commit|_:: pip install pre-commit @@ -182,11 +150,9 @@ Implement your changes to record your changes in git_. - .. todo:: if you are not using pre-commit, please remove the following item: - Please make sure to see the validation messages from |pre-commit|_ and fix any eventual issues. - This should automatically use flake8_/black_ to check/fix the code style + This should automatically use ruff_ to check/fix the code style in a way that is compatible with the project. .. 
important:: Don't forget to add unit tests and documentation in case your @@ -218,11 +184,9 @@ Submit your contribution #. Go to the web page of your fork and click |contribute button| to send your changes for review. - .. todo:: if you are using GitHub, you can uncomment the following paragraph - - Find more detailed information in `creating a PR`_. You might also want to open - the PR as a draft first and mark it as ready for review after the feedbacks - from the continuous integration (CI) system or any required fixes. + Find more detailed information in `creating a PR`_. You might also want to open + the PR as a draft first and mark it as ready for review after the feedbacks + from the continuous integration (CI) system or any required fixes. Troubleshooting @@ -278,11 +242,6 @@ Maintainer tasks Releases -------- -.. todo:: This section assumes you are using PyPI to publicly release your package. - - If instead you are using a different/private package index, please update - the instructions accordingly. - If you are part of the group of maintainers and have correct user permissions on PyPI_, the following steps can be used to release a new version for ``osc-physrisk-financial``: @@ -308,15 +267,12 @@ on PyPI_, the following steps can be used to release a new version for of environments, including private companies and proprietary code bases. -.. <-- start --> -.. todo:: Please review and change the following definitions: .. |the repository service| replace:: GitHub .. |contribute button| replace:: "Create pull request" -.. _repository: https://github.com//osc-physrisk-financial -.. _issue tracker: https://github.com//osc-physrisk-financial/issues -.. <-- end --> +.. _repository: https://github.com/os-climate/osc-physrisk-financial +.. _issue tracker: https://github.com/os-climate/osc-physrisk-financial/issues .. |virtualenv| replace:: ``virtualenv`` @@ -331,7 +287,7 @@ on PyPI_, the following steps can be used to release a new version for .. _descriptive commit message: https://chris.beams.io/posts/git-commit .. _docstrings: https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html .. _first-contributions tutorial: https://github.com/firstcontributions/first-contributions -.. _flake8: https://flake8.pycqa.org/en/stable/ +.. _ruff: https://docs.astral.sh/ruff/ .. _git: https://git-scm.com .. _GitHub's fork and pull request workflow: https://guides.github.com/activities/forking/ .. 
_guide created by FreeCodeCamp: https://github.com/FreeCodeCamp/how-to-contribute-to-open-source diff --git a/README.md b/README.md new file mode 100644 index 0000000..b1fa30a --- /dev/null +++ b/README.md @@ -0,0 +1,28 @@ + + +> [!IMPORTANT] +> On June 26 2024, Linux Foundation announced the merger of its financial services umbrella, the Fintech Open Source Foundation ([FINOS](https://finos.org)), with OS-Climate, an open source community dedicated to building data technologies, modeling, and analytic tools that will drive global capital flows into climate change mitigation and resilience; OS-Climate projects are in the process of transitioning to the [FINOS governance framework](https://community.finos.org/docs/governance); read more on [finos.org/press/finos-join-forces-os-open-source-climate-sustainability-esg](https://finos.org/press/finos-join-forces-os-open-source-climate-sustainability-esg) + + +osc-physrisk-financial +====================== +Physical climate risk financial valuation + +drawing + +## About osc-physrisk-financial + +An [OS-Climate](https://os-climate.org) project, osc-physrisk-financial is a library for valuating assets under different climate risk scenarios. + +## Using the library +The library can be run locally and is installed via: +``` +pip install osc-physrisk-financial +``` + +The library uses the output generated by the [physrisk](https://github.com/os-climate/physrisk) library + + +### Note + +This is the first stage of development, where the models are intentionally simple, focusing on setting up the proper structure of the library. \ No newline at end of file diff --git a/README.rst b/README.rst deleted file mode 100644 index 83f5efa..0000000 --- a/README.rst +++ /dev/null @@ -1,54 +0,0 @@ -💬 Important - -On June 26 2024, Linux Foundation announced the merger of its financial services umbrella, the Fintech Open Source Foundation (`FINOS `_), with OS-Climate, an open source community dedicated to building data technologies, modelling, and analytic tools that will drive global capital flows into climate change mitigation and resilience; OS-Climate projects are in the process of transitioning to the `FINOS governance framework `_; read more on `finos.org/press/finos-join-forces-os-open-source-climate-sustainability-esg `_ - - -.. These are examples of badges you might want to add to your README: - please update the URLs accordingly - - .. image:: https://api.cirrus-ci.com/github//osc-physrisk-financial.svg?branch=main - :alt: Built Status - :target: https://cirrus-ci.com/github//osc-physrisk-financial - .. image:: https://readthedocs.org/projects/osc-physrisk-financial/badge/?version=latest - :alt: ReadTheDocs - :target: https://osc-physrisk-financial.readthedocs.io/en/stable/ - .. image:: https://img.shields.io/coveralls/github//osc-physrisk-financial/main.svg - :alt: Coveralls - :target: https://coveralls.io/r//osc-physrisk-financial - .. image:: https://img.shields.io/pypi/v/osc-physrisk-financial.svg - :alt: PyPI-Server - :target: https://pypi.org/project/osc-physrisk-financial/ - .. image:: https://img.shields.io/conda/vn/conda-forge/osc-physrisk-financial.svg - :alt: Conda-Forge - :target: https://anaconda.org/conda-forge/osc-physrisk-financial - .. image:: https://pepy.tech/badge/osc-physrisk-financial/month - :alt: Monthly Downloads - :target: https://pepy.tech/project/osc-physrisk-financial - .. 
image:: https://img.shields.io/twitter/url/http/shields.io.svg?style=social&label=Twitter - :alt: Twitter - :target: https://twitter.com/osc-physrisk-financial - -.. image:: https://img.shields.io/badge/-PyScaffold-005CA0?logo=pyscaffold - :alt: Project generated with PyScaffold - :target: https://pyscaffold.org/ - -| - -====================== -osc-physrisk-financial -====================== - - - OS-Climate Python Project - - -A longer description of your project goes here... - - -.. _pyscaffold-notes: - -Note -==== - -This project has been set up using PyScaffold 4.5. For details and usage -information on PyScaffold see https://pyscaffold.org/. diff --git a/docs/assets.rst b/docs/assets.rst new file mode 100644 index 0000000..ab963bf --- /dev/null +++ b/docs/assets.rst @@ -0,0 +1,7 @@ +Assets +====== + +.. automodule:: osc_physrisk_financial.assets + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index a798c7b..3aaf850 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,7 +9,6 @@ import os import sys -import shutil # -- Path setup -------------------------------------------------------------- @@ -28,31 +27,31 @@ # setup.py install" in the RTD Advanced Settings. # Additionally it helps us to avoid running apidoc manually -try: # for Sphinx >= 1.7 - from sphinx.ext import apidoc -except ImportError: - from sphinx import apidoc +# try: # for Sphinx >= 1.7 +# from sphinx.ext import apidoc +# except ImportError: +# from sphinx import apidoc -output_dir = os.path.join(__location__, "api") -module_dir = os.path.join(__location__, "../src/osc_physrisk_financial") -try: - shutil.rmtree(output_dir) -except FileNotFoundError: - pass +# # output_dir = os.path.join(__location__, "api") +# module_dir = os.path.join(__location__, "../src/osc_physrisk_financial") +# try: +# shutil.rmtree(output_dir) +# except FileNotFoundError: +# pass -try: - import sphinx +# try: +# import sphinx - cmd_line = f"sphinx-apidoc --implicit-namespaces -f -o {output_dir} {module_dir}" +# cmd_line = f"sphinx-apidoc --implicit-namespaces -f -o {output_dir} {module_dir}" - args = cmd_line.split(" ") - if tuple(sphinx.__version__.split(".")) >= ("1", "7"): - # This is a rudimentary parse_version to avoid external dependencies - args = args[1:] +# args = cmd_line.split(" ") +# if tuple(sphinx.__version__.split(".")) >= ("1", "7"): +# # This is a rudimentary parse_version to avoid external dependencies +# args = args[1:] - apidoc.main(args) -except Exception as e: - print("Running `sphinx-apidoc` failed!\n{}".format(e)) +# apidoc.main(args) +# except Exception as e: +# print("Running `sphinx-apidoc` failed!\n{}".format(e)) # -- General configuration --------------------------------------------------- @@ -63,7 +62,6 @@ # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", - "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.autosummary", "sphinx.ext.viewcode", @@ -72,6 +70,8 @@ "sphinx.ext.ifconfig", "sphinx.ext.mathjax", "sphinx.ext.napoleon", + "sphinx_design", + "myst_parser", ] # Add any paths that contain templates here, relative to this directory. @@ -88,7 +88,18 @@ # General information about the project. 
project = "osc-physrisk-financial" -copyright = "2024, github-actions[bot]" +copyright = "2024, Arfima Dev" +author = "Arfima Dev" + +# Summary +autosummary_generate = True + +# Docstrings of private methods +autodoc_default_options = { + "members": True, + "undoc-members": True, + "private-members": False, +} # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -153,14 +164,15 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "alabaster" - +# html_theme = "alabaster" +html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { - "sidebar_width": "300px", - "page_width": "1200px" + # "sidebar_width": "300px", + # "page_width": "1200px", + "logo": {"text": "PhysRisk Financial"} } # Add any paths that contain custom themes here, relative to this directory. @@ -168,14 +180,14 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -# html_title = None +html_title = "Physrisk Financial Documentation" # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -# html_logo = "" +html_logo = "images/OS-Climate-Logo.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 @@ -196,7 +208,13 @@ # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -# html_sidebars = {} +html_sidebars = { + "readme": [], + "changelog": [], + "authors": [], + "contributing": [], + "license": [], +} # Additional templates that should be rendered to pages, maps page names to # template names. @@ -246,7 +264,13 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ("index", "user_guide.tex", "osc-physrisk-financial Documentation", "github-actions[bot]", "manual") + ( + "index", + "user_guide.tex", + "osc-physrisk-financial Documentation", + "Arfima Dev", + "manual", + ) ] # The name of an image file (relative to this directory) to place at the top of diff --git a/docs/dynamics.rst b/docs/dynamics.rst new file mode 100644 index 0000000..a4b7846 --- /dev/null +++ b/docs/dynamics.rst @@ -0,0 +1,7 @@ +Dynamics +======== + +.. automodule:: osc_physrisk_financial.dynamics + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/functions.rst b/docs/functions.rst new file mode 100644 index 0000000..98d73f5 --- /dev/null +++ b/docs/functions.rst @@ -0,0 +1,7 @@ +Functions +========= + +.. 
automodule:: osc_physrisk_financial.functions + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/images/OS-Climate-Logo.png b/docs/images/OS-Climate-Logo.png new file mode 100644 index 0000000..36c8878 Binary files /dev/null and b/docs/images/OS-Climate-Logo.png differ diff --git a/docs/index.rst b/docs/index.rst index 521f362..40add41 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,40 +1,98 @@ +Physrisk Financial ====================== -osc-physrisk-financial -====================== -This is the documentation of **osc-physrisk-financial**. +- **version**: 0.1 (See `Changelog `_) +- **date**: July 4th 2024 + +An OS-Climate project, **osc-physrisk-financial** is a library for valuating assets under different climate risk scenarios. + +.. _cards-clickable: + +.. + list with all the possible icons for the grid + https://sphinx-design.readthedocs.io/en/latest/badges_buttons.html + +.. raw:: html + + + + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: Overview + :link: readme.html + :text-align: center + + :octicon:`book;5em;sd-text-info` + ^^^ + Check the getting started guides and tutorials to learn how to install and use **osc-physrisk-financial**. + + .. grid-item-card:: Code documentation + :link: modules.html + :text-align: center -.. note:: + :octicon:`code;5em;sd-text-info` + ^^^ + Check the documentation of the code used in **osc-physrisk-financial**. - This is the main page of your project's `Sphinx`_ documentation. - It is formatted in `reStructuredText`_. Add additional pages - by creating rst-files in ``docs`` and adding them to the `toctree`_ below. - Use then `references`_ in order to link them from this page, e.g. - :ref:`authors` and :ref:`changes`. +.. grid:: 2 + :gutter: 1 - It is also possible to refer to the documentation of other Python packages - with the `Python domain syntax`_. By default you can reference the - documentation of `Sphinx`_, `Python`_, `NumPy`_, `SciPy`_, `matplotlib`_, - `Pandas`_, `Scikit-Learn`_. You can add more by extending the - ``intersphinx_mapping`` in your Sphinx's ``conf.py``. + .. grid-item-card:: Changelog + :link: changelog.html + :text-align: center - The pretty useful extension `autodoc`_ is activated by default and lets - you include documentation from docstrings. Docstrings can be written in - `Google style`_ (recommended!), `NumPy style`_ and `classical style`_. + :octicon:`list-ordered;5em;sd-text-info` + ^^^ + Check the history of the evolution of the code. + + .. grid-item-card:: Contributions & Help + :link: contributing.html + :text-align: center + + :octicon:`code-of-conduct;5em;sd-text-info` + ^^^ + If you want to contribute to the development take a look to the development guidelines first. + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: Authors + :link: authors.html + :text-align: center + + :octicon:`people;5em;sd-text-info` + ^^^ + + .. grid-item-card:: License + :link: license.html + :text-align: center + + :octicon:`file-badge;5em;sd-text-info` + ^^^ Contents -======== +========== .. toctree:: :maxdepth: 2 Overview + Code documentation + Changelog Contributions & Help - License Authors - Changelog - Module Reference + License Indices and tables @@ -58,4 +116,4 @@ Indices and tables .. _autodoc: https://www.sphinx-doc.org/en/master/ext/autodoc.html .. _Google style: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings .. _NumPy style: https://numpydoc.readthedocs.io/en/latest/format.html -.. 
_classical style: https://www.sphinx-doc.org/en/master/domains.html#info-field-lists +.. _classical style: https://www.sphinx-doc.org/en/master/domains.html#info-field-lists \ No newline at end of file diff --git a/docs/license.rst b/docs/license.rst index 3989c51..725c6af 100644 --- a/docs/license.rst +++ b/docs/license.rst @@ -4,4 +4,5 @@ License ======= -.. include:: ../LICENSE.txt +.. include:: ../LICENSES/Apache-2.0.txt + :parser: myst_parser.sphinx_ diff --git a/docs/modules.rst b/docs/modules.rst new file mode 100644 index 0000000..1b5ebfc --- /dev/null +++ b/docs/modules.rst @@ -0,0 +1,46 @@ +Code documentation +================== + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: Assets + :link: assets.html + :text-align: center + + :octicon:`list-unordered;5em;sd-text-info` + ^^^ + + .. grid-item-card:: Dynamics + :link: dynamics.html + :text-align: center + + :octicon:`pulse;5em;sd-text-info` + ^^^ + +.. grid:: 2 + :gutter: 1 + + .. grid-item-card:: Functions + :link: functions.html + :text-align: center + + :octicon:`tools;5em;sd-text-info` + ^^^ + + .. grid-item-card:: Random Variables + :link: random_variables.html + :text-align: center + + :octicon:`graph;5em;sd-text-info` + ^^^ + + +.. toctree:: + :maxdepth: 1 + :hidden: + + assets + dynamics + functions + random_variables \ No newline at end of file diff --git a/docs/random_variables.rst b/docs/random_variables.rst new file mode 100644 index 0000000..f9af248 --- /dev/null +++ b/docs/random_variables.rst @@ -0,0 +1,7 @@ +Random Variables +================ + +.. automodule:: osc_physrisk_financial.random_variables + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/readme.rst b/docs/readme.rst index 81995ef..6a59a1a 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -1,2 +1,29 @@ -.. _readme: -.. include:: ../README.rst +Overview +====================== + +osc-physrisk-financial +---------------------- +Physical climate risk financial valuation + +.. image:: images/OS-Climate-Logo.png + :alt: drawing + :width: 150 + +About osc-physrisk-financial +---------------------------- + +An `OS-Climate `_ project, osc-physrisk-financial is a library for valuating assets under different climate risk scenarios. + +Using the library +----------------- + +The library can be run locally and is installed via:: + + pip install osc-physrisk-financial + +The library uses the output generated by the `physrisk `_ library. + +Note +---- + +This is the first stage of development, where the models are intentionally simple, focusing on setting up the proper structure of the library. diff --git a/docs/requirements.txt b/docs/requirements.txt index 2ddf98a..d74d910 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,7 @@ # Requirements file for ReadTheDocs, check .readthedocs.yml. # To build the module reference correctly, make sure every external package # under `install_requires` in `setup.cfg` is also listed here! 
-sphinx>=3.2.1 -# sphinx_rtd_theme +sphinx>=7.3.7 +pydata_sphinx_theme>=0.15.4 +sphinx_design>=0.6.0 +myst_parser>=3.0.1 diff --git a/pyproject.toml b/pyproject.toml index b4025d7..2877dfb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,28 +12,53 @@ package-dir = "src" [project] name = "osc-physrisk-financial" -description = "OS-Climate Python Project" -readme = "README.rst" +description = "Physical climate risk financial valuation" +readme = "README.md" +dynamic = ["version"] +keywords = ["Financial risk", "climate", "Physical risk"] authors = [ - {name = "github-actions[bot]", email = "41898282+github-actions[bot]@users.noreply.github.com"}, + {name = "Arfima Dev", email = "dev@arfima.com"}, ] classifiers = [ "Development Status :: 4 - Beta", "Programming Language :: Python", ] dependencies = [ - "importlib-metadata; python_version<\"3.8\"", + "scipy>=1.10.1", + "pandas>=2.0.3", + "plotly>=5.15", + "numpy>=1.24", ] license = {text = "Apache-2.0"} requires-python = ">=3.10" [project.urls] -Homepage = "https://github.com/pyscaffold/pyscaffold/" -Documentation = "https://pyscaffold.org/" +"Homepage" = "https://github.com/os-climate/osc-physrisk-financial" +"Documentation" = "https://github.com/os-climate/osc-physrisk-financial" +"Bug Tracker" = "https://github.com/os-climate/osc-physrisk-financial/issues" + [project.optional-dependencies] +docs = [ + "sphinx>=5.3.0", + "sphinx_copybutton==0.5.0", + "sphinx_toggleprompt==0.2.0", + "myst_nb==0.16.0", + "sphinx_design==0.3", + "pydata-sphinx-theme==0.13.3", + "sphinx_simplepdf", + "numpydoc>=1.6.0", + "sphinxcontrib.bibtex", + "nbsphinx" +] testing = [ "pytest", "pytest-cov", - "setuptools", ] + +[tool.pdm.version] +source = "scm" +write_to = "osc_physrisk_financial/_version.py" +write_template = "version: str\n__version__: str\n__version__ = version = '{}'\n" +tag_regex = '^(?:[\w-]+-)?(?P[vV]?\d+(?:\.\d+){0,2}[^\+-]*?)(?:\-.*)?(?:\+.*)?(?:\-.*)?$' + diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 0000000..265e6b0 --- /dev/null +++ b/ruff.toml @@ -0,0 +1,32 @@ +[lint] +extend-fixable = [ + #Instead of trailing-whitespace + "W291", "W293" + ] + +extend-select = [ + # Instead of pydocstyle + "D", + #Instead of flake8 + "E", "F","B", + # Instead of pep8-naming + "N", + # Instead of flake8-debugger or debug-statements + "T10", +] + +ignore = [ + "E203", + "E501", + + # Avoid incompatible rules + "D203", + "D213", +] + +[lint.per-file-ignores] +# Ignore `D` rules everywhere except for the `src/` directory. 
+"!src/**.py" = ["D"] + +[lint.pycodestyle] +max-line-length = 160 diff --git a/src/osc_physrisk_financial/__init__.py b/src/osc_physrisk_financial/__init__.py index a65eb6f..2c149d4 100644 --- a/src/osc_physrisk_financial/__init__.py +++ b/src/osc_physrisk_financial/__init__.py @@ -1,10 +1,6 @@ -import sys +"""Init for osc-physrisk.""" -if sys.version_info[:2] >= (3, 8): - # TODO: Import directly (no need for conditional) when `python_requires = >= 3.8` - from importlib.metadata import PackageNotFoundError, version # pragma: no cover -else: - from importlib_metadata import PackageNotFoundError, version # pragma: no cover +from importlib.metadata import PackageNotFoundError, version # pragma: no cover try: # Change here if project is renamed and does not equal the package name diff --git a/src/osc_physrisk_financial/assets.py b/src/osc_physrisk_financial/assets.py new file mode 100644 index 0000000..84043cf --- /dev/null +++ b/src/osc_physrisk_financial/assets.py @@ -0,0 +1,333 @@ +"""Assets definitions.""" + +from typing import Optional, Sequence, Union + +import numpy as np +import pandas as pd + +import osc_physrisk_financial.functions as afsfun +from osc_physrisk_financial.dynamics import Dynamic +from osc_physrisk_financial.random_variables import DiscreteRandomVariable + + +class Asset(object): + """Class for instantiating a general Asset. + + Parameters + ---------- + value_0 : float + Value of the asset at 0, :math:`V_{0}` + + dynamics : dynamics.Dynamic + Dynamics assumed for the asset value. + + name : string, optional + Name for identification. + + cash_flows: Sequence, optional + Sequence of the associated cash flows (for cash flow generating assets only). + + References + ---------- + Methodology, Chapter 4 of Methodology survey (Overleaf). + + """ + + # TODO: This is not the final parameters list. Check OS-C (assets.py) + # TODO: we should include latitude: float, longitude: float. + + def __init__( + self, + value_0: float, + dynamics: Optional[Dynamic] = None, + name: Optional[str] = None, + cash_flows: Optional[Sequence] = None, + ): + """Initialize the AssetClass with dynamics and name. + + `dynamics` or `name` are optional. 'value_0 must be provided' + + Parameters + ---------- + value_0 : float + Initial value + dynamics : Optional[Dynamic] = None + Asset value dynamics. + name : Optional[str] = None + Asset name. + cash_flows: Optional[Sequence] = None + Cash flows. + + """ + self.value_0 = value_0 + self.dynamics = dynamics + self.name = name # TODO: Not sure if this is useful. + self.cash_flows = cash_flows + + # TODO: Maybe here we can use OS-C standard: + # class Asset: + # def __init__(self, latitude: float, longitude: float, **kwargs): + # self.latitude = latitude + # self.longitude = longitude + # self.__dict__.update(kwargs) + + +class RealAsset(Asset): + """Class for instantiating a Real Asset. + + Parameters + ---------- + value_0 : float + Value of the asset at 0, :math:`V_{0}` + + dynamics : dynamics.Dynamic + Dynamics assumed for the asset value. + + name : string, optional + Name for identification. + + References + ---------- + Methodology, Chapter 4 of Methodology survey (Overleaf). + + """ + + def __init__(self, value_0: float, dynamics: Dynamic, name: Optional[str] = None): + """Initialize the RealAssetClass with dynamics and name. + + `dynamics` or `name` are optional. 'value_0 must be provided' + + Parameters + ---------- + value_0 : float + Initial value + dynamics : Optional[Dynamic] = None + Asset value dynamics. 
+ name : Optional[str] = None + Asset name. + + """ + super().__init__(value_0=value_0, dynamics=dynamics, name=None) + + def financial_losses( + self, dates: Union[pd.DatetimeIndex, list], damage: DiscreteRandomVariable + ): + """Compute financial losses for a real asset. + + Parameters + ---------- + dates : pandas.DatetimeIndex, list of strings, pandas.Timestamp, or string + Dates for which we want to compute :math:`X_t` of [Methodology]. TODO: Do we want to include t_0 here? + damage : random_variables.RandomVariable + Damage caused to the asset. + + Returns + ------- + random_variables.RandomVariable + Random variable representing :math:`X_{t}` [Methodology]. + + References + ---------- + Methodology, Chapter 4 of Methodology survey (Overleaf). + + """ + dates = afsfun.dates_formatting(dates) + value_t = self.dynamics.compute_value(dates) + losses = value_t * damage + return losses + + def ltv( + self, + dates: Union[pd.DatetimeIndex, list], + damages: Sequence[DiscreteRandomVariable], + loan_amounts: Sequence[float], + ): + r"""Compute Loan To Value (LTV) for a real asset. + + Parameters + ---------- + dates : pandas.DatetimeIndex, list of strings, pandas.Timestamp, or string + Dates for which we want to compute :math:`X_t` of [Methodology]. + Note that :math: `t_0` should be included here. # TODO: Do we want to include t_0 here? + damages : Sequence[DiscreteRandomVariable] + Sequence of DiscreteRandomVariable instances representing damage for each asset. + loan_amounts : Sequence[float] + Sequence of floats representing loan amount for each asset. + + Returns + ------- + random_variables.RandomVariable + Random variable representing LTV of [Methodology]. + It returns a numpy.ndarray of 2 dimensions and shape :math:`(\\# dates, \\# assets)`. + + References + ---------- + Methodology, Chapter 4 of Methodology survey (Overleaf). + + """ + # Define a function to apply the check to an array of DiscreteRandomVariable instances + damages = np.array(damages) + + def validate_values(drvs: Sequence[DiscreteRandomVariable]): + # Vectorize check_values method + vec_check = np.vectorize(lambda drv: drv.check_values()) + values_valid = vec_check(drvs) + + if not np.all(values_valid): + raise ValueError( + "One or more damages have values outside the 0 to 1 range." + ) + + validate_values(damages) + + if len(damages) != len(loan_amounts): + raise ValueError( + "The lengths of 'damage' and 'loan_amount' (number of assets) must match." + ) + # We reshape for allowing broadcasting + valuet = self.dynamics.compute_value(dates=dates).reshape((len(dates), 1)) + damages_mod = (1 + (-1) * damages).reshape( + 1, len(damages) + ) # Note that __sub__ is not needed in class DiscreteRandomVariable. + valuet_sc = valuet * damages_mod + return loan_amounts / valuet_sc + + # TODO: Maybe it is interesting to vectorize the computation of the mean and variance of the LTVs computed by leveraging numpy. + # Check impact_distrib.py from OS-C. As it stands, we can do it using np.vectorize (see methods means_vectorized and means_vectorized) + # but we know it is not efficient (essentially a for loop) + # https://numpy.org/doc/stable/reference/generated/numpy.vectorize.html#:~:text=returns%20a%20ufunc-,Notes,-The%20vectorize%20function + + +class PowerPlants(Asset): + """Class for instantiating a PowerPlant Asset. + + Either `production` or both `capacity` and `av_rate` must be provided. If not directly provided, `production`, is calculated as: + `production` = `capacity` * `av_rate` * 8760. 
+ + Parameters + ---------- + dynamics : dynamics.Dynamic + Dynamics assumed for the asset value. + + name : string, optional + Name for identification. + + production : float, optional + Real annual production of a power plant in Wh. + + capacity : float, optional + Capacity of the power plant in W. + + av_rate : float, optional + Availability factor of production. + + References + ---------- + `Canonical_Example_Power_Generation_Plants_Floods` (Overleaf). + + Notes + ----- + In this case the cash flows are defined through production. + + """ + + def __init__( + self, dynamics: Optional[Dynamic] = None, name: Optional[str] = None, **kwargs + ): + """Initialize the PowerPlantsClass with dynamics, name and a variable number of arguments. + + `dynamics` or `name` are optional. 'value_0 must be provided' + + Parameters + ---------- + dynamics : dynamics.Dynamic + Dynamics assumed for the asset value. + name : string, optional + Name for identification. + **kwargs : dict + Variable number of arguments. + + """ + if "production" in kwargs: + production = kwargs["production"] + # If not, check if capacity and av_rate are both provided + elif "capacity" in kwargs and "av_rate" in kwargs: + production = ( + kwargs["capacity"] * kwargs["av_rate"] * 8760 + ) # Number of hours in a year + else: + raise ValueError( + "Must provide either 'production' or both 'capacity' and 'av_rate'." + ) + super().__init__( + value_0=production, dynamics=dynamics, name=None, cash_flows=None + ) + + @staticmethod + def discount(r: Sequence[float], n: Optional[int] = 1) -> float: + r"""Compute discount for a given annual evolution of interest rates. + + Parameters + ---------- + r : Sequence[float] + An array or sequence including the yearly interest rate for the required period. + + n : int, optional + By default r is a list containing the yearly interest rates. + To consider a constant interest rate, introduce the value of the + interest rate in r and n = number of years to be discounted. + + Returns + ------- + float + Float containing the discounting factor calculated as + :math:`\prod_{i} 1/(1+r_i)^n`. + + """ + if n < 1: + raise ValueError("Discounting cash flows in negative number of year") + + if len(r) > 1 and n != 1: + raise ValueError("Discounting cash flows has a wrong format") + + aux = np.array(r) + 1 + disc = 1 / np.prod(aux) ** n + + return disc + + def financial_losses( + self, + damages: DiscreteRandomVariable, + energy_price: float, + r: Sequence[float], + n: Optional[int] = 1, + ) -> DiscreteRandomVariable: + r"""Compute financial losses for a PowerPlant asset. + + Parameters + ---------- + damages : DiscreteRandomVariable + Random Variable with the production loss expressed as a decimal (50% :math:`\equiv` 0.5) for each plant. + + energy_price : float + Average price in €/Wh of the energy production. + + r : list[float] + An array or sequence containing the annual interest rates. + + n : int, optional + Number of years to discount. + + Returns + ------- + DiscreteRandomVariable + Random Variable containing the financial losses for the asset. + + Notes + ----- + The use of `r` and `n` follows the same convention as in `discount` method. 
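+        Examples
+        --------
+        A minimal illustrative sketch (all numbers are hypothetical: a 100 MW plant
+        with a 30% availability factor, an energy price of 50 €/MWh expressed in €/Wh,
+        and a flat 3% rate discounted over 10 years):
+
+        >>> plant = PowerPlants(capacity=100e6, av_rate=0.3, name="Hypothetical plant")
+        >>> damages = DiscreteRandomVariable(values=[0.0, 0.1, 0.5], probabilities=[0.7, 0.2, 0.1])
+        >>> losses = plant.financial_losses(damages=damages, energy_price=5e-5, r=[0.03], n=10)
+        >>> isinstance(losses, DiscreteRandomVariable)
+        True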
+ + + """ + self.cash_flows = self.value_0 * energy_price * damages + return self.cash_flows * self.discount(r, n) diff --git a/src/osc_physrisk_financial/dynamics.py b/src/osc_physrisk_financial/dynamics.py new file mode 100644 index 0000000..11a6932 --- /dev/null +++ b/src/osc_physrisk_financial/dynamics.py @@ -0,0 +1,121 @@ +"""Dynamics.""" + +from abc import ABC, abstractmethod +from typing import Optional, Union + +import numpy as np +import pandas as pd + +import osc_physrisk_financial.functions as afsfun + + +class Dynamic(ABC): + """A base class for simulating asset value dynamics. + + Notes + ----- + This base class is based on Underlying from pypricing. + + """ + + def __init__(self, name: Optional[str] = None): + """Initialize a new instance of Dynamic. + + Attributes + ---------- + name : string, optional + Name for identification. + + """ + self.name = name + self.data = pd.DataFrame() + + @abstractmethod + def compute_value(self, dates: Union[pd.DatetimeIndex, list]): + """Abstract method for computing the asset value at future dates. + + Attributes + ---------- + dates : pandas.DatetimeIndex, list of strings, pandas.Timestamp, or string + Future dates for which the asset value wants to be computed. + + Notes + ----- + This base class is based on Underlying from pypricing. + + """ + + # TODO: Maybe we can use methods like set_data, get_data, get_value, get_dates, get_arithmetic_return, get_return from Underlying. + # TODO: We have to think about this while developing the code. + + +class ConstantGrowth(Dynamic): + r"""Class representing a constant growth model: :math:`V_t = V_0 \\times (1 + \mu)^t.`. + + Parameters + ---------- + growth_rate : float + Constant growth rate :math:`\mu.` + + name : string, optional + Name for identification. + + value0 : float + :math:`V_0` in [Methodology] + + Examples + -------- + >>> cg = ConstantGrowth(growth_rate=0.02, name='RealAsset') + + References + ---------- + Methodology, Chapter 4 of Methodology survey (Overleaf). + + """ + + def __init__(self, growth_rate: float, value0: float, name: Optional[str] = None): + r"""Initialize a new instance of ConstantGrowth. + + Attributes + ---------- + growth_rate : float + Constant growth rate :math:`\mu.` + + value0 : float + :math:`V_0` in [Methodology] + + name : string, optional + Name for identification. + + """ + super().__init__(name=name) + self.growth_rate = growth_rate + self.value0 = value0 + + def compute_value(self, dates: Union[pd.DatetimeIndex, list]): + """Compute the asset value at future dates. + + Attributes + ---------- + dates : pandas.DatetimeIndex, list of strings, pandas.Timestamp, or string + Dates for which the value wants to be computed. Note that in this model we are only + interested in the years, so we only extract that part. The initial date is also included + here ( :math:`t_{0}` such that :math:`V_{t_0} = V_0` of [Methodology]. + + Returns + ------- + np.ndarray + :math:`V_t` in [Methodology] for the different dates. It includes the value :math:`V_0`. + Note that the dates have been sorted and the output is returned with the dates sorted. + + References + ---------- + Methodology, Chapter 4 of Methodology survey (Overleaf). 
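+        Examples
+        --------
+        A small illustrative sketch (the initial value and growth rate below are hypothetical):
+
+        >>> cg = ConstantGrowth(growth_rate=0.02, value0=1000.0)
+        >>> values = cg.compute_value(["2024-01-01", "2025-01-01", "2026-01-01"])
+        >>> round(float(values[-1]), 2)  # 1000 * 1.02 ** 2
+        1040.4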
+ + """ + dates = afsfun.dates_formatting(dates) + years = dates.year + years = years - years[0] + valuet = self.value0 * (1 + self.growth_rate) ** years + valuet = np.array(valuet) + return valuet diff --git a/src/osc_physrisk_financial/functions.py b/src/osc_physrisk_financial/functions.py new file mode 100644 index 0000000..ed9c862 --- /dev/null +++ b/src/osc_physrisk_financial/functions.py @@ -0,0 +1,172 @@ +"""Auxiliary functions.""" + +import math + +import numpy as np +import pandas as pd +import plotly.graph_objects as go +from scipy import optimize + +# TODO: We should make pypricing library installable so we can import pypricing.pricing.functions.py. +# TODO: Meanwhile we have copied this file in this repository. + + +def check_all_nonnumeric(arr): + """Check if all elements in a numpy array or Python list are non-numeric. + + This function tries to convert each element in the array or list to a float. If the + conversion raises a ValueError or TypeError, or if the value is nan, it means the + element is non-numeric, so the function continues to the next element. If the + conversion does not raise an exception and the value is not nan, it means the element + is numeric, so the function immediately returns False. If the function finishes + checking all elements without finding a numeric one, it returns True. + + Parameters + ---------- + arr : numpy.ndarray or list + The array or list to check. + + Returns + ------- + bool + True if all elements are non-numeric, False otherwise. + + """ + for i in arr: + try: + val = float(i) + if not math.isnan(val): + return False + except (ValueError, TypeError): + continue + return True + + +def find_root(func, x0, interval, tolerance=10**-8, fprime=None): + """Find the root of a given function, using several methods. + + Each method is tried in turn until one succeeds. + + If none succeeds, we plot the function in interval. + + Parameters + ---------- + func : callable + The function for which the root is to be computed. + x0 : float + Initial guess for the root. + interval : list + Interval [a,b] for ridder, bisecction and brentq. + tolerance : float + If func(solution)>tolerance an exception is raised. + fprime : callable, optional + The derivative of the function. If not provided, the Newton method will use the secant method. + + Returns + ------- + float + The root found by the successful method (unless all methods failed). + + """ + methods = [ + ("fixed_point", optimize.fixed_point), + ("newton (Secant)", optimize.newton), + ("newton (Newton-Raphson)", optimize.newton), + ("bisection", optimize.bisect), + ("brentq", optimize.brentq), + ("ridder", optimize.ridder), + ] + + root = None + + for name, method in methods: + try: + if name == "fixed_point": + root = method(lambda x: x - func(x), x0) + elif name == "newton (Secant)": + root = method(func, x0, fprime=None) + elif name == "newton (Newton-Raphson)": + root = method(func, x0, fprime=fprime) + else: + root = method(func, interval[0], interval[1]) + # print(f"Method {name} succeeded with root {root}") + break # if method succeeded, stop trying the rest + except Exception: + pass + if root is None: + x_vals = np.linspace(interval[0], interval[1], 200) + y_vals = [ + func(x) for x in x_vals + ] # Note that this code snippet is intentionally not vectorized. 
+ fig = go.Figure(data=go.Scatter(x=x_vals, y=y_vals)) + fig.update_layout(title="Plot of func", xaxis_title="x", yaxis_title="y") + fig.show() + raise Exception("All methods failed") + else: + if np.abs(func(root)) > tolerance: # Maybe another tolerance can be chosen. + raise Exception("The numerical error is too large.") + else: + return root + + +def dates_formatting(*date_sets): + """Convert dates to a consistent format and sort them in ascending order. + + Parameters + ---------- + date_sets : pandas.DatetimeIndex,list of strings, pandas.Timestamp, or string + Dates to be formatted. It can be a single date (as a pandas.Timestamp or its string representation) + or an array-like object (as a pandas.DatetimeIndex or a list of its string representation) containing dates. + + Returns + ------- + sorted_dates : pandas.DatetimeIndex + A pandas DatetimeIndex object containing the formatted dates in ascending order. + + Examples + -------- + >>> dates_formatting('2022-01-01') + DatetimeIndex(['2022-01-01'], dtype='datetime64[ns]', freq=None) + + >>> dates_formatting(['2022-01-03', '2022-01-01', '2022-01-02']) + DatetimeIndex(['2022-01-01', '2022-01-02', '2022-01-03'], dtype='datetime64[ns]', freq=None) + + >>> dates_formatting(['2022-01-03', '2022-01-01', '2022-01-02'], '2022-01-01') + [DatetimeIndex(['2022-01-01', '2022-01-02', '2022-01-03'], dtype='datetime64[ns]', freq=None), DatetimeIndex(['2022-01-01'], dtype='datetime64[ns]', freq=None)] + + """ + formatted_dates = [] + for date_set in date_sets: + if np.asarray(date_set).shape == (): + dates = [date_set] + else: + dates = date_set + formatted_dates.append(pd.to_datetime(dates).sort_values()) + if len(formatted_dates) == 1: + formatted_dates = formatted_dates[0] + + return formatted_dates + + +def contains_word(string_list, word): + """Check if strings in the given list contain the specified word. Words in each string are separated by underscores. + + Parameters + ---------- + string_list : list of str + The list of strings where each string has words separated by underscores. + word : str + The word to search for within the strings. + + Returns + ------- + list of str + A list of strings from the input `string_list` that contain the specified `word`. + + Examples + -------- + >>> contains_word(['word1_word2', 'word3_word4', 'word2_word5'], 'word2') + ['word1_word2', 'word2_word5'] + + """ + return [s for s in string_list if word in s.split("_")] diff --git a/src/osc_physrisk_financial/random_variables.py b/src/osc_physrisk_financial/random_variables.py new file mode 100644 index 0000000..b5f01c0 --- /dev/null +++ b/src/osc_physrisk_financial/random_variables.py @@ -0,0 +1,1030 @@ +"""functions for random and discrete random variables.""" + +from abc import ABC, abstractmethod +from typing import Optional, Union, Sequence, Any + +import numpy as np +import plotly.graph_objects as go + + +class RandomVariable(ABC): + """Abstract class with the common methods and attributes of discrete and continuous random variables. + + Ideally, we wouldn't have to implement this class from scratch, but an initial search seems to indicate + that what we want doesn't exist in another libraries (like SciPy). + """ + + @abstractmethod + def __init__(self): + """Initialize a RandomVariable.""" + + @abstractmethod + def __mul__(self, other: Union[float, int]): + """Multiply the random variable by a real number. Case RandomVariable * real number. 
+ + This method scales the pdf or pmf of the random variable by a given scalar + while keeping the probabilities unchanged. + + Parameters + ---------- + other : float, or int + The scalar by which to multiply the pdf or pmf of the random variable. + + Returns + ------- + RandomVariable + A new instance of DiscreteRandomVariable with scaled pdf or pmf. + + Notes + ----- + We define this class since operations like the ones defined are not implemented in scipy. + For instance: TypeError: unsupported operand type(s) for *: 'int' and 'rv_sample'. + + """ + + def __rmul__(self, other: Union[float, int]): + """Multiply the random variable by a real number. Case real number * RandomVariable. + + This method delegates to `__mul__`, assuming commutativity of the operation. + + Parameters + ---------- + other : float, or int + The real number by which to multiply the random variable. + + Returns + ------- + RandomVariable + A new instance of DiscreteRandomVariable with scaled pdf or pmf. + + """ + return self.__mul__(other) + + def __neg__(self): + """Negate the random variable.""" + return self.__mul__(-1) + + @abstractmethod + def __add__(self, other: Union[float, int]): + """Add a real number to the random variable. Case RandomVariable + real number. + + This method shifts the pdf or pmf of the random variable by a given number + while keeping the probabilities unchanged. + + Parameters + ---------- + other : float, or int + The real number to add to the pdf or pmf of the random variable. + + Returns + ------- + RandomVariable + A new instance of DiscreteRandomVariable with shifted pdf or pmf. + + """ + + def __radd__(self, other): + """Add a real number from the random variable. Case real number + RandomVariable. + + This method is called if the first operand does not support addition + or returns NotImplemented. It allows commutative addition where the scalar + is on the left side of the `+`. + + Parameters are the same as __add__. + """ + # __add__ handles the actual operation, so we just delegate to it. + return self.__add__(other) + + def __sub__(self, other): + """Subtract a real number to the random variable. Case RandomVariable - real number. + + __add__ handles the actual operation, so we just delegate to it. + + Parameters are the same as __add__. + """ + return self.__add__(-other) + + def __rsub__(self, other): + """Subtract the random variable from a real number. Case real number - RandomVariable. + + __add__ and __mul__ handle the actual operation, so we just delegate to them. + + Parameters are the same as __add__. + """ + return self.__mul__(-1).__add__(other) + + @abstractmethod + def __rtruediv__(self, other): + """Implement division where a real number is divided by a DiscreteRandomVariable. + + Parameters + ---------- + other : float, or int + The real number numerator. + + Returns + ------- + RandomVariable: A new instance representing the result. + + Raises + ------ + ValueError: If division by any value of the DiscreteRandomVariable is not possible. + + """ + + @abstractmethod + def __eq__(self, other: Any) -> bool: + """Check if the current instance equals another instance of a RandomVariable. + + Parameters + ---------- + other : Any + The object to compare against. + + Returns + ------- + bool + True if the objects are considered equal, False otherwise. + + """ + + @abstractmethod + def mean(self): + """Calculate the mean of the random variable. + + Returns + ------- + float + The mean of the random variable. 
+ + Notes + ----- + This is an abstract method and must be implemented by subclasses. + + """ + + @staticmethod + @abstractmethod + def means_vectorized(rvs: Sequence["RandomVariable"]) -> np.ndarray: + """Abstract static method to compute means for an array of RandomVariable instances using a vectorized approach. + + Parameters + ---------- + rvs : Sequence[RandomVariable] + An array or sequence of RandomVariable instances. + + Returns + ------- + np.ndarray + An array of floats representing the means of the random variables. + + Notes + ----- + This is an abstract method and must be implemented by subclasses. + + """ + + @abstractmethod + def var(self): + """Calculate the variance of the random variable. + + Returns + ------- + float + The variance of the discrete random variable. + + Notes + ----- + This is an abstract method and must be implemented by subclasses. + + """ + + @staticmethod + @abstractmethod + def vars_vectorized(rvs: Sequence["RandomVariable"]) -> np.ndarray: + """Abstract static method to compute variances for an array of RandomVariable instances using a vectorized approach. + + Parameters + ---------- + rvs : Sequence[RandomVariable] + An array or sequence of RandomVariable instances. + + Returns + ------- + np.ndarray + An array of floats representing the variances of the random variables. + + Notes + ----- + This is an abstract method and must be implemented by subclasses. + + """ + + @abstractmethod + def compute_cdf(self): + """Compute the Cumulative Distribution Function (CDF) for the random variable.""" + + @abstractmethod + def compute_var(self, percentile=95): + r"""Compute the Value at Risk :math:`V^{p}_{X}` for a random variable :math:`X`. + + The Value at Risk (:math:`V^{p}_{X}`) of a discrete random variable :math:`X` at the level + :math:`p \in (0, 1)` is the p-quantile of :math:`X` defined by the condition that the cumulative + distribution function :math:`F_{X}(x)` is greater than or equal to :math:`p`. Formally, + :math:`V^{p}_{X}` is given by: + + .. math:: V^{p}_{X} := \inf\{x \in \mathbb{R} : P(X \leq x) \geq p\}. + + Notes + ----- + This is an abstract method and must be implemented by subclasses. + + """ + + @staticmethod + @abstractmethod + def compute_var_vectorized(rvs): + """Compute VaRs for an array of RandomVariable instances using a vectorized approach. + + Parameters + ---------- + rvs : Sequence[RandomVariable] + An array or sequence of RandomVariable instances. + + Returns + ------- + np.ndarray + An array of floats representing the VaRs of the random variables. + + Notes + ----- + This is an abstract method and must be implemented by subclasses. + + """ + + +class DiscreteRandomVariable(RandomVariable): + """A class to represent a discrete random variable derived from observed data. + + Parameters + ---------- + probabilities : array like + The probabilities associated with each interval or value in the histogram. + values : array like, optional + The specific values representing the discrete random variable. Required if `intervals` is not provided. + intervals : array like, optional + The intervals (bins) of the histogram representing the discrete random variable. Required if `values` is not provided. + convert_to_osc_format : bool, optional + If True, it ensures that the probabilities sum to 1 by adjusting the zero-impact bin. + This is needed for `ImpactDistrib` from OS-C. Default, False. 
+ + Examples + -------- + Values Example: + + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] # This should sum up to 1 + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + + Intervals Example: + + >>> intervals = [0, 0.2, 0.4, 0.6, 0.8, 1.0] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] # This should sum up to 1 + >>> drv = DiscreteRandomVariable(intervals=intervals, probabilities=probabilities) + + Notes + ----- + - We use intervals following OS-C convention. Internally, we work with the midpoints of each interval. + - We define this class since classes like rv_discrete from scipy do not support some important operations like multiplication + by scalar or adding a scalar to the random variable. However, it would be nice to have these features since they seem standard. + Maybe from another library outside Scipy. + - When the probabilities do not sum to one, as in the case of the ImpactDistrib class from OS-C, we add the missing value to zero + to make the sum equal to one. In this way, we create a "mass point" at zero, meaning that we take the mean value for each interval + except for zero, where we assign the remaining the probability. + TODO: We need to check the output (methodology implemented in code) of OS-C impact distribution so we are sure the constructor of + this class is properly defined. That is to say, verify that methodologically this is what we want given OS-C code. + + """ + + def __init__( + self, + probabilities: Sequence[Union[float, int]], + values: Optional[Sequence[Union[float, int]]] = None, + intervals: Sequence[Union[float, int]] = None, + convert_to_osc_format : Optional[bool] = False, + ): + """Initialize the ExampleClass with probabilities, and either values or intervals. + + Exactly one of `values` or `intervals` must be provided. + + Parameters + ---------- + probabilities : Sequence[Union[float, int]] + A sequence of probabilities which can be float or int. + values : Optional[Sequence[Union[float, int]]], optional + An optional sequence of values corresponding to the probabilities, by default None. + intervals : Optional[Sequence[Union[float, int]]], optional + An optional sequence of intervals, by default None. + convert_to_osc_format : Optional[bool] + Ensures that the probabilities sum to 1 by adjusting the zero-impact bin. False by default. + + Raises + ------ + ValueError: If both `values` and `intervals` are provided, or if neither is provided. + + """ + super().__init__() + if intervals is None and values is None: + raise ValueError("Either intervals or values must be provided.") + if intervals is not None and values is not None: + raise ValueError( + "Only one of intervals or values should be provided, not both." + ) + + self.probabilities = np.array(probabilities) + if intervals is not None: + if convert_to_osc_format: + probabilities = np.array(probabilities) + if not np.all((0 <= probabilities) & (probabilities <= 1)): + raise ValueError("All probabilities must be between 0 and 1.") + + if not np.all(np.diff(intervals) >= 0): + raise ValueError( + "Impact bins must be sorted in non-decreasing order." 
+                    )
+                total_prob = np.sum(probabilities)
+                if not np.isclose(total_prob, 1):
+                    intervals = np.array(intervals)
+                    if 0 in intervals:
+                        zero_index = np.where(intervals == 0)[0][0]
+                        # Adjust the zero-impact probability
+                        probabilities[zero_index] += 1 - total_prob
+                    else:
+                        intervals = np.insert(intervals, 0, 0)
+                        probabilities = np.insert(probabilities, 0, 1 - total_prob)
+                    self.intervals = intervals
+                    self.probabilities = np.array(probabilities)
+                    self.values = (self.intervals[1:-1] + self.intervals[2:]) / 2
+                    self.values = np.insert(self.values, 0, 0)
+            else:
+                self.intervals = np.array(intervals)
+                if not (self.intervals == np.sort(self.intervals)).all():
+                    raise ValueError("The intervals must be sorted increasingly.")
+                if len(self.intervals) != len(probabilities) + 1:
+                    raise ValueError(
+                        "The number of intervals must be one more than the number of probabilities."
+                    )
+                self.values = (self.intervals[:-1] + self.intervals[1:]) / 2
+                self.probabilities = np.array(probabilities)
+        else:
+            if len(values) != len(probabilities):
+                raise ValueError(
+                    "The number of values must match the number of probabilities."
+                )
+            sorted_indices = np.argsort(values)
+            self.values = np.array(values)[sorted_indices]
+            self.probabilities = np.array(probabilities)[sorted_indices]
+
+        # Ensure probabilities sum up to 1
+        if not np.isclose(self.probabilities.sum(), 1):
+            raise ValueError("The probabilities must sum up to 1.")
+
+    def __mul__(self, other: Union[float, int]):
+        """Multiply the discrete random variable by a scalar.
+
+        This method scales the values of the random variable by a given scalar
+        while keeping the probabilities unchanged.
+
+        Parameters
+        ----------
+        other : float or int
+            The scalar by which to multiply the values of the random variable.
+
+        Returns
+        -------
+        DiscreteRandomVariable
+            A new instance of DiscreteRandomVariable with scaled values.
+
+        """
+        if isinstance(other, (int, float)):
+            scaled_values = self.values * other
+            return DiscreteRandomVariable(
+                values=scaled_values, probabilities=self.probabilities
+            )
+        else:
+            return NotImplemented
+
+    def __add__(self, other: Union[float, int]):
+        """Add a scalar to the discrete random variable.
+
+        This method shifts the values of the random variable by a given scalar
+        while keeping the probabilities unchanged.
+
+        Parameters
+        ----------
+        other : float or int
+            The scalar to add to the values of the random variable.
+
+        Returns
+        -------
+        DiscreteRandomVariable
+            A new instance of DiscreteRandomVariable with shifted values.
+
+        """
+        if isinstance(other, (int, float)):
+            shifted_values = self.values + other
+            return DiscreteRandomVariable(
+                values=shifted_values, probabilities=self.probabilities
+            )
+        else:
+            return NotImplemented
+
+    def __rtruediv__(self, other: Union[float, int]):
+        r"""Implement division where a real number is divided by a DiscreteRandomVariable.
+
+        :math:`a / X`, where :math:`a` and :math:`X` are a real number and a discrete random variable, respectively.
+
+        Parameters
+        ----------
+        other : float or int
+            The real number (numerator) to be divided by the values of the random variable.
+
+        Returns
+        -------
+        DiscreteRandomVariable
+            A new instance representing the result.
+
+        Raises
+        ------
+        ValueError
+            If division by any value of the DiscreteRandomVariable is not possible.
+
+        Notes
+        -----
+        We don't really need to define :math:`a / X` but rather :math:`1 / X`, since __mul__ and __rmul__
+        could be used. For convenience, we have done so, although it wasn't strictly necessary.
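+
+        Examples
+        --------
+        A minimal illustration (it reuses the distribution from the class-level examples; note that the
+        constructor re-sorts the resulting values in increasing order, and the output is rounded here
+        purely for readability):
+
+        >>> values = [0.1, 0.3, 0.5, 0.7, 0.9]
+        >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1]
+        >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities)
+        >>> inv = 1 / drv
+        >>> round(float(inv.mean()), 4)
+        2.9968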
+ + """ + if not isinstance(other, (int, float)): + raise TypeError("Numerator must be a real number") + + # Check for zeros in self.values to avoid division by zero + if np.any(self.values == 0): + raise ValueError( + "Division by zero encountered in DiscreteRandomVariable values" + ) + + # Calculate new values as the real number divided by each value of the DiscreteRandomVariable + new_values = other / self.values + + return DiscreteRandomVariable( + values=new_values, probabilities=self.probabilities + ) + + def __eq__(self, other: Any) -> bool: + """Determine if two DiscreteRandomVariable instances are equal based on their values and probabilities. + + Parameters + ---------- + other : Any + The other DiscreteRandomVariable instance to compare against. + + Returns + ------- + bool + Returns True if both the values and probabilities match, False otherwise. + + """ + if not isinstance(other, DiscreteRandomVariable): + return False + return np.allclose(self.values, other.values) and np.allclose( + self.probabilities, other.probabilities + ) + + def mean(self): + """Calculate the mean of the discrete random variable. + + Returns + ------- + float + The mean of the discrete random variable. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.mean() + 0.48000000000000004 + + """ + return np.sum(self.values * self.probabilities) + + @staticmethod + def means_vectorized(drvs): + """Compute means for an array of DiscreteRandomVariable instances using a vectorized approach. + + Parameters + ---------- + drvs : np.ndarray + An array of DiscreteRandomVariable instances. + + Returns + ------- + np.ndarray + An array of floats representing the means of the discrete random variables. + + Notes + ----- + This method utilizes np.vectorize to apply the mean calculation to each instance in the array. It is primarily + for convenience and does not offer performance benefits over a traditional loop. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drvs = np.array([drv, 1 / drv]) + >>> DiscreteRandomVariable.means_vectorized(drvs) + array([0.48 , 2.9968254]) + + """ + # TODO: CHeck https://github.com/os-climate/physrisk/blob/main/src/physrisk/kernel/impact_distrib.py#L40 + compute_mean = np.vectorize(lambda drv: drv.mean()) + return compute_mean(drvs) + + def var(self): + """Calculate the variance of the discrete random variable. + + Returns + ------- + float + The variance of the discrete random variable. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.var() + 0.05160000000000001 + + """ + mean = self.mean() + variance = np.sum(((self.values - mean) ** 2) * self.probabilities) + return variance + + @staticmethod + def vars_vectorized(drvs): + """Compute variances for an array of DiscreteRandomVariable instances using a vectorized approach. + + Parameters + ---------- + drvs : np.ndarray + An array of DiscreteRandomVariable instances. + + Returns + ------- + np.ndarray + An array of floats representing the means of the discrete random variables. + + Notes + ----- + This method utilizes np.vectorize to apply the variance calculation to each instance in the array. 
It is primarily + for convenience and does not offer performance benefits over a traditional loop. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drvs = np.array([drv, 1 / drv]) + >>> DiscreteRandomVariable.vars_vectorized(drvs) + array([0.0516 , 6.08399093]) + + """ + compute_var = np.vectorize(lambda drv: drv.var()) + return compute_var(drvs) + + def plot_pmf(self): + """Plot an interactive histogram representing the probability mass function (PMF) of the discrete random variable. + + This method uses Plotly to create an interactive histogram that provides a visual representation of how + probabilities are distributed across different intervals. + """ + # Bar chart with Plotly + fig = go.Figure( + data=[ + go.Bar( + x=self.values, + y=self.probabilities, + marker=dict(line=dict(color="black", width=1)), + ) + ] + ) + fig.update_layout( + title="Histogram of the Discrete random variable", + xaxis_title="Value", + yaxis_title="Probability", + bargap=0.2, + ) + fig.show() + + def check_values(self, min_value: float = 0, max_value: float = 1) -> bool: + """Check if all values of the DiscreteRandomVariable instance fall within a specified range. + + This method verifies that each value defined in the DiscreteRandomVariable instance is + between a specified minimum value and maximum value, inclusive. By default, it checks + whether the values are between 0 and 1. + + Parameters + ---------- + min_value : float, optional + The minimum allowable value for the values. This value is inclusive, meaning that + values can be equal to this minimum value. The default is 0. + max_value : float, optional + The maximum allowable value for the values. This value is inclusive, meaning that + values can be equal to this maximum value. The default is 1. + + Returns + ------- + bool + Returns True if all values are within the specified range (min_value to max_value, inclusive). + Otherwise, returns False. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.check_values() + True + >>> drv.check_values(0,0.5) + False + + Notes + ----- + The method utilizes numpy's vectorized operations to efficiently check all values + against the provided bounds. This approach is effective for instances with a large + number of values. + + """ + return np.all((min_value <= self.values) & (self.values <= max_value)) + + def sample(self, n: Optional[int] = 1): + """Generate `n` random samples from the discrete random variable. + + Parameters + ---------- + n : int, optional + The number of samples to generate. The default is 1. + + Returns + ------- + np.ndarray + An array of sampled values. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> sample = drv.sample(5) + + """ + return np.random.choice(self.values, size=n, p=self.probabilities) + + def compute_cdf(self): + r"""Compute the Cumulative Distribution Function (CDF) for the discrete random variable. + + The CDF is defined as the probability that the variable takes a value less than or equal to `x`. 
+ Formally, for a discrete random variable `X` with values `x_i` and corresponding probabilities `p_i`, + the CDF at a point `x` is given by: + + .. math:: F(x) = P(X \leq x) = \sum_{x_i \leq x} p_i + + Returns + ------- + cdf : np.ndarray + An array representing the cumulative probabilities corresponding to the values of the random variable. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.compute_cdf() + array([0.1, 0.4, 0.7, 0.9, 1. ]) + + """ + # Compute the cumulative distribution function (CDF) + cdf = np.cumsum(self.probabilities) + + return cdf + + def compute_exceedance_probability(self): + """Compute the exceedance probability for a given threshold. + + The exceedance probability is the probability that the discrete random variable exceeds a certain value `x`. + Formally: + + .. math:: F_X^c(x) = P(X > x) = 1 - F_X(x) + + Returns + ------- + exceed_prob : np.ndarray + An array representing the exceedance probabilities corresponding to the values of the random variable. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.compute_exceedance_probability() + array([9.00000000e-01, 6.00000000e-01, 3.00000000e-01, 1.00000000e-01, + 1.11022302e-16]) + + """ + cdf = self.compute_cdf() + exceed_prob = 1 - cdf + return exceed_prob + + @staticmethod + def compute_exceedance_probability_vectorized(drvs, x): + """Compute the exceedance probabilities for an array of DiscreteRandomVariable instances using a vectorized approach. + + Parameters + ---------- + drvs : np.ndarray + An array of DiscreteRandomVariable instances. + x : float + Value at which to evaluate the exceedance probability function. + + Returns + ------- + np.ndarray + An array of floats representing the exceedance probabilities of the discrete random variables evaluated at `x`. + + Notes + ----- + This method utilizes np.vectorize to apply the exceedance probability calculation to each instance in the array. It is primarily + for convenience and does not offer performance benefits over a traditional loop. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drvs = np.array([drv, 1 / drv]) + >>> DiscreteRandomVariable.compute_exceedance_probability_vectorized(drvs, 2) + array([1.11022302e-16, 4.00000000e-01]) + + """ + compute_exceedance = np.vectorize( + lambda drv, x: 1 - np.sum(drv.probabilities[np.where(drv.values <= x)[0]]) + ) + return compute_exceedance(drvs, x) + + def compute_occurrence_probability(self, lambda_value): + r"""Compute the occurrence probability :math:`O(x)` for the discrete random variable using a Poisson process model. + + We assume i.i.d. random variables. + + In this case we have: + + .. math:: F_X(x) = \\frac{1}{\\lambda} \\log(1 - O(x)) + 1, + + where :math:`F_X(x)` is the CDF of the random variable. + + Parameters + ---------- + lambda_value : float + The rate parameter of the Poisson process (number of occurrences per time unit). + + Returns + ------- + occurrence_prob : np.ndarray + An array representing the occurrence probabilities O(s) for the values of the random variable. 
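+
+        Notes
+        -----
+        Equivalently (this is the expression the code evaluates directly):
+
+        .. math:: O(x) = 1 - \exp(-\lambda (1 - F_X(x))),
+
+        i.e., under the assumed Poisson model, exceedances of a level :math:`x` arrive at rate
+        :math:`\lambda P(X > x)`, and :math:`O(x)` is the probability of observing at least one
+        such exceedance per time unit. One entry is returned per value of the support.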
+ + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> lambda_value = 0.5 # Example rate parameter for the Poisson process + >>> drv.compute_occurrence_probability(lambda_value) + array([0.36237185, 0.25918178, 0.13929202, 0.04877058, 0. ]) + + """ + fs = self.compute_cdf() + occurrence_prob = 1 - np.exp(-lambda_value * (1 - fs)) + return occurrence_prob + + @staticmethod + def compute_occurrence_probability_vectorized(drvs, lambda_value, x): + """Compute the occurrence probabilities at `x` for an array of DiscreteRandomVariable instances using a vectorized approach. + + Parameters + ---------- + drvs : np.ndarray + An array of DiscreteRandomVariable instances. + lambda_value : float + The rate parameter of the Poisson process (number of occurrences per time unit). + x : float + Value at which to evaluate the occurrence probability function. + + Returns + ------- + np.ndarray + An array of floats representing the occurrence probabilities of the discrete random variables evaluated at `x`. + + Notes + ----- + This method utilizes np.vectorize to apply the occurrence probability calculation to each instance in the array. It is primarily + for convenience and does not offer performance benefits over a traditional loop. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drvs = np.array([drv, 1 / drv]) + >>> lambda_value = 0.5 # Example rate parameter for the Poisson process + >>> DiscreteRandomVariable.compute_occurrence_probability_vectorized(drvs, lambda_value, 0.3) + array([0.25918178, 0.39346934]) + + """ + compute_occurrence = np.vectorize( + lambda drv, lambda_value, x: 1 + - np.exp( + -lambda_value + * (1 - np.sum(drv.probabilities[np.where(drv.values <= x)[0]])) + ) + ) + return compute_occurrence(drvs, lambda_value, x) + + def compute_var(self, percentile=95): + r"""Compute the Value at Risk :math:`V^{p}_{X}` for a discrete random variable :math:`X`. + + The Value at Risk (:math:`V^{p}_{X}`) of a discrete random variable :math:`X` at the level + :math:`p \in (0, 1)` is the p-quantile of :math:`X` defined by the condition that the cumulative + distribution function :math:`F_{X}(x)` is greater than or equal to :math:`p`. Formally, + :math:`V^{p}_{X}` is given by: + + .. math:: V^{p}_{X} := \inf\{x \in \mathbb{R} : P(X \leq x) \geq p\}. + + Parameters + ---------- + percentile : float, optional + The confidence level (:math:`p`) for VaR expressed as a percentile (0-100). Default is 95. + + Returns + ------- + var_value : float + The computed VaR at the given percentile (confidence level). + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.compute_var() + 0.9 + + """ + if not 0 < percentile < 100: + raise ValueError("Percentile must be between 0 and 100.") + + # Compute the cumulative distribution function (CDF) + cdf = self.compute_cdf() + + # Find the index of the first occurrence where the CDF exceeds the target percentile + # np.isclose is used to avoid comparison numerical errors # TODO: Think of better ways to do this. 
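+        # In other words: pick the smallest supported value whose cumulative probability is
+        # (numerically) greater than or equal to p, matching the infimum in the VaR definition above.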
+ target_index = np.where( + np.isclose(cdf, percentile / 100.0) + (cdf > percentile / 100.0) + )[0][0] + var_value = self.values[target_index] + + return var_value + + @staticmethod + def compute_var_vectorized(drvs, percentile=95): + """Compute VaRs for an array of DiscreteRandomVariable instances using a vectorized approach. + + Parameters + ---------- + drvs : np.ndarray + An array of DiscreteRandomVariable instances. + percentile : float, optional + The confidence level (:math:`p`) for VaR expressed as a percentile (0-100). Default is 95. + + Returns + ------- + np.ndarray + An array of floats representing the VaRs of the discrete random variables. + + Notes + ----- + This method utilizes np.vectorize to apply the VaR calculation to each instance in the array. It is primarily + for convenience and does not offer performance benefits over a traditional loop. + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drvs = np.array([drv, 1 / drv]) + >>> DiscreteRandomVariable.compute_var_vectorized(drvs) + array([ 0.9, 10. ]) + + """ + compute_var_percentile = np.vectorize( + lambda drv: drv.compute_var(percentile=percentile) + ) + return compute_var_percentile(drvs) + + def compute_es(self, percentile=95): + r"""Compute the Expected Shortfall :math:`\\mathrm{ES}^{p}_{X}` for a discrete random variable :math:`X`. + + The Expected Shortfall at level :math:`p` for a discrete random variable :math:`X`, is defined formally as: + + .. math:: \\text{ES}^{p}_X = \\frac{1}{1-p} \int_{p}^{1} V^{q}_X \, dq + + Where :math:`V^{p}_X` is the Value at Risk at level :math:`p`. + + + Parameters + ---------- + percentile : float, optional + The confidence level (:math:`p`) for ES, expressed as a percentile (0-100). Default is 95. + + Returns + ------- + es_value : float + The computed ES at the given percentile (confidence level). + + Raises + ------ + ValueError + If `percentile` is not within the range (0, 100). + + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drv.compute_es() + 0.899999999999998 + + """ + # Check that percentile is between 0 and 100 + if not 0 < percentile < 100: + raise ValueError("Percentile must be between 0 and 100.") + + p = percentile / 100.0 + cdf = self.compute_cdf() + + target_indices = np.where(cdf >= p)[0] + + es = ( + np.sum((self.values * self.probabilities)[target_indices][1:]) + + self.values[target_indices[0]] * (cdf[target_indices[0]] - p) + ) / (1 - p) + + return es + + @staticmethod + def compute_es_vectorized(drvs, percentile=95): + """Compute the Expected Shortfall (ES) for an array of DiscreteRandomVariable instances using a vectorized approach. + + Parameters + ---------- + drvs : np.ndarray + An array of DiscreteRandomVariable instances. + percentile : float, optional + The confidence level (:math:`p`) for ES expressed as a percentile (0-100). Default is 95. + + Returns + ------- + np.ndarray + An array of floats representing the ESs of the discrete random variables. + + Notes + ----- + This method utilizes np.vectorize to apply the ES calculation to each instance in the array. It is primarily + for convenience and does not offer performance benefits over a traditional loop. 
+ + Examples + -------- + >>> values = [0.1, 0.3, 0.5, 0.7, 0.9] + >>> probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + >>> drv = DiscreteRandomVariable(values=values, probabilities=probabilities) + >>> drvs = np.array([drv, 1 / drv]) + >>> DiscreteRandomVariable.compute_es_vectorized(drvs) + array([ 0.9, 10. ]) + + """ + compute_es_percentile = np.vectorize( + lambda drv: drv.compute_es(percentile=percentile) + ) + return compute_es_percentile(drvs) diff --git a/src/osc_physrisk_financial/skeleton.py b/src/osc_physrisk_financial/skeleton.py deleted file mode 100644 index 46239d8..0000000 --- a/src/osc_physrisk_financial/skeleton.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -This is a skeleton file that can serve as a starting point for a Python -console script. To run this script uncomment the following lines in the -``[options.entry_points]`` section in ``setup.cfg``:: - - console_scripts = - fibonacci = osc_physrisk_financial.skeleton:run - -Then run ``pip install .`` (or ``pip install -e .`` for editable mode) -which will install the command ``fibonacci`` inside your current environment. - -Besides console scripts, the header (i.e. until ``_logger``...) of this file can -also be used as template for Python modules. - -Note: - This file can be renamed depending on your needs or safely removed if not needed. - -References: - - https://setuptools.pypa.io/en/latest/userguide/entry_point.html - - https://pip.pypa.io/en/stable/reference/pip_install -""" - -import argparse -import logging -import sys - -from osc_physrisk_financial import __version__ - -__author__ = "github-actions[bot]" -__copyright__ = "github-actions[bot]" -__license__ = "Apache-2.0" - -_logger = logging.getLogger(__name__) - - -# ---- Python API ---- -# The functions defined in this section can be imported by users in their -# Python scripts/interactive interpreter, e.g. via -# `from osc_physrisk_financial.skeleton import fib`, -# when using this Python module as a library. - - -def fib(n): - """Fibonacci example function - - Args: - n (int): integer - - Returns: - int: n-th Fibonacci number - """ - assert n > 0 - a, b = 1, 1 - for _i in range(n - 1): - a, b = b, a + b - return a - - -# ---- CLI ---- -# The functions defined in this section are wrappers around the main Python -# API allowing them to be called directly from the terminal as a CLI -# executable/script. - - -def parse_args(args): - """Parse command line parameters - - Args: - args (List[str]): command line parameters as list of strings - (for example ``["--help"]``). 
- - Returns: - :obj:`argparse.Namespace`: command line parameters namespace - """ - parser = argparse.ArgumentParser(description="Just a Fibonacci demonstration") - parser.add_argument( - "--version", - action="version", - version=f"osc-physrisk-financial {__version__}", - ) - parser.add_argument(dest="n", help="n-th Fibonacci number", type=int, metavar="INT") - parser.add_argument( - "-v", - "--verbose", - dest="loglevel", - help="set loglevel to INFO", - action="store_const", - const=logging.INFO, - ) - parser.add_argument( - "-vv", - "--very-verbose", - dest="loglevel", - help="set loglevel to DEBUG", - action="store_const", - const=logging.DEBUG, - ) - return parser.parse_args(args) - - -def setup_logging(loglevel): - """Setup basic logging - - Args: - loglevel (int): minimum loglevel for emitting messages - """ - logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s" - logging.basicConfig( - level=loglevel, stream=sys.stdout, format=logformat, datefmt="%Y-%m-%d %H:%M:%S" - ) - - -def main(args): - """Wrapper allowing :func:`fib` to be called with string arguments in a CLI fashion - - Instead of returning the value from :func:`fib`, it prints the result to the - ``stdout`` in a nicely formatted message. - - Args: - args (List[str]): command line parameters as list of strings - (for example ``["--verbose", "42"]``). - """ - args = parse_args(args) - setup_logging(args.loglevel) - _logger.debug("Starting crazy calculations...") - print(f"The {args.n}-th Fibonacci number is {fib(args.n)}") - _logger.info("Script ends here") - - -def run(): - """Calls :func:`main` passing the CLI arguments extracted from :obj:`sys.argv` - - This function can be used as entry point to create console scripts with setuptools. - """ - main(sys.argv[1:]) - - -if __name__ == "__main__": - # ^ This is a guard statement that will prevent the following code from - # being executed in the case someone imports this file instead of - # executing it as a script. - # https://docs.python.org/3/library/__main__.html - - # After installing your project with pip, users can also run your Python - # modules as scripts via the ``-m`` flag, as defined in PEP 338:: - # - # python -m osc_physrisk_financial.skeleton 42 - # - run() diff --git a/tests/conftest.py b/tests/conftest.py index 067af0d..b163a33 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,10 +1,12 @@ -""" -Dummy conftest.py for osc_physrisk_financial. +"""Dummy conftest.py for osc_physrisk_financial. If you don't know what this is for, just leave it empty. 
Read more about conftest.py under: - https://docs.pytest.org/en/stable/fixture.html - https://docs.pytest.org/en/stable/writing_plugins.html """ +import os +import sys -# import pytest +sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../src')) +) diff --git a/tests/test_dynamics.py b/tests/test_dynamics.py new file mode 100644 index 0000000..4031917 --- /dev/null +++ b/tests/test_dynamics.py @@ -0,0 +1,24 @@ +import numpy as np +import pandas as pd + +from osc_physrisk_financial.dynamics import ConstantGrowth + +value0 = 1000 +growth_rate = 0.05 +dates = pd.date_range(start="2020-01-01", periods=5, freq="YE") + + +def test_init(): + assert ( + ConstantGrowth(growth_rate=growth_rate, value0=value0, name="Test Growth") + is not None + ) + + +def test_compute_value(): + const_growth = ConstantGrowth( + growth_rate=growth_rate, value0=value0, name="Test Growth" + ) + expected_values = value0 * (1 + growth_rate) ** np.arange(0, 5) + expected_values = np.array(expected_values) + assert const_growth.compute_value(dates).all() == expected_values.all() diff --git a/tests/test_functions.py b/tests/test_functions.py new file mode 100644 index 0000000..cc62d71 --- /dev/null +++ b/tests/test_functions.py @@ -0,0 +1,205 @@ +import pytest +import pandas as pd +import numpy as np +from scipy import optimize +from osc_physrisk_financial.functions import ( + find_root, + check_all_nonnumeric, + dates_formatting, + contains_word, +) + + +# Test for find_root +def func_quad(x): + return x**2 - 2 + + +def fprime_quad(x): + return 2 * x + + +def func_cos(x): + return np.cos(x) - x + + +def func_non_quadratic(x): + return np.tan(x) + + +def func_cubic(x): + return x**3 - x - 2 + + +def func_no_real_root(x): + return x**2 + 1 + + +def func_large_error(x): + return (x - 1) ** 2 - 0.01 # This will trigger the numerical error too large + + +interval_quad = [0, 2] +interval_cubic = [1, 2] +interval_no_real_root = [-1, 1] +interval_large_error = [0, 2] + + +def test_newton_secant_method(): + root = find_root(func_quad, x0=1.0, interval=interval_quad) + assert np.isclose(root, np.sqrt(2), atol=1e-8) + + +def test_newton_raphson_method(): + root = find_root(func_quad, x0=1.0, interval=interval_quad, fprime=fprime_quad) + assert np.isclose(root, np.sqrt(2), atol=1e-8) + + +def test_fixed_point_method(): + root = find_root(func_non_quadratic, x0=0.5, interval=[0, 1]) + expected_root = optimize.fixed_point(lambda x: x - func_non_quadratic(x), 0.5) + assert np.isclose(root, expected_root, atol=1e-8) + + +def test_bisection_method(): + root = find_root(func_cubic, x0=1.5, interval=interval_cubic) + expected_root = optimize.bisect(func_cubic, interval_cubic[0], interval_cubic[1]) + assert np.isclose(root, expected_root, atol=1e-8) + + +def test_brentq_method(): + root = find_root(func_cubic, x0=1.5, interval=interval_cubic) + expected_root = optimize.brentq(func_cubic, interval_cubic[0], interval_cubic[1]) + assert np.isclose(root, expected_root, atol=10**-300) + + +def test_ridder_method(): + root = find_root(func_cubic, x0=1.5, interval=interval_cubic) + expected_root = optimize.ridder(func_cubic, interval_cubic[0], interval_cubic[1]) + assert np.isclose(root, expected_root, atol=1e-8) + + +def test_all_methods_fail(): + with pytest.raises(Exception, match="All methods failed"): + find_root(func_no_real_root, x0=0, interval=interval_no_real_root) + + +def test_numerical_error_too_large(): + with pytest.raises(Exception, match="The numerical error is too large."): + find_root( + func_large_error, 
x0=1.0, interval=interval_large_error, tolerance=10**-300 + ) + + +# test for check_all_nonnumeric + + +def test_all_nonnumeric_empty(): + arr = np.array([]) + assert check_all_nonnumeric(arr) + arr = [] + assert check_all_nonnumeric(arr) + + +def test_all_nonnumeric_numeric_elements(): + arr = np.array([1, 2, 3]) + assert not check_all_nonnumeric(arr) + arr = [1, 2, 3] + assert not check_all_nonnumeric(arr) + + +def test_all_nonnumeric_float_and_nan(): + arr = np.array([np.nan, 1.0, 3.5]) + assert not check_all_nonnumeric(arr) + arr = [np.nan, 1.0, 3.5] + assert not check_all_nonnumeric(arr) + + +def test_all_nonnumeric_strings(): + arr = np.array(["abc", "def", "ghi"]) + assert check_all_nonnumeric(arr) + arr = ["abc", "def", "ghi"] + assert check_all_nonnumeric(arr) + + +def test_all_nonnumeric_mixed(): + arr = np.array([1, "abc", np.nan]) + assert not check_all_nonnumeric(arr) + arr = [1, "abc", np.nan] + assert not check_all_nonnumeric(arr) + + +def test_all_nonnumeric_non_iterable(): + with pytest.raises(TypeError): + check_all_nonnumeric(123) + + +def test_all_nonnumeric_numeric_with_non_nan(): + arr = np.array([1.0, 2.0, 3.0]) + assert not check_all_nonnumeric(arr) + arr = [1.0, 2.0, 3.0] + assert not check_all_nonnumeric(arr) + + +def test_all_nonnumeric_integers_and_non_nan(): + arr = np.array([1, 2, 3]) + assert not check_all_nonnumeric(arr) + arr = [1, 2, 3] + assert not check_all_nonnumeric(arr) + + +def test_all_nonnumeric_float_and_integer(): + arr = np.array([1.0, 2, 3.5]) + assert not check_all_nonnumeric(arr) + arr = [1.0, 2, 3.5] + assert not check_all_nonnumeric(arr) + + +# test for dates_formatting + + +def test_single_date_string(): + result = dates_formatting("2022-01-01") + expected = pd.DatetimeIndex(["2022-01-01"]) + assert result.equals(expected) + + +def test_list_of_dates_strings(): + result = dates_formatting(["2022-01-03", "2022-01-01", "2022-01-02"]) + expected = pd.DatetimeIndex(["2022-01-01", "2022-01-02", "2022-01-03"]) + assert result.equals(expected) + + +def test_list_of_dates_strings_with_single_date(): + result = dates_formatting(["2022-01-03", "2022-01-01", "2022-01-02"], "2022-01-01") + expected1 = pd.DatetimeIndex(["2022-01-01", "2022-01-02", "2022-01-03"]) + expected2 = pd.DatetimeIndex(["2022-01-01"]) + assert result[0].equals(expected1) + assert result[1].equals(expected2) + + +def test_pandas_datetime_index(): + dates = pd.to_datetime(["2022-01-03", "2022-01-01", "2022-01-02"]) + result = dates_formatting(dates) + expected = pd.DatetimeIndex(["2022-01-01", "2022-01-02", "2022-01-03"]) + assert result.equals(expected) + + +def test_mixed_input_formats(): + dates = ["2022-01-03", "2022-01-01", "2022-01-02"] + mixed_dates = [pd.to_datetime(dates), "2022-01-01"] + result = dates_formatting(*mixed_dates) + expected1 = pd.DatetimeIndex(["2022-01-01", "2022-01-02", "2022-01-03"]) + expected2 = pd.DatetimeIndex(["2022-01-01"]) + assert result[0].equals(expected1) + assert result[1].equals(expected2) + + +# test for contains_word (delete if function is deleted from functions.py) + + +def test_contains_word_single_match(): + string_list = ["word1_word2", "word3_word4", "word2_word5"] + word = "word2" + expected_output = ["word1_word2", "word2_word5"] + assert contains_word(string_list, word) == expected_output diff --git a/tests/test_powerPlant.py b/tests/test_powerPlant.py new file mode 100644 index 0000000..f8e9ee2 --- /dev/null +++ b/tests/test_powerPlant.py @@ -0,0 +1,68 @@ +import numpy as np +import pytest + +from osc_physrisk_financial.assets 
import PowerPlants +from osc_physrisk_financial.random_variables import DiscreteRandomVariable + + +def test_power_plants(): + # Check Random variables + values = [0.1, 0.3, 0.5, 0.7, 0.9] + probabilities = (0.1, 0.2, 0.3, 0.1, 0.3) # This should sum up to 1 + _ = DiscreteRandomVariable(probabilities, values) + + intervals = [0, 0.2, 0.4, 0.6, 0.8, 1] + probabilities = (0.1, 0.2, 0.3, 0.1, 0.3) # This should sum up to 1 + drv_intervals = DiscreteRandomVariable(probabilities, intervals=intervals) + + prod = 7892 * (10**9) # Wh generated in 2019 + elec_price = 48.87 / (10**6) # euros/Wh + name = "Central Nuclear Trillo" + + with pytest.raises( + ValueError, + match="Must provide either 'production' or both 'capacity' and 'av_rate'.", + ): + PowerPlants() + + pp = PowerPlants(production=prod, name=name) + + n_years = 2050 - 2019 + r_cst = [0.02] + r_var = n_years * r_cst + + disc_cst = pp.discount(r=r_cst, n=n_years) + disc_var = pp.discount(r_var) + + with pytest.raises( + ValueError, match="Discounting cash flows in negative number of year" + ): + pp.discount(r=r_cst, n=0.1) + + with pytest.raises(ValueError, match="Discounting cash flows has a wrong format"): + pp.discount(r=[0.01, 0.02], n=1.1) + + assert np.isclose(disc_cst, disc_var), "Discount is not calculated properly" + + damage = drv_intervals + + loss_cst = pp.financial_losses( + damages=damage, energy_price=elec_price, r=r_cst, n=n_years + ) + + loss_var = pp.financial_losses(damages=damage, energy_price=elec_price, r=r_var) + + assert np.isclose( + loss_cst.mean(), loss_var.mean() + ), "Losses are not calculated properly" + + # Now the same pp in two different ways + + pp2 = PowerPlants(capacity=900913242.0091324, av_rate=1, name=name) + loss_var2 = pp2.financial_losses(damages=damage, energy_price=elec_price, r=r_var) + + assert np.isclose( + loss_var.mean(), loss_var2.mean() + ), "Losses are not calculated properly" + + print("FINISHED DCV TEST SUCCESSFULLY!!!") diff --git a/tests/test_random_variables.py b/tests/test_random_variables.py new file mode 100644 index 0000000..12a6d65 --- /dev/null +++ b/tests/test_random_variables.py @@ -0,0 +1,292 @@ +import numpy as np +import pytest + +from osc_physrisk_financial.random_variables import DiscreteRandomVariable + +values = [0.1, 0.3, 0.5, 0.7, 0.9] +intervals = [0, 0.2, 0.4, 0.6, 0.8, 1.0] +probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] +percentiles = [10, 20, 30, 40, 50, 60, 70, 80, 90] + +drv = DiscreteRandomVariable(values=values, probabilities=probabilities) +drv2 = DiscreteRandomVariable(intervals=intervals, probabilities=probabilities) +drvs = np.array([drv, 1 / drv]) + + +def test_init(): + assert drv == drv2 + assert drv == DiscreteRandomVariable(values=values, probabilities=probabilities) + assert ( + np.array_equal( + np.array([0.1, 0.3, 0.3, 0.2, 0.05]), + DiscreteRandomVariable( + intervals=intervals, + values=None, + probabilities=[0.1, 0.3, 0.3, 0.2, 0.05], + convert_to_osc_format=True, + ).probabilities, + ) + is False + ) + assert DiscreteRandomVariable( + intervals=intervals, + values=None, + probabilities=probabilities, + convert_to_osc_format=True, + ) is not None + + +def test_init_value_errors(): + with pytest.raises( + ValueError, match="Either intervals or values must be provided." 
+ ): + DiscreteRandomVariable(intervals=None, values=None, probabilities=probabilities) + + with pytest.raises( + ValueError, + match="Only one of intervals or values should be provided, not both.", + ): + DiscreteRandomVariable( + intervals=intervals, values=values, probabilities=probabilities + ) + + with pytest.raises(ValueError, match="The intervals must be sorted increasingly."): + DiscreteRandomVariable( + intervals=[0.4, 0.3, 0.2, 0.1], values=None, probabilities=probabilities + ) + + with pytest.raises( + ValueError, + match="The number of intervals must be one more than the number of probabilities.", + ): + DiscreteRandomVariable( + intervals=[0, 0.2, 0.4, 0.6, 0.8], values=None, probabilities=probabilities + ) + + with pytest.raises( + ValueError, match="The number of values must match the number of probabilities." + ): + DiscreteRandomVariable( + intervals=None, values=[0.1, 0.3, 0.5], probabilities=[0.1, 0.2, 0.3, 0.7] + ) + + with pytest.raises(ValueError, match="The probabilities must sum up to 1."): + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=[0.1, 0.3, 0.3, 0.2, 0.7] + ) + + with pytest.raises(ValueError, match="All probabilities must be between 0 and 1."): + DiscreteRandomVariable( + intervals=intervals, + values=None, + probabilities=[-0.1, 0.3, 0.3, 0.2, 0.3], + convert_to_osc_format=True, + ) + + with pytest.raises( + ValueError, match="Impact bins must be sorted in non-decreasing order." + ): + DiscreteRandomVariable( + intervals=[1.0, 0.8, 0.6, 0.4, 0.2, 0], + values=None, + probabilities=probabilities, + convert_to_osc_format=True, + ) + + +def test_not_implemented(): + assert NotImplemented == DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).__mul__(other="a") + assert NotImplemented == DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).__add__(other="a") + + +def test_rtruediv(): + with pytest.raises(TypeError, match="Numerator must be a real number"): + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).__rtruediv__(other="a") + + with pytest.raises( + ValueError, + match="Division by zero encountered in DiscreteRandomVariable values", + ): + DiscreteRandomVariable( + intervals=None, + values=[0.0, 0.3, 0.5, 0.7, 0.9], + probabilities=probabilities, + ).__rtruediv__(other=1.0) + + +def test_eq(): + assert ( + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).__eq__(1.0) + is False + ) + + +def test_check_values(): + assert ( + 0.0 + <= DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).check_values(0.0, 1.0) + <= 1.0 + ) + + +def test_sample(): + assert 4 == len( + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).sample(4) + ) + + +def test_compute_var(): + with pytest.raises(ValueError, match="Percentile must be between 0 and 100."): + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).compute_var(percentile=101) + + +def test_compute_es(): + with pytest.raises(ValueError, match="Percentile must be between 0 and 100."): + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).compute_es(percentile=101) + + +def test_plot(): + DiscreteRandomVariable( + intervals=intervals, values=None, probabilities=probabilities + ).plot_pmf() + + +def test_magic(): + # Negative + assert -drv == DiscreteRandomVariable( + 
values=[-x for x in values], probabilities=probabilities + ) + + # Multiplication + assert -6 * drv == DiscreteRandomVariable( + values=[-6 * x for x in values], probabilities=probabilities + ) + assert -6 * drv == drv * (-6) + + # Addition + assert 6 + drv == DiscreteRandomVariable( + values=[6 + x for x in values], probabilities=probabilities + ) + assert 6 + drv == drv + 6 + + # Subtraction + assert drv - 6 == DiscreteRandomVariable( + values=[x - 6 for x in values], probabilities=probabilities + ) + assert 6 - drv == DiscreteRandomVariable( + values=[6 - x for x in values], probabilities=probabilities + ) + + # Division + assert -6 / drv == DiscreteRandomVariable( + values=[-6 / x for x in values], probabilities=probabilities + ) + + +def test_metrics(): + # Mean + assert np.isclose(drv.mean(), 0.48) + + # Variance + assert np.isclose(drv.var(), 0.0516) + + # Exceedance Probability + assert np.allclose( + drv.compute_exceedance_probability(), + np.array([0.9, 0.6, 0.3, 0.1, 0.0]), + ) + + # Occurrence Probability + assert np.allclose( + drv.compute_occurrence_probability(1), + 1 - np.exp(np.array([0.1, 0.4, 0.7, 0.9, 1]) - 1), + ) + + # VaR + assert np.allclose( + [drv.compute_var(p) for p in percentiles], + [0.1, 0.3, 0.3, 0.3, 0.5, 0.5, 0.5, 0.7, 0.7], + ) + + # Expected Shortfall + assert np.allclose( + [drv.compute_es(p) for p in percentiles], + (48 - np.array([1, 4, 7, 10, 15, 20, 25, 32, 39])) + / 100 + / (1 - np.array(percentiles) / 100), + ) + + +def test_metrics_vectorized(): + # Mean + assert np.allclose(DiscreteRandomVariable.means_vectorized(drvs), [0.48, 2.9968]) + + # Variance + assert np.allclose( + DiscreteRandomVariable.vars_vectorized(drvs), + [0.0516, 6.08399], + ) + + # Exceedance Probability + assert np.allclose( + DiscreteRandomVariable.compute_exceedance_probability_vectorized(drvs, 0.8), + [0.1, 1], + ) + assert np.allclose( + DiscreteRandomVariable.compute_exceedance_probability_vectorized(drvs, 3), + [0, 0.4], + ) + + # Occurrence Probability + assert np.allclose( + DiscreteRandomVariable.compute_occurrence_probability_vectorized(drvs, 1, 0.8), + [1 - np.exp(-0.1), 1 - np.exp(-1)], + ) + assert np.allclose( + DiscreteRandomVariable.compute_occurrence_probability_vectorized(drvs, 1, 3), + [0, 1 - np.exp(-0.4)], + ) + + # VaR + assert np.allclose( + [DiscreteRandomVariable.compute_var_vectorized(drvs, p) for p in percentiles], + np.vstack( + ( + [0.1, 0.3, 0.3, 0.3, 0.5, 0.5, 0.5, 0.7, 0.7], + [10 / 9, 10 / 7, 10 / 7, 2, 2, 2, 10 / 3, 10 / 3, 10 / 3], + ) + ).transpose(), + ) + + # Expected Shortfall + assert np.allclose( + np.array( + [DiscreteRandomVariable.compute_es_vectorized(drvs, p) for p in percentiles] + ), + ( + np.vstack( + ( + (48 - np.array([1, 4, 7, 10, 15, 20, 25, 32, 39])) / 100, + (1888 - np.array([70, 160, 250, 376, 502, 628, 838, 1048, 1258])) + / 630, + ) + ) + / (1 - np.array(percentiles) / 100) + ).transpose(), + ) diff --git a/tests/test_realstate.py b/tests/test_realstate.py new file mode 100644 index 0000000..7ed500b --- /dev/null +++ b/tests/test_realstate.py @@ -0,0 +1,309 @@ +from osc_physrisk_financial.assets import RealAsset +from osc_physrisk_financial.dynamics import ConstantGrowth +from osc_physrisk_financial.random_variables import DiscreteRandomVariable + +import pytest +import numpy as np + + +def test_real_asset(): + # TODO: This script should be transformed in a proper test. 
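+    # Until then, the assertions below cover the ConstantGrowth dynamics and the scalar
+    # arithmetic (+, *, /) of DiscreteRandomVariable that is later used for asset valuation.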
+ + # Check dynamics + constant_g = ConstantGrowth(growth_rate=0.02, name="RealAsset", value0=100) + valuet = constant_g.compute_value( + dates=["2024-02-09", "2025-12-25", "2023-07-01", "2022-07-01"] + ) + expected_values = [ + 100.0, + 102, + 104.04, + 106.1208, + ] # Expected values from simple calculation + assert np.allclose(valuet, expected_values), f"Value_t = {valuet}" + + # Check Random variables + values = [0.1, 0.3, 0.5, 0.7, 0.9] + probabilities = (0.1, 0.2, 0.3, 0.1, 0.3) # This should sum up to 1 + discrete_rand_var_values = DiscreteRandomVariable(probabilities, values) + + intervals = [0, 0.2, 0.4, 0.6, 0.8, 1] + probabilities = (0.1, 0.2, 0.3, 0.1, 0.3) # This should sum up to 1 + discrete_rand_var_intervals = DiscreteRandomVariable( + probabilities, intervals=intervals + ) + assert discrete_rand_var_values == discrete_rand_var_intervals + discrete_rand_var = discrete_rand_var_values + print(discrete_rand_var.mean()) + discrete_rand_var_1 = 1.3 + discrete_rand_var + discrete_rand_var_2 = discrete_rand_var + 1.3 + assert discrete_rand_var_1 == discrete_rand_var_2 + + five_discrete_rand_var = 5 * discrete_rand_var + rfive_discrete_rand_var = discrete_rand_var * 5 + assert five_discrete_rand_var == rfive_discrete_rand_var + + divided_rv = 1 / discrete_rand_var + + # Create a numpy array of these random variables + rv_array = np.array( + [discrete_rand_var, five_discrete_rand_var, rfive_discrete_rand_var], + dtype=object, + ) + # Check __eq__ and np.array stuff + rv_array_div = 1 / rv_array + + assert divided_rv == rv_array_div[0] # Dummy test for __rtruediv__ and __eq__ + + assert (1 + discrete_rand_var) == (1 + rv_array)[0] # Dummy test for __sum__ + + +def test_asset(): + constant_g = ConstantGrowth(growth_rate=0.02, name="RealAsset", value0=100) + probabilities = (0.1, 0.2, 0.3, 0.1, 0.3) # This should sum up to 1 + values = [0.1, 0.3, 0.5, 0.7, 0.9] + discrete_rand_var_values = DiscreteRandomVariable(probabilities, values) + discrete_rand_var = discrete_rand_var_values + # Check assets + real_asset = RealAsset(value_0=100, dynamics=constant_g, name="RealState") + real_asset.financial_losses(["2030-02-09"], damage=discrete_rand_var) + # real_asset.financial_losses(["2030-02-09"], damage=discrete_rand_var)[0].plot_pmf() + losses = real_asset.financial_losses(["2030-02-09"], damage=discrete_rand_var) + mean_loss = losses[0].mean() + expected_mean_loss = 56.0 + variance_loss = losses[0].var() + expected_variance_loss = 724.0 + print( + f'Mean Financial Losses: {real_asset.financial_losses(["2030-02-09"], damage=discrete_rand_var)[0].mean()}' + ) + print( + f'Variance Financial Losses: {real_asset.financial_losses(["2030-02-09"], damage=discrete_rand_var)[0].var()}' + ) + assert np.allclose(mean_loss, expected_mean_loss), "Mean is not calculated properly" + assert np.allclose( + variance_loss, expected_variance_loss + ), "Variance is not calculated properly" + + intervals_osc = np.array( + [ + 0.00012346, + 0.00021273, + 0.000302, + 0.0003516, + 0.00040436, + 0.00043349, + 0.00048287, + 0.000516, + 0.0005943, + ] + ) + probabilities_osc = np.array( + [ + 0.00166667, + 0.00083333, + 0.0005, + 0.00033333, + 0.0002381, + 0.00017857, + 0.00013889, + 0.00011111, + ] + ) + discrete_rand_var_osc = DiscreteRandomVariable( + probabilities=probabilities_osc, + intervals=intervals_osc, + convert_to_osc_format=True, + ) + + expected_intervals = np.array( + [ + 0.0, + 0.00012346, + 0.00021273, + 0.000302, + 0.0003516, + 0.00040436, + 0.00043349, + 0.00048287, + 0.000516, + 0.0005943, + ] + 
) + expected_probabilities = np.array( + [ + [ + 9.96000000e-01, + 1.66666667e-03, + 8.33333333e-04, + 5.00000000e-04, + 3.33333333e-04, + 2.38095238e-04, + 1.78571429e-04, + 1.38888889e-04, + 1.11111111e-04, + ] + ] + ) + + assert np.allclose( + discrete_rand_var_osc.intervals, expected_intervals + ), "Intervals are not calculated properly" + assert np.allclose( + discrete_rand_var_osc.probabilities, expected_probabilities + ), "Probabilities are not calculated properly" + + # zero included + + intervals_osc_zero = np.array( + [ + 0, + 0.00012346, + 0.00021273, + 0.000302, + 0.0003516, + 0.00040436, + 0.00043349, + 0.00048287, + 0.000516, + ] + ) + probabilities_osc_zero = np.array( + [ + 0.00166667, + 0.00083333, + 0.0005, + 0.00033333, + 0.0002381, + 0.00017857, + 0.00013889, + 0.00011111, + ] + ) + + discrete_rand_var_osc_zero = DiscreteRandomVariable( + probabilities=probabilities_osc_zero, + intervals=intervals_osc_zero, + convert_to_osc_format=True, + ) + + a = np.array(intervals_osc_zero[:-1] + intervals_osc_zero[1:]) / 2 + b = discrete_rand_var_osc_zero.values + + assert np.allclose(a[1:], b[1:]), "Intervals are not calculated properly" + assert np.isclose(b[0], 0), "Values are not calculated properly" + + # zero not included + discrete_rand_var_osc_zero = DiscreteRandomVariable( + probabilities=probabilities_osc, + intervals=intervals_osc, + convert_to_osc_format=True, + ) + + a = np.array(intervals_osc[:-1] + intervals_osc[1:]) / 2 + b = discrete_rand_var_osc_zero.values + + assert np.all(np.isclose(a, b[1:])), "Intervals are not calculated properly" + assert np.isclose(b[0], 0), "Values are not calculated properly" + # LTV + damage_1 = 1 / 100 * discrete_rand_var + damage_2 = 2 / 100 * discrete_rand_var + damage_3 = 0.01 + 1 / 100 * discrete_rand_var + loan_amounts = [1, 3, 5] + damages = [damage_1, damage_2, damage_3] + ltv = real_asset.ltv( + dates=["2030-02-09", "2031-02-09"], damages=damages, loan_amounts=loan_amounts + ) + + with pytest.raises( + ValueError, match="One or more damages have values outside the 0 to 1 range." 
+ ): + damage_4 = damage_1 + 1 + ltv = real_asset.ltv( + dates=["2030-02-09", "2031-02-09"], + damages=[damage_4, damage_2, damage_3], + loan_amounts=loan_amounts, + ) + + with pytest.raises( + ValueError, + match="The lengths of 'damage' and 'loan_amount' \\(number of assets\\) must match\\.", + ): + ltv = real_asset.ltv( + dates=["2030-02-09", "2031-02-09"], + damages=[damage_1, damage_2], + loan_amounts=loan_amounts, + ) + + print(f" LTV mean value (first date, fist asset): {ltv[0,0].mean()}") + means = DiscreteRandomVariable.means_vectorized(ltv) + print(f" LTV mean values: {means}") + + expected_means = np.array( + [[0.01005639, 0.0303407, 0.05079274], [0.0098592, 0.02974579, 0.0497968]] + ) + + assert np.allclose(means, expected_means), "LTV mean values calculation failed" + + # Variances + print(f" LTV variance (first date, fist asset): {ltv[0,0].var()}") + vars = DiscreteRandomVariable.vars_vectorized(ltv) + print(f" LTV variances: {vars}") + + expected_vars = np.array( + [ + [7.40214348e-10, 2.72496428e-08, 1.92687839e-08], + [7.11470923e-10, 2.61915059e-08, 1.85205535e-08], + ] + ) + + assert np.allclose(vars, expected_vars), "LTV variance calculation failed" + + # VaR + values = np.array([-100, -20, 0, 50]) + probabilities = np.array([0.1, 0.3, 0.4, 0.2]) + drv_var = DiscreteRandomVariable(values=values, probabilities=probabilities) + percentile = 95 + # drv_var.plot_pmf() + var = drv_var.compute_var(percentile=percentile) + print(f"The Value at Risk (VaR) at the {percentile}% confidence level is: {var}") + + vars = DiscreteRandomVariable.compute_var_vectorized(ltv) + print(f" LTV VaRs: {vars}") + print(f"Works as expected? {vars[0][0] == ltv[0][0].compute_var()}") # Dummy test + expected_var = 50 + expected_es = 50 + # VaR & ES + es = drv_var.compute_es(percentile=percentile) + print(f"Percentile = {percentile}, VaR: {var}, ES: {es}") + assert np.allclose(var, expected_var), "VaR calculation failed" + assert np.allclose(es, expected_es), "ES calculation failed" + + # CDF & EP + + values = [0.1, 0.3, 0.5, 0.7, 0.9] + probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + discrete_rand_var = DiscreteRandomVariable( + values=values, probabilities=probabilities + ) + + _ = discrete_rand_var.compute_cdf() + + check_values = np.linspace(min(values), max(values), 20) + results = [] + for _ in check_values: + exceedance_probability = discrete_rand_var.compute_exceedance_probability() + cdf = discrete_rand_var.compute_cdf() + sum_check = exceedance_probability + cdf + results.append(sum_check) + + print(f"Check EP & CDF: {np.allclose(results, 1)}") + + # O(s) + + values = [0.1, 0.3, 0.5, 0.7, 0.9] + probabilities = [0.1, 0.3, 0.3, 0.2, 0.1] + lambda_value = 0.5 # Example rate parameter for the Poisson process + discrete_rand_var = DiscreteRandomVariable( + values=values, probabilities=probabilities + ) + _ = discrete_rand_var.compute_occurrence_probability(lambda_value) diff --git a/tests/test_skeleton.py b/tests/test_skeleton.py deleted file mode 100644 index 06da5fc..0000000 --- a/tests/test_skeleton.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest - -from osc_physrisk_financial.skeleton import fib, main - -__author__ = "github-actions[bot]" -__copyright__ = "github-actions[bot]" -__license__ = "Apache-2.0" - - -def test_fib(): - """API Tests""" - assert fib(1) == 1 - assert fib(2) == 1 - assert fib(7) == 13 - with pytest.raises(AssertionError): - fib(-10) - - -def test_main(capsys): - """CLI Tests""" - # capsys is a pytest fixture that allows asserts against stdout/stderr - # 
https://docs.pytest.org/en/stable/capture.html - main(["7"]) - captured = capsys.readouterr() - assert "The 7-th Fibonacci number is 13" in captured.out