diff --git a/.github/workflows/all-os-tests.yml b/.github/workflows/all-os-tests.yml
new file mode 100644
index 00000000..7c44adc0
--- /dev/null
+++ b/.github/workflows/all-os-tests.yml
@@ -0,0 +1,34 @@
+# This workflow will install OS dependencies and run a 'base' set of unit tests with Python 3.9
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: Unit tests on macOS/Linux/Windows
+
+on:
+ push:
+ pull_request:
+ branches: [ "dev", "main" ]
+
+jobs:
+ build:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.9"]
+ os: ["macos-latest", "ubuntu-latest", "windows-latest"]
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ - name: Run Integration Tests # run only those tests marked runinteg & with no osmosis deps
+ run: |
+ pytest -m runinteg --runinteg --deselect tests/osm/
+ - name: Test with pytest # run only tests with no osmosis deps
+ run: |
+ pytest --deselect tests/osm/
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 047d1369..9955734d 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -1,4 +1,4 @@
-# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# This workflow will install Python dependencies, run tests and lint with Python 3.9
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: Python package
@@ -10,7 +10,6 @@ on:
jobs:
build:
-
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
@@ -23,11 +22,6 @@ jobs:
uses: actions/setup-python@v3
with:
python-version: ${{ matrix.python-version }}
- - name: Install geos # required for cartopy installation
- uses: ConorMacBride/install-package@v1
- with:
- brew: geos
- apt: libgeos-dev
- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -40,11 +34,19 @@ jobs:
- name: Check Java Install
run: |
java --version
- - name: Install Osmosis
- uses: ConorMacBride/install-package@v1
- with:
- brew: osmosis
- apt: osmosis
+ - name: Install macOS dependencies with brew
+ if: runner.os == 'macOS' # not updating brew version, issue with aom
+ run: |
+ brew install geos
+ brew install osmosis
+ shell: sh
+ - name: Install Linux dependencies with apt
+ if: runner.os == 'Linux'
+ run: |
+ sudo apt update
+ sudo apt install -y libgeos-dev
+ sudo apt install -y osmosis
+ shell: sh
- name: Run Integration Tests
run: |
pytest -m runinteg --runinteg # run only those tests marked runinteg
@@ -56,10 +58,12 @@ jobs:
run: |
pytest
- name: Generate Report
+ if: runner.os == 'macOS' # run coverage report only on macOS
run: |
coverage run -m pytest
coverage xml
- name: Upload coverage reports to Codecov
+ if: runner.os == 'macOS' # upload coverage report only on macOS
uses: codecov/codecov-action@v3
with:
file: ./coverage.xml
diff --git a/.github/workflows/sphinx-render.yml b/.github/workflows/sphinx-render.yml
new file mode 100644
index 00000000..94cd86e8
--- /dev/null
+++ b/.github/workflows/sphinx-render.yml
@@ -0,0 +1,34 @@
+name: "Render docs"
+
+on: push
+
+env:
+ PYTHON_VERSION: "3.9"
+ PUSH_BRANCH: "refs/heads/dev"
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ steps:
+ - uses: actions/checkout@v3
+ - name: Set up Python ${{ env.PYTHON_VERSION }}
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+ - name: Sphinx build # use -W to turn warnings into errors
+ run: |
+ make -C docs/ html SPHINXOPTS="-W"
+ - name: Deploy
+ uses: peaceiris/actions-gh-pages@v3
+ # deploy if it's being pushed only to this branch
+ if: ${{ github.ref == env.PUSH_BRANCH }}
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: docs/build/html
+ commit_message: ${{ github.event.head_commit.message }}
diff --git a/.gitignore b/.gitignore
index 5a5f920d..2cc64822 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,18 @@
# moved blanket rules above specific exceptions for test fixtures
*.zip
*.pkl
-# except test fixtures
+*.html
+
+# exception for test fixtures
!tests/data/newport-2023-06-13.osm.pbf
!tests/data/newport-20230613_gtfs.zip
!tests/data/gtfs/route_lookup.pkl
+!tests/data/gtfs/report/html_template.html
+
+# exception for html templates
+!src/transport_performance/gtfs/report/html_templates/evaluation_template.html
+!src/transport_performance/gtfs/report/html_templates/stops_template.html
+!src/transport_performance/gtfs/report/html_templates/summary_template.html
### Project structure ###
data/*
@@ -35,7 +43,6 @@ outputs/*
*.Rproj
-*.html
*.pdf
*.csv
*.rds
@@ -167,7 +174,12 @@ instance/
.scrapy
# Sphinx documentation
-docs/_build/
+docs/source/reference/_autosummary/
+docs/build/
+!docs/README.md
+!docs/gtfs
+!docs/source/_static/dsc.png
+!docs/source/_templates/footer.html
# PyBuilder
.pybuilder/
@@ -306,11 +318,6 @@ vignettes/*.pdf
# R Environment Variables
.Renviron
-# pkgdown site
-docs/*
-!docs/README.md
-!docs/gtfs
-
# translation temp files
po/*~
diff --git a/README.md b/README.md
index 673c4e24..419bfe23 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
diff --git a/conftest.py b/conftest.py
index 3e7bdd1e..6f644831 100644
--- a/conftest.py
+++ b/conftest.py
@@ -41,28 +41,37 @@ def pytest_configure(config):
)
-def pytest_collection_modifyitems(config, items): # noqa C901
+def pytest_collection_modifyitems(config, items): # noqa:C901
"""Handle switching based on cli args."""
- if config.getoption("--runsetup"):
- # --runsetup given in cli: do not skip slow tests
+ if (
+ config.getoption("--runsetup")
+ & config.getoption("--runinteg")
+ & config.getoption("--runexpensive")
+ ):
+ # do full test suite when all flags are given
return
- skip_setup = pytest.mark.skip(reason="need --runsetup option to run")
- for item in items:
- if "setup" in item.keywords:
- item.add_marker(skip_setup)
- if config.getoption("--runinteg"):
- return
- skip_runinteg = pytest.mark.skip(reason="need --runinteg option to run")
- for item in items:
- if "runinteg" in item.keywords:
- item.add_marker(skip_runinteg)
+ # do not add setup marks when the runsetup flag is given
+ if not config.getoption("--runsetup"):
+ skip_setup = pytest.mark.skip(reason="need --runsetup option to run")
+ for item in items:
+ if "setup" in item.keywords:
+ item.add_marker(skip_setup)
- if config.getoption("--runexpensive"):
- return
- skip_runexpensive = pytest.mark.skip(
- reason="need --runexpensive option to run"
- )
- for item in items:
- if "runexpensive" in item.keywords:
- item.add_marker(skip_runexpensive)
+ # do not add integ marks when the runinteg flag is given
+ if not config.getoption("--runinteg"):
+ skip_runinteg = pytest.mark.skip(
+ reason="need --runinteg option to run"
+ )
+ for item in items:
+ if "runinteg" in item.keywords:
+ item.add_marker(skip_runinteg)
+
+ # do not add expensive marks when the runexpensive flag is given
+ if not config.getoption("--runexpensive"):
+ skip_runexpensive = pytest.mark.skip(
+ reason="need --runexpensive option to run"
+ )
+ for item in items:
+ if "runexpensive" in item.keywords:
+ item.add_marker(skip_runexpensive)
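As a minimal illustration of the marker handling above (the test names and bodies are hypothetical; the `setup`, `runinteg` and `runexpensive` markers and their CLI flags come from this conftest logic):

```python
import pytest


@pytest.mark.setup
def test_environment_setup():
    """Skipped unless pytest is called with --runsetup."""
    assert True


@pytest.mark.runinteg
def test_integration_roundtrip():
    """Skipped unless pytest is called with --runinteg."""
    assert True


@pytest.mark.runexpensive
def test_expensive_computation():
    """Skipped unless pytest is called with --runexpensive."""
    assert True
```

Passing all three flags together (`pytest --runsetup --runinteg --runexpensive`) takes the early-return branch above and runs the full suite.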
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..d0c3cbf1
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/README.md b/docs/README.md
index deecd1ec..3f78bbdd 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,3 +1,88 @@
# `docs` folder overview
-All documentation for the project should be included in this folder.
+This folder contains all the source files needed to build package documentation
+using [`sphinx`](https://www.sphinx-doc.org/en/master/).
+
+## Building the documentation locally
+
+This is useful whilst editing the documentation locally and previewing new
+additions/edits. Following the steps below will render the documentation
+locally, allowing you to check for any warnings or errors during the build stage.
+
+1. Ensure the dependencies in `requirements.txt` have been installed. This will
+install `sphinx`, the necessary themes, and all the other Python dependencies
+for this package.
+
+2. Call the following from the project root:
+
+ ```bash
+ make -C docs/ html
+ ```
+
+ Or, from within this docs directory:
+
+ ```bash
+ make html
+ ```
+
+ > Note: On Windows, if you are using PowerShell the make command may not
+ work. If this is the case, you should be able to run `.\make.bat html`
+ after navigating to this directory.
+
+ Calling one of the commands above will trigger `sphinx-build` and render
+ the documentation in HTML format within the `build` directory.
+
+3. Inside `docs/build/html/`, opening/refreshing `index.html` in a browser will
+display the documentation landing page.
+
+## Cleaning the docs folder
+
+From time to time, it may be necessary to clean the build folder (e.g., to
+unpick some edits that have not made their way through to the browser for some
+reason).
+
+> Note: `sphinx-build` will only rebuild pages if the respective source file(s)
+have changed. Calling clean may be helpful to either force an entire rebuild of
+all pages, or to include an update that isn't picked up from a source file
+(e.g. a CSS file update).
+
+To clean the build folder, call the following:
+
+```bash
+# from the project root
+make -C docs/ clean
+
+# or, from within the docs folder
+make clean
+```
+
+It's also possible to combine both the clean and HTML build commands together
+as follows:
+
+```bash
+# from the project root
+make -C docs/ clean html
+
+# or, from within the docs folder
+make clean html
+```
+
+> Note: the contents of the `docs/build` folder are ignored by Git. Cleaning
+the build folder locally will therefore only impact your local documentation
+build.
+
+## Building the documentation 'on push' to a remote branch
+
+There is a GitHub action set up (`.github/workflows/sphinx-render.yml`) that
+runs on all pushes to any branch. This will attempt to build the `docs/source`
+folder content and will fail if `sphinx-build` throws any errors or warnings.
+This helps ensure the quality of the documentation on each push and allows
+developers to correct any issues sooner.
+
+The deployment stage of this GitHub action is only done when pushing to the
+`dev` branch (i.e. after merging in a PR). Therefore, any changes made to
+`docs` in a feature branch will not appear in the deployed documentation.
+
+> Note: the current implementation of the GitHub action deploys on push to
+`dev`, but this is subject to change at a later date. It will likely be changed
+to pushes to `main` once an initial release of this package is available.
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 00000000..747ffb7b
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css
new file mode 100644
index 00000000..4db02d8e
--- /dev/null
+++ b/docs/source/_static/custom.css
@@ -0,0 +1,6 @@
+/* Provision for custom css */
+
+/* wraps text in toctree to fit into sidebar width */
+li[class^="toctree"] {
+ word-break: break-all;
+}
diff --git a/docs/source/_static/dsc.png b/docs/source/_static/dsc.png
new file mode 100644
index 00000000..ee31b7aa
Binary files /dev/null and b/docs/source/_static/dsc.png differ
diff --git a/docs/source/_static/favicon.ico b/docs/source/_static/favicon.ico
new file mode 100644
index 00000000..6af29abc
Binary files /dev/null and b/docs/source/_static/favicon.ico differ
diff --git a/docs/source/_templates/autosummary/base.rst b/docs/source/_templates/autosummary/base.rst
new file mode 100644
index 00000000..a5042f66
--- /dev/null
+++ b/docs/source/_templates/autosummary/base.rst
@@ -0,0 +1,8 @@
+..
+ base.rst
+
+{{ objname | escape | underline }}
+
+.. currentmodule:: {{ module }}
+
+.. auto{{ objtype }}:: {{ objname }}
diff --git a/docs/source/_templates/autosummary/class.rst b/docs/source/_templates/autosummary/class.rst
new file mode 100644
index 00000000..352f05e7
--- /dev/null
+++ b/docs/source/_templates/autosummary/class.rst
@@ -0,0 +1,23 @@
+..
+ class.rst
+
+{{ objname | escape | underline }}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+ :members:
+ :show-inheritance:
+ :inherited-members:
+
+ {% block methods %}
+ {% if methods %}
+ .. rubric:: {{ _('Methods') }}
+
+ .. autosummary::
+ :nosignatures:
+ {% for item in methods %}
+ ~{{ name }}.{{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
diff --git a/docs/source/_templates/autosummary/module.rst b/docs/source/_templates/autosummary/module.rst
new file mode 100644
index 00000000..818f2e9f
--- /dev/null
+++ b/docs/source/_templates/autosummary/module.rst
@@ -0,0 +1,74 @@
+..
+ module.rst
+
+{{ fullname | escape | underline }}
+
+.. automodule:: {{ fullname }}
+
+ {% block attributes %}
+ {% if attributes %}
+ .. rubric:: Module Attributes
+
+ .. autosummary::
+ :toctree:
+ {% for item in attributes %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block functions %}
+ {% if functions %}
+ .. rubric:: {{ _('Functions') }}
+
+ .. autosummary::
+ :toctree:
+ :nosignatures:
+ :template: autosummary/base.rst
+ {% for item in functions %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block classes %}
+ {% if classes %}
+ .. rubric:: {{ _('Classes') }}
+
+ .. autosummary::
+ :toctree:
+ :nosignatures:
+ :template: autosummary/class.rst
+ {% for item in classes %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block exceptions %}
+ {% if exceptions %}
+ .. rubric:: {{ _('Exceptions') }}
+
+ .. autosummary::
+ :toctree:
+ :nosignatures:
+ :template: autosummary/base.rst
+ {% for item in exceptions %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+ {% endblock %}
+
+{% block modules %}
+ {% if modules %}
+ .. rubric:: Modules
+
+ .. autosummary::
+ :toctree:
+ :template: autosummary/module.rst
+ :recursive:
+ {% for item in modules %}
+ {{ item }}
+ {%- endfor %}
+ {% endif %}
+{% endblock %}
diff --git a/docs/source/_templates/footer.html b/docs/source/_templates/footer.html
new file mode 100644
index 00000000..e582f0a6
--- /dev/null
+++ b/docs/source/_templates/footer.html
@@ -0,0 +1,27 @@
+{% extends "!footer.html" %}
+
+{%- block extrafooter %}
+
+
"""
+
+ # Add additional information for repeated pairs
+ # to the HTML report
+ try:
+ for counter, var in enumerate(duplicate_counts):
+ if counter == 0:
+ table_html = (
+ table_html
+ + """
"
+ except NameError:
+ pass
+
+ table_html = table_html + build_table(
+ impacted_rows, scheme, padding="10px", escape=False
+ )
+
+ table_html = table_html + ""
+
+ # save the output
+ save_name = f"{'_'.join(message.split(' '))}_{table}"
+ with open(f"{output_path}/gtfs_report/{save_name}.html", "w") as f:
+ f.write(table_html)
+
+ return None
+
+ def html_report(
+ self,
+ report_dir: Union[str, pathlib.Path] = "outputs",
+ overwrite: bool = False,
+ summary_type: str = "mean",
+ extended_validation: bool = True,
+ ) -> None:
+ """Generate a HTML report describing the GTFS data.
+
+ Parameters
+ ----------
+ report_dir : Union[str, pathlib.Path], optional
+ The directory to save the report to,
+ by default "outputs"
+ overwrite : bool, optional
+ Whether or not to overwrite the existing report
+ if it already exists in the report_dir,
+ by default False
+ summary_type : str, optional
+ The type of summary statistic to use in the
+ summaries on the gtfs report,
+ by default "mean"
+ extended_validation : bool, optional
+ Whether or not to create extended reports
+ for gtfs validation errors/warnings, by default True.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ ValueError
+ An error raised if the type of summary passed is invalid
+
+ """
+ _type_defence(overwrite, "overwrite", bool)
+ _type_defence(summary_type, "summary_type", str)
+ _set_up_report_dir(path=report_dir, overwrite=overwrite)
+ summary_type = summary_type.lower()
+ if summary_type not in ["mean", "min", "max", "median"]:
+ raise ValueError("'summary type' must be mean, median, min or max")
+
+ # store today's date
+ date = datetime.datetime.strftime(datetime.datetime.now(), "%d-%m-%Y")
+
+ # feed evaluation
+ self.clean_feed()
+ validation_dataframe = self.is_valid()
+
+ # create extended reports if requested
+ if extended_validation:
+ self._extended_validation(output_path=report_dir)
+ info_href = (
+ validation_dataframe["message"].apply(
+ lambda x: "_".join(x.split(" "))
+ )
+ + "_"
+ + validation_dataframe["table"]
+ + ".html"
+ )
+ validation_dataframe["info"] = [
+ f""" Further Info"""
+ if len(rows) > 1
+ else "Unavailable"
+ for href, rows in zip(info_href, validation_dataframe["rows"])
+ ]
+
+ eval_temp = TemplateHTML(
+ path=(
+ "src/transport_performance/gtfs/report/"
+ "html_templates/evaluation_template.html"
+ )
+ )
+ eval_temp._insert(
+ "eval_placeholder_1",
+ build_table(
+ validation_dataframe,
+ "green_dark",
+ padding="10px",
+ escape=False,
+ ),
+ )
+ eval_temp._insert("eval_title_1", "GTFS Feed Warnings and Errors")
+
+ eval_temp._insert(
+ "eval_placeholder_2",
+ build_table(self.feed.agency, "green_dark", padding="10px"),
+ )
+ eval_temp._insert("eval_title_2", "GTFS Agency Information")
+
+ eval_temp._insert(
+ "name_placeholder", self.feed.feed_info["feed_publisher_name"][0]
+ )
+ eval_temp._insert(
+ "url_placeholder",
+ self.feed.feed_info["feed_publisher_url"][0],
+ replace_multiple=True,
+ )
+ eval_temp._insert(
+ "lang_placeholder", self.feed.feed_info["feed_lang"][0]
+ )
+ eval_temp._insert(
+ "start_placeholder", self.feed.feed_info["feed_start_date"][0]
+ )
+ eval_temp._insert(
+ "end_placeholder", self.feed.feed_info["feed_end_date"][0]
+ )
+ eval_temp._insert(
+ "version_placeholder", self.feed.feed_info["feed_version"][0]
+ )
+
+ count_lookup = dict(self.feed.describe().to_numpy())
+ eval_temp._insert(
+ "agency_placeholder", str(len(count_lookup["agencies"]))
+ )
+ eval_temp._insert(
+ "routes_placeholder", str(count_lookup["num_routes"])
+ )
+ eval_temp._insert("trips_placeholder", str(count_lookup["num_trips"]))
+ eval_temp._insert("stops_placeholder", str(count_lookup["num_stops"]))
+ eval_temp._insert(
+ "shapes_placeholder", str(count_lookup["num_shapes"])
+ )
+
+ self.get_gtfs_files()
+ file_list_html = ""
+ for num, file in enumerate(self.file_list, start=1):
+ file_list_html = (
+ file_list_html
+ + f"""
+
+
{num}.
+
{file}
+
"""
+ )
+
+ eval_temp._insert("eval_placeholder_3", file_list_html)
+ eval_temp._insert("eval_title_3", "GTFS Files Included")
+
+ eval_temp._insert("date", date)
+
+ with open(
+ f"{report_dir}/gtfs_report/index.html", "w", encoding="utf8"
+ ) as eval_f:
+ eval_f.writelines(eval_temp._get_template())
+
+ # stops
+ self.viz_stops(
+ out_pth=(
+ pathlib.Path(f"{report_dir}/gtfs_report/stop_locations.html")
+ )
+ )
+ self.viz_stops(
+ out_pth=pathlib.Path(f"{report_dir}/gtfs_report/convex_hull.html"),
+ geoms="hull",
+ geom_crs=27700,
+ )
+ stops_temp = TemplateHTML(
+ (
+ "src/transport_performance/gtfs/report/"
+ "html_templates/stops_template.html"
+ )
+ )
+ stops_temp._insert("stops_placeholder_1", "stop_locations.html")
+ stops_temp._insert("stops_placeholder_2", "convex_hull.html")
+ stops_temp._insert("stops_title_1", "Stops from GTFS data")
+ stops_temp._insert(
+ "stops_title_2", "Convex Hull Generated from GTFS Data"
+ )
+ stops_temp._insert("date", date)
+ with open(
+ f"{report_dir}/gtfs_report/stops.html", "w", encoding="utf8"
+ ) as stops_f:
+ stops_f.writelines(stops_temp._get_template())
+
+ # summaries
+ self.summarise_routes()
+ self.summarise_trips()
+ route_html = self._plot_summary(
+ which="route",
+ target_column=summary_type,
+ return_html=True,
+ width=1200,
+ height=800,
+ ylabel="Route Count",
+ xlabel="Day",
+ )
+ trip_html = self._plot_summary(
+ which="trip",
+ target_column=summary_type,
+ return_html=True,
+ width=1200,
+ height=800,
+ ylabel="Trip Count",
+ xlabel="Day",
+ )
+
+ summ_temp = TemplateHTML(
+ path=(
+ "src/transport_performance/gtfs/report/"
+ "html_templates/summary_template.html"
+ )
+ )
+ summ_temp._insert("plotly_placeholder_1", route_html)
+ summ_temp._insert(
+ "plotly_title_1",
+ f"Route Summary by Day and Route Type ({summary_type})",
+ )
+ summ_temp._insert("plotly_placeholder_2", trip_html)
+ summ_temp._insert(
+ "plotly_title_2",
+ f"Trip Summary by Day and Route Type ({summary_type})",
+ )
+ summ_temp._insert("date", date)
+ with open(
+ f"{report_dir}/gtfs_report/summaries.html", "w", encoding="utf8"
+ ) as summ_f:
+ summ_f.writelines(summ_temp._get_template())
+
+ print(
+ f"GTFS Report Created at {report_dir}\n"
+ f"View your report here: {report_dir}/gtfs_report"
+ )
+
+ return None
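A minimal usage sketch for the new `html_report` method, assuming the newport GTFS fixture used elsewhere in the tests; the output directory and argument values are illustrative only.

```python
import pathlib

from transport_performance.gtfs.validation import GtfsInstance

# build a GtfsInstance from the test fixture, then write the HTML report;
# html_report() cleans and validates the feed before writing index.html,
# stops.html and summaries.html under <report_dir>/gtfs_report/
gtfs = GtfsInstance(
    gtfs_pth=pathlib.Path("tests/data/newport-20230613_gtfs.zip")
)
gtfs.html_report(
    report_dir="outputs",  # default location
    overwrite=True,  # replace any existing report
    summary_type="mean",  # one of "mean", "median", "min", "max"
    extended_validation=True,  # per-warning/error drill-down pages
)
```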
diff --git a/src/transport_performance/osm/osm_utils.py b/src/transport_performance/osm/osm_utils.py
index e4501416..d086eb6a 100644
--- a/src/transport_performance/osm/osm_utils.py
+++ b/src/transport_performance/osm/osm_utils.py
@@ -3,7 +3,7 @@
from pyprojroot import here
from transport_performance.utils.defence import (
- _bool_defence,
+ _type_defence,
_check_list,
_check_parent_dir_exists,
_is_expected_filetype,
@@ -54,7 +54,7 @@ def filter_osm(
"tag_filter": tag_filter,
"install_osmosis": install_osmosis,
}.items():
- _bool_defence(val, param_nm=nm)
+ _type_defence(val, nm, bool)
# check bbox values makes sense, else osmosis will error
if not bbox[0] < bbox[2]:
raise ValueError(
diff --git a/src/transport_performance/population/rasterpop.py b/src/transport_performance/population/rasterpop.py
index be7de416..934793a2 100644
--- a/src/transport_performance/population/rasterpop.py
+++ b/src/transport_performance/population/rasterpop.py
@@ -34,7 +34,7 @@ class RasterPop:
pop_gdf : gpd.GeoDataFrame
A geopandas dataframe of the input data, with gridded geometry. This is
in the same CRS as the input raster data.
- centroid_gdf
+ centroid_gdf : gpd.GeoDataFrame
A geopandas dataframe of grid centroids, converted to EPSG:4326 for
transport analysis.
@@ -153,7 +153,7 @@ def plot(
save : str, optional
Filepath to save file, with the file extension, by default None
meaning a file will not be saved.
- **kwargs
+ kwargs : dict, optional
Extra arguments passed to plotting functions to configure the plot
styling. See Notes for more support.
@@ -177,9 +177,10 @@ def plot(
-----
Calling `help` as follows will provide more insights on possible kwarg
arguments for the valid plotting backends:
- - Folium backend: `help(RasterPop._plot_folium)
- - Matplotlib backend: `help(RasterPop._plot_matplotlib)
- - Cartopy backend: `help(RasterPop._plot_cartopy)
+
+ - Folium backend: `help(RasterPop._plot_folium)`
+ - Matplotlib backend: `help(RasterPop._plot_matplotlib)`
+ - Cartopy backend: `help(RasterPop._plot_cartopy)`
"""
# record of valid which values
@@ -463,18 +464,30 @@ def _plot_folium(
)
# add the centroids to a separate layer
- self.centroid_gdf.explore(
- self.__UC_COL_NAME,
- name="Centroids",
- m=m,
- show=False,
- style_kwds={
+ # conditionally style plot based on whether UC is provided
+ if self._uc_gdf is not None:
+ centroid_plot_col = self.__UC_COL_NAME
+ # this dict will change the centroid color inside/outside the UC.
+ centroid_style_dict = {
"style_function": lambda x: {
"color": "#BC544B"
if x["properties"][self.__UC_COL_NAME] is False
else "#8B0000"
}
- },
+ }
+ else:
+ centroid_plot_col = None
+ centroid_style_dict = {
+ "style_function": lambda x: {"color": "#BC544B"}
+ }
+
+ # add in the centroid layer with the conditional styling
+ self.centroid_gdf.explore(
+ centroid_plot_col,
+ name="Centroids",
+ m=m,
+ show=False,
+ style_kwds=centroid_style_dict,
legend=False,
)
diff --git a/src/transport_performance/urban_centres/raster_uc.py b/src/transport_performance/urban_centres/raster_uc.py
index 8c028072..d6fd4067 100644
--- a/src/transport_performance/urban_centres/raster_uc.py
+++ b/src/transport_performance/urban_centres/raster_uc.py
@@ -14,7 +14,7 @@
from rasterio.mask import raster_geometry_mask
from rasterio.transform import rowcol
from scipy.ndimage import generic_filter, label
-from transport_performance.utils.defence import _is_path_like
+from transport_performance.utils.defence import _handle_path_like
class UrbanCentre:
@@ -23,7 +23,7 @@ class UrbanCentre:
def __init__(self, file):
# check that path is str or PosixPath
- _is_path_like(file, "filepath")
+ file = _handle_path_like(file, "file")
self.file = file
def get_urban_centre(
diff --git a/src/transport_performance/utils/defence.py b/src/transport_performance/utils/defence.py
index 5d60d06f..69df5670 100644
--- a/src/transport_performance/utils/defence.py
+++ b/src/transport_performance/utils/defence.py
@@ -3,13 +3,14 @@
import numpy as np
import os
from typing import Union
+import pandas as pd
-def _is_path_like(pth, param_nm):
+def _handle_path_like(pth, param_nm):
"""Handle path-like parameter values.
- It is important to note that paths including backslashes are not accepted,
- with forward slashes being the only option.
+ Checks a path for symlinks and relative paths. Converts to realpath &
+ outputs pathlib.Path object (platform agnostic).
Parameters
----------
@@ -21,23 +22,25 @@ def _is_path_like(pth, param_nm):
Raises
------
- TypeError: `pth` is not either of string or pathlib.PosixPath.
+ TypeError: `pth` is not either of string or pathlib.Path.
Returns
-------
- None
+ pathlib.Path
+ Platform agnostic representation of pth.
"""
if not isinstance(pth, (str, pathlib.Path)):
raise TypeError(f"`{param_nm}` expected path-like, found {type(pth)}.")
+ # ensure returned path is not relative or contains symbolic links
+ pth = os.path.realpath(pth)
+
if not isinstance(pth, pathlib.Path):
- if "\\" in repr(pth):
- raise ValueError(
- "Please specify string paths with single forward"
- " slashes only."
- f" Got {repr(pth)}"
- )
+ # coerce to Path even if user passes string
+ pth = pathlib.Path(pth)
+
+ return pth
def _check_parent_dir_exists(
@@ -73,11 +76,7 @@ def _check_parent_dir_exists(
the create parameter is False.
"""
- _is_path_like(pth, param_nm)
- # convert path to the correct OS specific format
- pth = pathlib.Path(pth)
- # realpath helps to catch cases where relative paths are passed in main
- pth = os.path.realpath(pth)
+ pth = _handle_path_like(pth, param_nm)
parent = os.path.dirname(pth)
if not os.path.exists(parent):
if create:
@@ -116,7 +115,7 @@ def _is_expected_filetype(pth, param_nm, check_existing=True, exp_ext=".zip"):
None
"""
- _is_path_like(pth=pth, param_nm=param_nm)
+ pth = _handle_path_like(pth=pth, param_nm=param_nm)
_, ext = os.path.splitext(pth)
if check_existing and not os.path.exists(pth):
@@ -151,19 +150,35 @@ def _check_namespace_export(pkg=np, func=np.min):
def _url_defence(url):
"""Defence checking. Not exported."""
- if not isinstance(url, str):
- raise TypeError(f"url {url} expected string, instead got {type(url)}")
- elif not url.startswith((r"http://", r"https://")):
+ _type_defence(url, "url", str)
+ if not url.startswith((r"http://", r"https://")):
raise ValueError(f"url string expected protocol, instead found {url}")
return None
-def _bool_defence(some_bool, param_nm):
- """Defence checking. Not exported."""
- if not isinstance(some_bool, bool):
+def _type_defence(some_object, param_nm, types) -> None:
+ """Defence checking utility. Can handle NoneType.
+
+ Parameters
+ ----------
+ some_object : Any
+ Object to test with isinstance.
+ param_nm : str
+ A name for the parameter. Useful when this utility is used in a wrapper
+ to inherit the parent's parameter name and present it in the error message.
+ types : type or tuple
+ A type or a tuple of types to test `some_object` against.
+
+ Raises
+ ------
+ TypeError
+ `some_object` is not of type `types`.
+
+ """
+ if not isinstance(some_object, types):
raise TypeError(
- f"`{param_nm}` expected boolean. Got {type(some_bool)}"
+ f"`{param_nm}` expected {types}. Got {type(some_object)}"
)
return None
@@ -208,3 +223,88 @@ def _check_list(ls, param_nm, check_elements=True, exp_type=str):
)
return None
+
+
+def _check_column_in_df(df: pd.DataFrame, column_name: str) -> None:
+ """Defences to check that a column exists in a df.
+
+ Parameters
+ ----------
+ df : pd.DataFrame
+ The pandas dataframe to check for the specified column.
+ column_name : str
+ The name of the column to check for.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ IndexError
+ Raises an error if the column (column_name) does not exist in the
+ dataframe
+
+ """
+ if column_name not in df.columns:
+ raise IndexError(f"'{column_name}' is not a column in the dataframe.")
+
+ return None
+
+
+def _check_item_in_list(item: str, _list: list, param_nm: str) -> None:
+ """Defence to check if an item is present in a list.
+
+ Parameters
+ ----------
+ item : str
+ The item to check the list for.
+ _list : list
+ The list to check that the item is in.
+ param_nm : str
+ The name of the parameter that the item has been passed to.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ ValueError
+ Error raised when item not in the list.
+
+ """
+ if item not in _list:
+ raise ValueError(
+ f"'{param_nm}' expected one of the following:"
+ f"{_list} Got {item}"
+ )
+ return None
+
+
+def _check_attribute(obj, attr: str, message: str = None):
+ """Test to check if an attribute exists in an object.
+
+ Parameters
+ ----------
+ obj : any
+ The object to check for the attribute.
+ attr : str
+ The attribute to check exists on the object.
+ message : str, optional
+ The error message to display, by default None
+
+ Raises
+ ------
+ AttributeError
+ An error raised if the attr does not exist
+
+ """
+ err_msg = (
+ message
+ if message
+ else (f"{obj.__class__.__name__} has no attribute {attr}")
+ )
+
+ if attr not in obj.__dir__():
+ raise AttributeError(err_msg)
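An illustrative sketch of the defence helpers added above; the dataframe and values are made up for demonstration.

```python
import pandas as pd

from transport_performance.utils.defence import (
    _check_attribute,
    _check_column_in_df,
    _check_item_in_list,
    _type_defence,
)

df = pd.DataFrame({"route_type": [3, 200]})

# all of these pass silently and return None
_type_defence(True, "overwrite", bool)
_type_defence(None, "out_pth", (str, type(None)))  # tuples of types are accepted
_check_column_in_df(df, "route_type")  # unknown column -> IndexError
_check_item_in_list("mean", ["mean", "median"], "summary_type")  # else ValueError
_check_attribute(df, "columns")  # missing attribute -> AttributeError

# a failing check raises with the parameter name in the message
try:
    _type_defence(1, "overwrite", bool)
except TypeError as err:
    print(err)  # `overwrite` expected <class 'bool'>. Got <class 'int'>
```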
diff --git a/tests/data/gtfs/report/html_template.html b/tests/data/gtfs/report/html_template.html
new file mode 100644
index 00000000..8ba13060
--- /dev/null
+++ b/tests/data/gtfs/report/html_template.html
@@ -0,0 +1,6 @@
+
+
+
+
+
[test_placeholder] Tester [test_placeholder]
+
diff --git a/tests/gtfs/report/test_report_utils.py b/tests/gtfs/report/test_report_utils.py
new file mode 100644
index 00000000..4a3727df
--- /dev/null
+++ b/tests/gtfs/report/test_report_utils.py
@@ -0,0 +1,113 @@
+"""Test scripts for the GTFS report utility functions."""
+
+import os
+import pathlib
+import re
+
+import pytest
+from pyprojroot import here
+
+from transport_performance.gtfs.report.report_utils import (
+ TemplateHTML,
+ _set_up_report_dir,
+)
+
+
+@pytest.fixture(scope="function")
+def template_fixture():
+ """Fixture for test funcs expecting a valid feed object."""
+ template = TemplateHTML(
+ path=here("tests/data/gtfs/report/html_template.html")
+ )
+ return template
+
+
+class TestTemplateHTML(object):
+ """Tests related to the TemplateHTML class."""
+
+ def test_init(self, template_fixture):
+ """Test initialising the TemplateHTML class."""
+ expected_template = """
+
+
+
+
[test_placeholder] Tester [test_placeholder]
+
+"""
+ assert (
+ expected_template == template_fixture._get_template()
+ ), "Test template not as expected"
+
+ def test__insert_defence(self, template_fixture):
+ """Test defences for ._insert()."""
+ with pytest.raises(
+ ValueError,
+ match=(
+ "`replace_multiple` requires True as found \n"
+ "multiple placeholder matches in template."
+ ),
+ ):
+ template_fixture._insert("test_placeholder", "test_value")
+
+ def test__insert_on_pass(self, template_fixture):
+ """Test functionality for ._insert() when defences are passed."""
+ template_fixture._insert(
+ placeholder="test_placeholder",
+ value="test_value_insert_test",
+ replace_multiple=True,
+ )
+ assert (
+ "test_value_insert_test"
+ in template_fixture._get_template().replace(r"\n", "")
+ ), ("Test placeholder replacement not acting as expected")
+
+
+class TestSetUpReportDir(object):
+ """Test setting up a dir for a report."""
+
+ def test__set_up_report_dir_defence(self, tmp_path):
+ """Test the defences for _set_up_report_dir()."""
+ _set_up_report_dir(os.path.join(tmp_path))
+ with pytest.raises(
+ FileExistsError,
+ match=(
+ re.escape(
+ "Report already exists at path: "
+ f"[{tmp_path}]."
+ "Consider setting overwrite=True"
+ "if you'd like to overwrite this."
+ )
+ ),
+ ):
+ _set_up_report_dir(os.path.join(tmp_path), overwrite=False)
+
+ def test__set_up_report_dir_on_pass(self, tmp_path):
+ """Test _set_up_report_dir() when defences are passed."""
+ # create original report
+ _set_up_report_dir(
+ pathlib.Path(os.path.join(tmp_path)), overwrite=False
+ )
+ assert os.path.exists(
+ os.path.join(tmp_path, "gtfs_report")
+ ), "Failed to create report in temporary directory"
+ # attempt to overwrite the previous report
+ _set_up_report_dir(
+ pathlib.Path(os.path.join(tmp_path)), overwrite=True
+ )
+ assert os.path.exists(
+ os.path.join(tmp_path, "gtfs_report")
+ ), "Failed to create report in temporary directory"
+ # attempt to create report in different paths
+ _set_up_report_dir(os.path.join(tmp_path, "testing"))
+ assert os.path.exists(
+ os.path.join(tmp_path, "testing", "gtfs_report")
+ ), (
+ f"Failed to create report dir in {tmp_path}/testing/" "gtfs_report"
+ )
+ _set_up_report_dir(os.path.join(tmp_path, "testing", "testing"))
+ assert os.path.exists(
+ os.path.join(tmp_path, "testing", "testing", "gtfs_report")
+ ), (
+ f"Failed to create report dir in {tmp_path}/testing/testing/"
+ "gtfs_report"
+ )
diff --git a/tests/gtfs/test_gtfs_utils.py b/tests/gtfs/test_gtfs_utils.py
index fbd12b43..443a9a57 100644
--- a/tests/gtfs/test_gtfs_utils.py
+++ b/tests/gtfs/test_gtfs_utils.py
@@ -4,8 +4,15 @@
import os
import pytest
import pathlib
+import re
-from transport_performance.gtfs.gtfs_utils import bbox_filter_gtfs
+import pandas as pd
+from plotly.graph_objects import Figure as PlotlyFigure
+
+from transport_performance.gtfs.gtfs_utils import (
+ bbox_filter_gtfs,
+ convert_pandas_to_plotly,
+)
from transport_performance.gtfs.validation import GtfsInstance
@@ -40,3 +47,49 @@ def test_bbox_filter_gtfs_writes_as_expected(self, tmpdir):
assert isinstance(
feed, GtfsInstance
), f"Expected class `Gtfs_Instance but found: {type(feed)}`"
+
+
+@pytest.fixture(scope="function")
+def test_df():
+ """A test fixture."""
+ test_df = pd.DataFrame(
+ {
+ "ID": [1, 2, 3, 4, 1],
+ "score": [45, 34, 23, 12, 23],
+ "grade": ["A", "B", "C", "D", "C"],
+ }
+ )
+ return test_df
+
+
+class TestConvertPandasToPlotly(object):
+ """Test convert_pandas_to_plotly()."""
+
+ def test_convert_pandas_to_plotly_defences(self, test_df):
+ """Test convert_pandas_to_plotly defences."""
+ multi_index_df = test_df.groupby(["ID", "grade"]).agg(
+ {"score": ["mean", "min", "max"]}
+ )
+ with pytest.raises(
+ TypeError,
+ match="Pandas dataframe must have a singular index, not "
+ "MultiIndex. "
+ "This means that 'df.columns' or 'df.index' does not return a "
+ "MultiIndex.",
+ ):
+ convert_pandas_to_plotly(multi_index_df)
+
+ def test_convert_pandas_to_plotly_on_pass(self, test_df):
+ """Test convert_pandas_to_plotly() when defences pass."""
+ # return_html
+ html_return = convert_pandas_to_plotly(test_df, return_html=True)
+ assert isinstance(html_return, str), re.escape(
+ f"Expected type str but {type(html_return)} found"
+ )
+
+ # return plotly figure
+ fig_return = convert_pandas_to_plotly(test_df, return_html=False)
+ assert isinstance(fig_return, PlotlyFigure), re.escape(
+ "Expected type plotly.graph_objects.Figure but "
+ f"{type(fig_return)} found"
+ )
diff --git a/tests/gtfs/test_routes.py b/tests/gtfs/test_routes.py
index 38eaa23a..efabfc2f 100644
--- a/tests/gtfs/test_routes.py
+++ b/tests/gtfs/test_routes.py
@@ -47,12 +47,12 @@ def test_defensive_exceptions(self):
"""Test the defensive checks raise as expected."""
with pytest.raises(
TypeError,
- match=r"url 1 expected string, instead got ",
+ match=r"`url` expected . Got ",
):
scrape_route_type_lookup(gtfs_url=1)
with pytest.raises(
TypeError,
- match=r"url False expected string, instead got ",
+ match=r"`url` expected . Got ",
):
scrape_route_type_lookup(ext_spec_url=False)
with pytest.raises(
@@ -62,7 +62,7 @@ def test_defensive_exceptions(self):
scrape_route_type_lookup(gtfs_url="foobar")
with pytest.raises(
TypeError,
- match=r"`extended_schema` expected boolean. Got ",
+ match=r"`extended_schema` .* . Got ",
):
scrape_route_type_lookup(extended_schema="True")
diff --git a/tests/gtfs/test_validation.py b/tests/gtfs/test_validation.py
index 5ebd7ee5..40b9c3e8 100644
--- a/tests/gtfs/test_validation.py
+++ b/tests/gtfs/test_validation.py
@@ -1,19 +1,22 @@
"""Tests for validation module."""
+import re
+import os
+
import pytest
from pyprojroot import here
import gtfs_kit as gk
import pandas as pd
from unittest.mock import patch, call
-import os
from geopandas import GeoDataFrame
import numpy as np
-import re
import pathlib
+from plotly.graph_objects import Figure as PlotlyFigure
from transport_performance.gtfs.validation import (
GtfsInstance,
_get_intermediate_dates,
_create_map_title_text,
+ _convert_multi_index_to_single,
)
@@ -35,7 +38,10 @@ def test_init_defensive_behaviours(self):
):
GtfsInstance(gtfs_pth=1)
with pytest.raises(
- FileExistsError, match=r"doesnt/exist not found on file."
+ # match refactored to work on windows & mac
+ # see https://regex101.com/r/i1C4I4/1
+ FileExistsError,
+ match=r"doesnt(/|\\)exist not found on file.",
):
GtfsInstance(gtfs_pth="doesnt/exist")
# a case where file is found but not a zip directory
@@ -76,6 +82,24 @@ def test_init_on_pass(self):
gtfs2.feed.dist_units == "m"
), f"Expected 'm', found: {gtfs2.feed.dist_units}"
+ def test_get_gtfs_files(self, gtfs_fixture):
+ """Assert files that make up the GTFS."""
+ expected_files = [
+ "agency.txt",
+ "calendar_dates.txt",
+ "stop_times.txt",
+ "frequencies.txt",
+ "shapes.txt",
+ "trips.txt",
+ "feed_info.txt",
+ "stops.txt",
+ "calendar.txt",
+ "routes.txt",
+ ]
+ assert (
+ gtfs_fixture.get_gtfs_files() == expected_files
+ ), "GTFS files not as expected"
+
def test_is_valid(self, gtfs_fixture):
"""Assertions about validity_df table."""
gtfs_fixture.is_valid()
@@ -261,6 +285,26 @@ def test__get_intermediate_dates(self):
pd.Timestamp("2023-05-08"),
]
+ def test__convert_multi_index_to_single(self):
+ """Light testing got _convert_multi_index_to_single()."""
+ test_df = pd.DataFrame(
+ {"test": [1, 2, 3, 4], "id": ["E", "E", "C", "D"]}
+ )
+ test_df = test_df.groupby("id").agg({"test": ["min", "mean", "max"]})
+ expected_cols = pd.Index(
+ ["test_min", "test_mean", "test_max"], dtype="object"
+ )
+ output_cols = _convert_multi_index_to_single(df=test_df).columns
+ assert isinstance(
+ output_cols, pd.Index
+ ), "_convert_multi_index_to_single() not behaving as expected"
+ expected_cols = list(expected_cols)
+ output_cols = list(output_cols)
+ for col in output_cols:
+ assert col in expected_cols, f"{col} not an expected column"
+ expected_cols.remove(col)
+ assert len(expected_cols) == 0, "Not all expected cols in output cols"
+
def test__order_dataframe_by_day_defence(self, gtfs_fixture):
"""Test __order_dataframe_by_day defences."""
with pytest.raises(
@@ -454,15 +498,16 @@ def test_summarise_trips_on_pass(self, gtfs_fixture):
)
found_ds = gtfs_fixture.daily_trip_summary.columns
- exp_cols_ds = pd.MultiIndex.from_tuples(
+ exp_cols_ds = pd.Index(
[
- ("day", ""),
- ("route_type", ""),
- ("trip_count", "max"),
- ("trip_count", "mean"),
- ("trip_count", "median"),
- ("trip_count", "min"),
- ]
+ "day",
+ "route_type",
+ "trip_count_max",
+ "trip_count_mean",
+ "trip_count_median",
+ "trip_count_min",
+ ],
+ dtype="object",
)
assert (
@@ -487,12 +532,12 @@ def test_summarise_trips_on_pass(self, gtfs_fixture):
# tests the output of the daily_route_summary table
# using tests/data/newport-20230613_gtfs.zip
expected_df = {
- ("day", ""): {8: "friday", 9: "friday"},
- ("route_type", ""): {8: 3, 9: 200},
- ("trip_count", "max"): {8: 1211, 9: 90},
- ("trip_count", "mean"): {8: 1211.0, 9: 88.0},
- ("trip_count", "median"): {8: 1211.0, 9: 88.0},
- ("trip_count", "min"): {8: 1211, 9: 88},
+ "day": {8: "friday", 9: "friday"},
+ "route_type": {8: 3, 9: 200},
+ "trip_count_max": {8: 1211, 9: 90},
+ "trip_count_min": {8: 1211, 9: 88},
+ "trip_count_mean": {8: 1211.0, 9: 88.0},
+ "trip_count_median": {8: 1211.0, 9: 88.0},
}
found_df = gtfs_fixture.daily_trip_summary[
@@ -522,15 +567,16 @@ def test_summarise_routes_on_pass(self, gtfs_fixture):
)
found_ds = gtfs_fixture.daily_route_summary.columns
- exp_cols_ds = pd.MultiIndex.from_tuples(
+ exp_cols_ds = pd.Index(
[
- ("day", ""),
- ("route_count", "max"),
- ("route_count", "mean"),
- ("route_count", "median"),
- ("route_count", "min"),
- ("route_type", ""),
- ]
+ "day",
+ "route_count_max",
+ "route_count_mean",
+ "route_count_median",
+ "route_count_min",
+ "route_type",
+ ],
+ dtype="object",
)
assert (
@@ -555,12 +601,12 @@ def test_summarise_routes_on_pass(self, gtfs_fixture):
# tests the output of the daily_route_summary table
# using tests/data/newport-20230613_gtfs.zip
expected_df = {
- ("day", ""): {8: "friday", 9: "friday"},
- ("route_count", "max"): {8: 74, 9: 10},
- ("route_count", "mean"): {8: 74.0, 9: 9.0},
- ("route_count", "median"): {8: 74.0, 9: 9.0},
- ("route_count", "min"): {8: 74, 9: 9},
- ("route_type", ""): {8: 3, 9: 200},
+ "day": {8: "friday", 9: "friday"},
+ "route_count_max": {8: 74, 9: 10},
+ "route_count_min": {8: 74, 9: 9},
+ "route_count_mean": {8: 74.0, 9: 9.0},
+ "route_count_median": {8: 74.0, 9: 9.0},
+ "route_type": {8: 3, 9: 200},
}
found_df = gtfs_fixture.daily_route_summary[
@@ -577,3 +623,196 @@ def test_summarise_routes_on_pass(self, gtfs_fixture):
"Size of date_route_counts not as expected. "
"Expected {expected_size}"
)
+
+ def test__plot_summary_defences(self, tmp_path, gtfs_fixture):
+ """Test defences for _plot_summary()."""
+ # test defences for checks summaries exist
+ with pytest.raises(
+ AttributeError,
+ match=re.escape(
+ "The daily_trip_summary table could not be found."
+ " Did you forget to call '.summarise_trips()' first?"
+ ),
+ ):
+ gtfs_fixture._plot_summary(which="trip", target_column="mean")
+
+ with pytest.raises(
+ AttributeError,
+ match=re.escape(
+ "The daily_route_summary table could not be found."
+ " Did you forget to call '.summarise_routes()' first?"
+ ),
+ ):
+ gtfs_fixture._plot_summary(which="route", target_column="mean")
+
+ gtfs_fixture.summarise_routes()
+
+ # test parameters that are yet to be tested
+ options = ["v", "h"]
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "'orientation' expected one of the following:"
+ f"{options} Got i"
+ ),
+ ):
+ gtfs_fixture._plot_summary(
+ which="route",
+ target_column="route_count_mean",
+ orientation="i",
+ )
+
+ # save test for an image with invalid file extension
+ valid_img_formats = ["png", "pdf", "jpg", "jpeg", "webp", "svg"]
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "Please specify a valid image format. Valid formats "
+ f"include {valid_img_formats}"
+ ),
+ ):
+ gtfs_fixture._plot_summary(
+ which="route",
+ target_column="route_count_mean",
+ save_image=True,
+ out_dir=os.path.join(tmp_path, "outputs"),
+ img_type="test",
+ )
+
+ # test choosing an invalid value for 'which'
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ "'which' expected one of the following:"
+ "['trip', 'route'] Got tester"
+ ),
+ ):
+ gtfs_fixture._plot_summary(which="tester", target_column="tester")
+
+ def test__plot_summary_on_pass(self, gtfs_fixture, tmp_path):
+ """Test plotting a summary when defences are passed."""
+ current_fixture = gtfs_fixture
+ current_fixture.summarise_routes()
+
+ # test returning a html string
+ test_html = gtfs_fixture._plot_summary(
+ which="route",
+ target_column="route_count_mean",
+ return_html=True,
+ )
+ assert type(test_html) == str, "Failed to return HTML for the plot"
+
+ # test returning a plotly figure
+ test_image = gtfs_fixture._plot_summary(
+ which="route", target_column="route_count_mean"
+ )
+ assert (
+ type(test_image) == PlotlyFigure
+ ), "Failed to return plotly.graph_objects.Figure type"
+
+ # test returning a plotly for trips
+ gtfs_fixture.summarise_trips()
+ test_image = gtfs_fixture._plot_summary(
+ which="trip", target_column="trip_count_mean"
+ )
+ assert (
+ type(test_image) == PlotlyFigure
+ ), "Failed to return plotly.graph_objects.Figure type"
+
+ # test saving plots in html and png format
+ gtfs_fixture._plot_summary(
+ which="route",
+ target_column="mean",
+ width=1200,
+ height=800,
+ save_html=True,
+ save_image=True,
+ ylabel="Mean",
+ xlabel="Day",
+ orientation="h",
+ plotly_kwargs={"legend": dict(bgcolor="lightgrey")},
+ out_dir=os.path.join(tmp_path, "save_test"),
+ )
+
+ # general save test
+ save_dir = os.listdir(os.path.join(tmp_path, "save_test"))
+ counts = {"html": 0, "png": 0}
+ for pth in save_dir:
+ if ".html" in pth:
+ counts["html"] += 1
+ elif ".png" in pth:
+ counts["png"] += 1
+
+ assert os.path.exists(
+ os.path.join(tmp_path, "save_test")
+ ), "'save_test' dir could not be created'"
+ assert counts["html"] == 1, "Failed to save plot as HTML"
+ assert counts["png"] == 1, "Failed to save plot as png"
+
+ def test__create_extended_repeated_pair_table(self, gtfs_fixture):
+ """Test _create_extended_repeated_pair_table()."""
+ test_table = pd.DataFrame(
+ {
+ "trip_name": ["Newport", "Cwmbran", "Cardiff", "Newport"],
+ "trip_abbrev": ["Newp", "Cwm", "Card", "Newp"],
+ "type": ["bus", "train", "bus", "train"],
+ }
+ )
+
+ expected_table = pd.DataFrame(
+ {
+ "trip_name": {0: "Newport"},
+ "trip_abbrev": {0: "Newp"},
+ "type_original": {0: "bus"},
+ "type_duplicate": {0: "train"},
+ }
+ ).to_dict()
+
+ returned_table = gtfs_fixture._create_extended_repeated_pair_table(
+ table=test_table,
+ join_vars=["trip_name", "trip_abbrev"],
+ original_rows=[0],
+ ).to_dict()
+
+ assert (
+ expected_table == returned_table
+ ), "_create_extended_repeated_pair_table() failed"
+
+ def test_html_report_defences(self, gtfs_fixture, tmp_path):
+ """Test the defences whilst generating a HTML report."""
+ with pytest.raises(
+ ValueError, match="'summary type' must be mean, median, min or max"
+ ):
+ gtfs_fixture.html_report(
+ report_dir=tmp_path,
+ overwrite=True,
+ summary_type="test_sum",
+ )
+
+ def test_html_report_on_pass(self, gtfs_fixture, tmp_path):
+ """Test that a HTML report is generated if defences are passed."""
+ gtfs_fixture.html_report(report_dir=pathlib.Path(tmp_path))
+
+ # assert that the report has been completely generated
+ assert os.path.exists(
+ pathlib.Path(os.path.join(tmp_path, "gtfs_report"))
+ ), "gtfs_report dir was not created"
+ assert os.path.exists(
+ pathlib.Path(os.path.join(tmp_path, "gtfs_report", "index.html"))
+ ), "gtfs_report/index.html was not created"
+ assert os.path.exists(
+ pathlib.Path(os.path.join(tmp_path, "gtfs_report", "styles.css"))
+ ), "gtfs_report/styles.css was not created"
+ assert os.path.exists(
+ pathlib.Path(
+ os.path.join(tmp_path, "gtfs_report", "summaries.html")
+ )
+ ), "gtfs_report/summaries.html was not created"
+ assert os.path.exists(
+ pathlib.Path(
+ os.path.join(tmp_path, "gtfs_report", "stop_locations.html")
+ )
+ ), "gtfs_report/stop_locations.html was not created"
+ assert os.path.exists(
+ pathlib.Path(os.path.join(tmp_path, "gtfs_report", "stops.html"))
+ ), "gtfs_report/stops.html was not created"
diff --git a/tests/osm/test_osm_utils.py b/tests/osm/test_osm_utils.py
index 04ce3803..17e145f2 100644
--- a/tests/osm/test_osm_utils.py
+++ b/tests/osm/test_osm_utils.py
@@ -30,13 +30,14 @@ def test_filter_osm_defense(self):
# out_pth is not a path_like
filter_osm(out_pth=False)
with pytest.raises(
- TypeError, match="`tag_filter` expected boolean. Got "
+ TypeError,
+ match="`tag_filter` expected . Got ",
):
# check for boolean defense
filter_osm(tag_filter=1)
with pytest.raises(
TypeError,
- match="`install_osmosis` expected boolean. Got ",
+ match="`install_osmosis` .* . Got ",
):
# check for boolean defense
filter_osm(install_osmosis="False")
diff --git a/tests/urban_centres/test_urban_centres.py b/tests/urban_centres/test_urban_centres.py
index 16ad1e41..10ecaf53 100644
--- a/tests/urban_centres/test_urban_centres.py
+++ b/tests/urban_centres/test_urban_centres.py
@@ -1,6 +1,13 @@
"""Unit tests for transport_performance/urban_centres/urban_centres_class.
TODO: add docs.
+
+Note: in the class parameterised tests below there are some arguments that are
+not used across all tests within them. This is a deliberate design choice,
+since pytest expects all parameterised arguments to be passed - removing or
+excluding an argument from a single test triggers errors. The alternative would
+be to separate the tests and reparameterise each separately, but this would
+lead to a larger codebase that is more difficult to maintain.
"""
import os
@@ -20,6 +27,7 @@
import transport_performance.urban_centres.raster_uc as ucc
+# fixtures
@pytest.fixture
def dummy_pop_array(tmp_path: str):
"""Create dummy population array."""
@@ -28,9 +36,9 @@ def dummy_pop_array(tmp_path: str):
dummy = np.array(
[
[5000, 5000, 5000, 1500, 1500, 0, 0, 0, 5000, 5000],
- [5000, 5000, 5000, 0, 0, 0, 0, 0, 0, 0],
+ [5000, 5000, 5000, 0, 0, 1500, 0, 0, 0, 0],
[5000, 5000, 5000, 1500, 1500, 0, 0, 0, 0, 0],
- [1500, 1500, 1500, 0, 0, 0, 0, 0, 0, 0],
+ [5000, 1500, 1500, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 500, 500, 100, 0, 0, 0],
[1000, 0, 0, 0, 100, 40, 5000, 0, 0, 0],
@@ -140,7 +148,8 @@ def outside_cluster_centre():
return (41.74, -13.25)
-# test exceptions for input parameters
+# tests
+# test exceptions for file path
@pytest.mark.parametrize(
"filepath, func, expected",
[
@@ -162,6 +171,7 @@ def test_file(filepath, func, bbox, cluster_centre, expected):
)
+# test exceptions for bounding box
@pytest.mark.parametrize(
"window, expected",
[
@@ -187,6 +197,7 @@ def test_bbox(dummy_pop_array, window, cluster_centre, expected):
)
+# test exceptions for area centre
@pytest.mark.parametrize(
"centre_coords, expected",
[
@@ -209,6 +220,7 @@ def test_centre(dummy_pop_array, bbox, centre_coords, expected):
)
+# test exceptions for band
@pytest.mark.parametrize(
"band, expected",
[
@@ -229,95 +241,225 @@ def test_band_n(dummy_pop_array, bbox, cluster_centre, band, expected):
)
+# test cell population threshold
@pytest.mark.parametrize(
- "cell_pop_t, expected",
+ "cell_pop_t, expected, flags",
[
- (1500, does_not_raise()),
- (1500.5, pytest.raises(TypeError)),
- ("1500", pytest.raises(TypeError)),
+ (1500, does_not_raise(), [True, True, False]),
+ (5000, does_not_raise(), [True, False, False]),
+ (1500.5, pytest.raises(TypeError), []),
+ ("1500", pytest.raises(TypeError), []),
# tests value that would not create any cluster
- (150000, pytest.raises(ValueError)),
+ (150000, pytest.raises(ValueError), []),
],
)
-def test_cell_pop_t(
- dummy_pop_array, bbox, cluster_centre, cell_pop_t, expected
-):
- """Test cell_pop_threshold parameter."""
- with expected:
- assert (
- ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+class TestCellPop:
+ """Class to test effect of cell pop threshold on output."""
+
+ def test_cell_pop_t(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ cell_pop_t,
+ expected,
+ flags,
+ ):
+ """Test cell_pop_threshold parameter."""
+ with expected:
+ assert (
+ ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+ bbox, cluster_centre, cell_pop_threshold=cell_pop_t
+ )
+ is not None
+ )
+
+ def test_cell_pop_t_output(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ cell_pop_t,
+ expected,
+ flags,
+ ):
+ """Test cell_pop_threshold output."""
+ if flags != []:
+ uc = ucc.UrbanCentre(dummy_pop_array)
+ uc.get_urban_centre(
bbox, cluster_centre, cell_pop_threshold=cell_pop_t
)
- is not None
- )
+ # cell meets both the 1500 and 5000 thresholds
+ assert uc._UrbanCentre__pop_filt_array[0, 2] == flags[0]
+ # cell meets the 1500 threshold but not 5000
+ assert uc._UrbanCentre__pop_filt_array[0, 3] == flags[1]
+ # cell below both thresholds is never flagged
+ assert uc._UrbanCentre__pop_filt_array[6, 0] == flags[2]
+# test diagonal boolean
@pytest.mark.parametrize(
- "diagonal, expected",
+ "diagonal, expected, cluster, num_clusters",
[
- (True, does_not_raise()),
- (False, does_not_raise()),
- ("True", pytest.raises(TypeError)),
+ (True, does_not_raise(), 1, 3),
+ (False, does_not_raise(), 3, 4),
+ (1, pytest.raises(TypeError), 0, 0),
+ ("True", pytest.raises(TypeError), 0, 0),
],
)
-def test_diag(dummy_pop_array, bbox, cluster_centre, diagonal, expected):
- """Test diag parameter."""
- with expected:
- assert (
- ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
- bbox, cluster_centre, diag=diagonal
+class TestDiag:
+ """Class to test effect of diagonal boolean on output."""
+
+ def test_diag(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ diagonal,
+ expected,
+ cluster,
+ num_clusters,
+ ):
+ """Test diag parameter."""
+ with expected:
+ assert (
+ ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+ bbox, cluster_centre, diag=diagonal
+ )
+ is not None
)
- is not None
- )
-
+ def test_diag_output(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ diagonal,
+ expected,
+ cluster,
+ num_clusters,
+ ):
+ """Test diag parameter output."""
+ if cluster != 0:
+ uc = ucc.UrbanCentre(dummy_pop_array)
+ uc.get_urban_centre(bbox, cluster_centre, diag=diagonal)
+ # checks if diagonal cell is clustered with main blob or separate
+ assert uc._UrbanCentre__cluster_array[1, 5] == cluster
+ assert uc._UrbanCentre__num_clusters == num_clusters
+
+
+# test cluster population threshold
@pytest.mark.parametrize(
- "cluster_pop_t, expected",
+ "cluster_pop_t, expected, clusters",
[
- (50000, does_not_raise()),
- (50000.5, pytest.raises(TypeError)),
- ("50000", pytest.raises(TypeError)),
+ (50000, does_not_raise(), [1, 0, 0]),
+ (10000, does_not_raise(), [1, 2, 0]),
+ (50000.5, pytest.raises(TypeError), []),
+ ("50000", pytest.raises(TypeError), []),
# test value that would filter out all clusters
- (1000000, pytest.raises(ValueError)),
+ (1000000, pytest.raises(ValueError), []),
],
)
-def test_cluster_pop_t(
- dummy_pop_array, bbox, cluster_centre, cluster_pop_t, expected
-):
- """Test pop_threshold parameter."""
- with expected:
- assert (
- ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+class TestClusterPop:
+ """Class to test effect of clustering pop threshold on output."""
+
+ def test_cluster_pop_t(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ cluster_pop_t,
+ expected,
+ clusters,
+ ):
+ """Test pop_threshold parameter."""
+ with expected:
+ assert (
+ ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+ bbox, cluster_centre, cluster_pop_threshold=cluster_pop_t
+ )
+ is not None
+ )
+
+ def test_cluster_pop_t_output(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ cluster_pop_t,
+ expected,
+ clusters,
+ ):
+ """Test pop_threshold outputs."""
+ if clusters != []:
+ uc = ucc.UrbanCentre(dummy_pop_array)
+ uc.get_urban_centre(
bbox, cluster_centre, cluster_pop_threshold=cluster_pop_t
)
- is not None
- )
+ # checks which clusters remain after applying the population threshold
+ assert uc._UrbanCentre__urban_centres_array[0, 0] == clusters[0]
+ assert uc._UrbanCentre__urban_centres_array[0, 9] == clusters[1]
+ assert uc._UrbanCentre__urban_centres_array[6, 6] == clusters[2]
+# test adjacent cells threshold to fill
@pytest.mark.parametrize(
- "cell_fill_t, expected",
+ "cell_fill_t, expected, fills",
[
- (5, does_not_raise()),
- (5.5, pytest.raises(TypeError)),
- ("5", pytest.raises(TypeError)),
+ (5, does_not_raise(), [1, 1, 0]),
+ (7, does_not_raise(), [1, 0, 0]),
+ (5.5, pytest.raises(TypeError), []),
+ ("5", pytest.raises(TypeError), []),
# test values outside boundaries
- (11, pytest.raises(ValueError)),
- (0, pytest.raises(ValueError)),
+ (11, pytest.raises(ValueError), []),
+ (0, pytest.raises(ValueError), []),
],
)
-def test_cell_fill_t(
- dummy_pop_array, bbox, cluster_centre, cell_fill_t, expected
-):
- """Test cell_fill_threshold parameter."""
- with expected:
- assert (
- ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+class TestFill:
+ """Class to test effect of fill threshold on output."""
+
+ def test_cell_fill_t(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ cell_fill_t,
+ expected,
+ fills,
+ ):
+ """Test cell_fill_threshold parameter."""
+ with expected:
+ assert (
+ ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
+ bbox, cluster_centre, cell_fill_treshold=cell_fill_t
+ )
+ is not None
+ )
+
+ def test_cell_fill_output(
+ self,
+ dummy_pop_array,
+ bbox,
+ cluster_centre,
+ cell_fill_t,
+ expected,
+ fills,
+ ):
+ """Test fill output."""
+ if fills != []:
+ uc = ucc.UrbanCentre(dummy_pop_array)
+ uc.get_urban_centre(
bbox, cluster_centre, cell_fill_treshold=cell_fill_t
)
- is not None
- )
+ # fills with 5 and 7
+ assert uc._UrbanCentre__filled_array[1, 3] == fills[0]
+ # fills with 5 but not 7
+ assert uc._UrbanCentre__filled_array[1, 4] == fills[1]
+ # doesn't fill (checks if outside bounds are 0)
+ assert uc._UrbanCentre__filled_array[4, 0] == fills[2]
+# test nodata parameter
@pytest.mark.parametrize(
"v_nodata, expected",
[
@@ -337,6 +479,7 @@ def test_v_nodata(dummy_pop_array, bbox, cluster_centre, v_nodata, expected):
)
+# test buffer parameter
@pytest.mark.parametrize(
"buffer, expected",
[
@@ -357,6 +500,7 @@ def test_buffer(dummy_pop_array, bbox, cluster_centre, buffer, expected):
)
+# test intermediate output types
@pytest.mark.parametrize(
"output, expected",
[
@@ -381,11 +525,37 @@ def test_output_types(dummy_pop_array, bbox, cluster_centre, output, expected):
assert type(getattr(obj, output)) == expected
+# test final output characteristics using defaults
def test_final_output(dummy_pop_array, bbox, cluster_centre):
"""Test final output."""
out = ucc.UrbanCentre(dummy_pop_array).get_urban_centre(
bbox, cluster_centre
)
+
+ # uc expected coordinates
+ # coordinates will need to be recalculated if array fixture changes
+ # you can just do list(Polygon.exterior.coords) to get coordinates
+ uc_coords = [
+ (-243000.0, 6056000.0),
+ (-243000.0, 6052000.0),
+ (-240000.0, 6052000.0),
+ (-240000.0, 6053000.0),
+ (-238000.0, 6053000.0),
+ (-238000.0, 6056000.0),
+ (-243000.0, 6056000.0),
+ ]
+ assert out.loc[0][1] == Polygon(uc_coords)
+
+ # bbox expected coordinates
+ bbox_coords = [
+ (-253000.0, 6042000.0),
+ (-228000.0, 6042000.0),
+ (-228000.0, 6066000.0),
+ (-253000.0, 6066000.0),
+ (-253000.0, 6042000.0),
+ ]
+ assert out.loc[2][1] == Polygon(bbox_coords)
+
# type of output
assert type(out) == gpd.GeoDataFrame
diff --git a/tests/utils/test_defence.py b/tests/utils/test_defence.py
index 6bd4b992..2cf617f6 100644
--- a/tests/utils/test_defence.py
+++ b/tests/utils/test_defence.py
@@ -4,10 +4,15 @@
import pathlib
import pytest
+import pandas as pd
from transport_performance.utils.defence import (
_check_list,
_check_parent_dir_exists,
+ _type_defence,
+ _check_column_in_df,
+ _check_item_in_list,
+ _check_attribute,
)
@@ -65,21 +70,6 @@ def test_check_parent_dir_exists_defence(self):
pth="missing/file.someext", param_nm="not_found", create=False
)
- error_pth = "test_folder\\test_file.py"
- with pytest.raises(
- ValueError,
- match=re.escape(
- "Please specify string paths with single forward"
- " slashes only."
- f" Got {repr(error_pth)}"
- ),
- ):
- _check_parent_dir_exists(
- pth="test_folder\\test_file.py",
- param_nm="test_prm",
- create=False,
- )
-
def test_check_parents_dir_exists(self, tmp_path):
"""Test that a parent directory is created."""
# test without create
@@ -136,3 +126,207 @@ def test_check_parents_dir_exists(self, tmp_path):
"_check_parent_dir_exists did not make parent dir"
" when 'create=True' (multiple levels)"
)
+
+
+class Test_TypeDefence(object):
+ """Assertions for _type_defence()."""
+
+ def test_type_defence_raises_on_single_types(self):
+ """Assert func raises for single values to the `types` parameter."""
+ with pytest.raises(
+ TypeError,
+ match="`empty_list` expected . Got ",
+ ):
+ _type_defence(list(), "empty_list", str)
+ with pytest.raises(
+ TypeError,
+ match="`int_1` expected . Got ",
+ ):
+ _type_defence(1, "int_1", list)
+ with pytest.raises(
+ TypeError,
+ match="`string_1` expected . Got ",
+ ):
+ _type_defence("1", "string_1", int)
+ with pytest.raises(
+ TypeError,
+ match="`float_1` expected . Got ",
+ ):
+ _type_defence(1.0, "float_1", int)
+ with pytest.raises(
+ TypeError,
+ match="`empty_dict` expected . Got ",
+ ):
+ _type_defence(dict(), "empty_dict", tuple)
+ with pytest.raises(
+ TypeError,
+ match="`empty_tuple` expected . Got ",
+ ):
+ _type_defence(tuple(), "empty_tuple", dict)
+ with pytest.raises(
+ TypeError,
+ match="`None` expected . Got ",
+ ):
+ _type_defence(None, "None", int)
+
+ def test_type_defence_raises_on_multiple_types(self):
+ """Assert func raises for multiple values to the `types` parameter."""
+ with pytest.raises(
+ TypeError,
+ match=re.escape(
+ "pected (, ). Got "
+ ),
+ ):
+ _type_defence(1, "int_1", (str, type(None)))
+ with pytest.raises(
+ TypeError,
+ match=re.escape(
+ "`str_1` expected (, , , , , , , , , , <"
+ ),
+ ):
+ _type_defence(
+ tuple(),
+ "empty_tuple",
+ (type(None), list, dict, str, int, float),
+ )
+ with pytest.raises(
+ TypeError,
+ match=re.escape(
+ "`None` expected (, , None:
+ """Intialise dummy object."""
+ self.tester = "test"
+ self.tester_also = "also_test"
+
+ new_dummy = dummy()
+ return new_dummy
+
+
+class TestCheckAttribute(object):
+ """Tests for _check_item_in_list()."""
+
+ def test_check_attribute_defence(self, dummy_obj):
+ """Defensive tests for check_attribute."""
+ with pytest.raises(AttributeError, match="dummy test msg"):
+ _check_attribute(
+ obj=dummy_obj, attr="not_in_test", message="dummy test msg"
+ )
+
+ def test_check_attribute_on_pass(self, dummy_obj):
+ """General tests for check_attribute()."""
+ _check_attribute(dummy_obj, "tester")