Skip to content

Commit

Permalink
Refactor test methods in test_main/index.rst and main.py
Browse files Browse the repository at this point in the history
  • Loading branch information
StTysh committed Oct 29, 2024
1 parent 9f51e35 commit b1d8737
Show file tree
Hide file tree
Showing 6 changed files with 455 additions and 143 deletions.
12 changes: 5 additions & 7 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ repos:
entry: poetry run black
language: system
types: [ file, python ]
exclude: '/workspaces/Jasmin-Metrics-Client/jasmin-metrics-client/docs/autoapi/jasmin_metrics_client/tests/unit_tests/test_main/index\.rst'
- id: isort
name: isort
entry: poetry run isort
Expand All @@ -27,21 +26,20 @@ repos:
name: bandit
entry: poetry run bandit -c pyproject.toml -r .
language: system
types: [ file, python, toml ]
types: [ file ]
- id: xenon
name: xenon
entry: bash -c "poetry run xenon -a $(poetry run python -c \"import tomllib; f = open('pyproject.toml','rb') ; data = tomllib.load(f); f.close(); print(data['tool']['quality']['mccabe']['average'])\") -b $(poetry run python -c \"import tomllib; f = open('pyproject.toml','rb') ; data = tomllib.load(f); f.close(); print(data['tool']['quality']['mccabe']['block'])\") -m $(poetry run python -c \"import tomllib; f = open('pyproject.toml','rb') ; data = tomllib.load(f); f.close(); print(data['tool']['quality']['mccabe']['module'])\") . "
language: system
types: [ file, python, toml ]
    types: [ file ]
- id: mypy
name: mypy
# entry: poetry run mypy --no-namespace-packages --exclude Jasmin Metrics Client/models/__init__.py
entry: poetry run mypy --no-namespace-packages
language: system
types: [ file, python, toml ]
types: [python]
- id: docs
name: docs
entry: bash -c "poetry run sphinx-build -M dummy ./docs ./docs/_build -q -a -D exclude_patterns=docs/autoapi/jasmin_metrics_client/tests/unit_tests/test_main/index.rst"
entry: bash -c "poetry run sphinx-build -M dummy ./docs ./docs/_build -q -a"
language: system
types_or: [ file, python, rst, toml, markdown ]
exclude: '/workspaces/Jasmin-Metrics-Client/jasmin-metrics-client/docs/autoapi/jasmin_metrics_client/tests/unit_tests/test_main/index\.rst'
types_or: [ file, python, rst, toml, markdown ]
19 changes: 6 additions & 13 deletions docs/autoapi/jasmin_metrics_client/main/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,6 @@ jasmin_metrics_client.main
.. py:module:: jasmin_metrics_client.main
Attributes
----------

.. autoapisummary::

jasmin_metrics_client.main.metrics_client


Classes
-------

Expand All @@ -23,16 +15,17 @@ Classes
Module Contents
---------------

.. py:class:: MetricsClient(token=None, hosts=None)
.. py:class:: MetricsClient(token: Optional[str] = None)
.. py:attribute:: kwargs
.. py:method:: get_all_metrics()
.. py:method:: get_all_metrics() -> Optional[List[str]]
.. py:method:: get_metric_labels(metric_name)
.. py:method:: get_metric_labels(metric_name: str) -> Optional[List[str]]
.. py:method:: get_metric(metric_name, filters=None)
.. py:method:: get_metric(metric_name: str, filters: Optional[Dict[str, Any]] = None, size: int = 10000) -> Optional[pandas.DataFrame]
.. py:data:: metrics_client
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,12 @@ Module Contents
attribute so can be configured by individual tests if required.


.. py:method:: test_get_all_metrics(mock_es_client)
.. py:method:: test_get_all_metrics(mock_search: Any) -> None
.. py:method:: test_get_metric_labels(mock_es_client)
.. py:method:: test_get_metric_labels(mock_search: Any) -> None
.. py:method:: test_get_metric(mock_es_client)
.. py:method:: test_get_metric(mock_search: Any) -> None
73 changes: 25 additions & 48 deletions jasmin_metrics_client/main.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,24 @@
import logging
from typing import Any, Dict, List, Optional, Union

import pandas as pd
from ceda_elasticsearch_tools import CEDAElasticsearchClient
from elasticsearch import ElasticsearchException


class MetricsClient:
def __init__(self, token=None, hosts=None):
def __init__(self, token: Optional[str] = None) -> None:
kwargs = {}
if token:
kwargs["headers"] = {"Authorization": f"Bearer {token}"}
try:
self.es = CEDAElasticsearchClient(
headers={"x-api-key": token} if token else {}, hosts=hosts
)
self.es = CEDAElasticsearchClient(**kwargs)
logging.info("Elasticsearch client initialized successfully.")
except ElasticsearchException as e:
logging.error(f"Error initializing Elasticsearch client: {str(e)}")
raise e

def get_all_metrics(self):
def get_all_metrics(self) -> Optional[List[str]]:
try:
query = {
"aggs": {
Expand All @@ -29,7 +31,7 @@ def get_all_metrics(self):
},
"size": 0,
}
response = self.es.search(index="jasmin-metrics-production", body=query)
response = self.es.search(index="jasmin-metrics-production", query=query)
return [
bucket["key"]
for bucket in response["aggregations"]["unique_metrics"]["buckets"]
Expand All @@ -38,19 +40,19 @@ def get_all_metrics(self):
logging.error(f"Error fetching all metrics: {str(e)}")
return None

def get_metric_labels(self, metric_name):
def get_metric_labels(self, metric_name: str) -> Optional[List[str]]:
try:
# Get labels for a specific metric
query = {
"query": {
"match": {"prometheus.labels.metric_name.keyword": metric_name}
}
},
"size": 1,
}
response = self.es.search(index="jasmin-metrics-production", body=query)
response = self.es.search(index="jasmin-metrics-production", query=query)
if not response["hits"]["hits"]:
logging.info(f"No labels found for metric: {metric_name}")
return []
# Extract unique labels from the hits

labels = set()
for hit in response["hits"]["hits"]:
labels.update(hit["_source"]["prometheus"]["labels"].keys())
Expand All @@ -59,10 +61,16 @@ def get_metric_labels(self, metric_name):
logging.error(f"Error fetching metric labels for {metric_name}: {str(e)}")
return None

def get_metric(self, metric_name, filters=None):
def get_metric(
self,
metric_name: str,
filters: Optional[Dict[str, Any]] = None,
size: int = 10000,
) -> Optional[pd.DataFrame]:
try:
# Construct the base query
query = {

query: Dict[str, Any] = {
"size": size,
"query": {
"bool": {
"must": [
Expand All @@ -73,10 +81,9 @@ def get_metric(self, metric_name, filters=None):
}
]
}
}
},
}

# Apply filters if provided
if filters:
if "labels" in filters:
for key, value in filters["labels"].items():
Expand All @@ -96,10 +103,9 @@ def get_metric(self, metric_name, filters=None):
}
]

response = self.es.search(index="jasmin-metrics-production", body=query)
response = self.es.search(index="jasmin-metrics-production", query=query)

# Convert the response to a Pandas DataFrame
data = []
data: List[Dict[str, Union[str, float]]] = []
for hit in response["hits"]["hits"]:
timestamp = hit["_source"]["@timestamp"]
value = hit["_source"]["prometheus"]["metrics"].get(metric_name)
Expand All @@ -109,32 +115,3 @@ def get_metric(self, metric_name, filters=None):
except ElasticsearchException as e:
logging.error(f"Error fetching metric {metric_name}: {str(e)}")
return None


# Example usage (just for demonstration, not part of the client):
if __name__ == "__main__":
# Setup logging
logging.basicConfig(level=logging.INFO)

metrics_client = MetricsClient(token="your_api_key")

# Get all metrics
metrics = metrics_client.get_all_metrics()
if metrics:
print("Available Metrics:", metrics)

# Get labels for a specific metric
labels = metrics_client.get_metric_labels("storage_tape_provisioned")
if labels:
print("Labels for storage_tape_provisioned:", labels)

# Get metric data with filters
df = metrics_client.get_metric(
"storage_tape_provisioned",
filters={
"labels": {"consortium": "atmos"},
"time": {"start": "2024-09-01T00:00:00Z", "end": "latest"},
},
)
if df is not None:
print(df)
Loading

0 comments on commit b1d8737

Please sign in to comment.