Remove all usage of statsd metrics

Swatinem committed Oct 21, 2024
1 parent f0e213c commit 570fa5f
Showing 13 changed files with 28 additions and 140 deletions.
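
The removals below all follow one pattern: statsd-style calls such as metrics.incr(...) and with metrics.timer(...) go away, while the prometheus_client primitives re-exported from shared.metrics (Counter, Histogram, Summary, start_prometheus) stay. A minimal sketch of how the same measurements could look on the Prometheus side; the metric and label names here are illustrative, not taken from this commit:

```python
from shared.metrics import Counter, Histogram  # re-exports kept by this commit

# Illustrative metrics; names and labels are assumptions.
API_CALLS = Counter("api_calls_total", "Outbound API calls", ["provider", "outcome"])
API_RUNTIME = Histogram("api_call_seconds", "Outbound API call duration", ["provider"])

# statsd: metrics.incr(f"{METRICS_PREFIX}.api.5xx")
API_CALLS.labels(provider="github", outcome="5xx").inc()

# statsd: with metrics.timer(f"{METRICS_PREFIX}.api.run"): ...
with API_RUNTIME.labels(provider="github").time():
    pass  # perform the request here
```
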
1 change: 0 additions & 1 deletion setup.py
@@ -47,7 +47,6 @@
"pytz",
"redis",
"sqlalchemy==1.*",
"statsd>=3.3.0",
"tlslite-ng>=0.8.0b1",
"typing_extensions",
"typing",
21 changes: 6 additions & 15 deletions shared/helpers/cache.py
@@ -8,8 +8,6 @@

from redis import Redis, RedisError

from shared.metrics import metrics

log = logging.getLogger(__name__)

NO_VALUE = object()
@@ -179,7 +177,10 @@ def get_backend(self) -> BaseBackend:
return self._backend

def cache_function(
self, ttl: int = DEFAULT_TTL, log_hits: bool = False, log_map: LogMapping = None
self,
ttl: int = DEFAULT_TTL,
log_hits: bool = False,
log_map: LogMapping | None = None,
) -> "FunctionCacher":
"""Creates a FunctionCacher with all the needed configuration to cache a function
@@ -229,15 +230,10 @@ def wrapped(*args, **kwargs):
key = self.generate_key(func, args, kwargs)
value = self.cache_instance.get_backend().get(key)
if value is not NO_VALUE:
metrics.incr(f"{self.cache_instance._app}.caches.{func.__name__}.hits")
if self.log_hits:
self._log_hits(func, args, kwargs, key)
return value
metrics.incr(f"{self.cache_instance._app}.caches.{func.__name__}.misses")
with metrics.timer(
f"{self.cache_instance._app}.caches.{func.__name__}.runtime"
):
result = func(*args, **kwargs)
result = func(*args, **kwargs)
self.cache_instance.get_backend().set(key, self.ttl, result)
return result

@@ -255,15 +251,10 @@ async def wrapped(*args, **kwargs):
key = self.generate_key(func, args, kwargs)
value = self.cache_instance.get_backend().get(key)
if value is not NO_VALUE:
metrics.incr(f"{self.cache_instance._app}.caches.{func.__name__}.hits")
if self.log_hits:
self._log_hits(func, args, kwargs, key)
return value
metrics.incr(f"{self.cache_instance._app}.caches.{func.__name__}.misses")
with metrics.timer(
f"{self.cache_instance._app}.caches.{func.__name__}.runtime"
):
result = await func(*args, **kwargs)
result = await func(*args, **kwargs)
self.cache_instance.get_backend().set(key, self.ttl, result)
return result

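
With the timer and the per-function .hits/.misses counters gone, the cached wrapper now simply calls the function. If hit/miss visibility is ever wanted again without statsd, a labeled Prometheus Counter is the closest fit; a minimal sketch under that assumption (the metric name is hypothetical):

```python
from prometheus_client import Counter

# One labeled counter replaces the dotted statsd keys
# "<app>.caches.<func>.hits" and "<app>.caches.<func>.misses".
CACHE_LOOKUPS = Counter(
    "cache_lookups_total", "Cache lookups by function and result", ["func", "result"]
)

def record_lookup(func_name: str, hit: bool) -> None:
    CACHE_LOOKUPS.labels(func=func_name, result="hit" if hit else "miss").inc()
```
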
5 changes: 0 additions & 5 deletions shared/metrics/__init__.py
@@ -1,7 +1,4 @@
from prometheus_client import Counter, Histogram, Summary, start_http_server
from statsd.defaults.env import statsd

metrics = statsd

start_prometheus = start_http_server

@@ -10,7 +7,5 @@
"Counter",
"Histogram",
"Summary",
"statsd",
"metrics",
"start_prometheus",
]
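
After this change shared.metrics is a thin re-export of prometheus_client; the module-level statsd `metrics` object is gone. A short usage sketch of what remains (the metric name and port are assumptions):

```python
from shared.metrics import Counter, start_prometheus

REPORTS_PROCESSED = Counter("reports_processed_total", "Reports processed")  # illustrative

start_prometheus(9090)   # alias for prometheus_client.start_http_server
REPORTS_PROCESSED.inc()  # Prometheus counterpart of statsd's metrics.incr(...)
```
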
4 changes: 1 addition & 3 deletions shared/reports/carryforward.py
@@ -2,7 +2,6 @@
import re
from typing import Mapping, Sequence

from shared.metrics import metrics
from shared.reports.editable import EditableReport
from shared.reports.resources import Report
from shared.utils.match import match
@@ -32,12 +31,11 @@ def carriedforward_session_name(original_session_name: str) -> str:
return f"CF[1] - {original_session_name}"


@metrics.timer("services.report.carryforward.generate_carryforward_report")
def generate_carryforward_report(
report: Report,
flags: Sequence[str],
paths: Sequence[str],
session_extras: Mapping[str, str] = None,
session_extras: Mapping[str, str] | None = None,
) -> EditableReport:
"""
Generates a carriedforward report starting from report `report`, flags `flags`
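
The @metrics.timer decorator on generate_carryforward_report is dropped without a replacement. Should that timing be needed again, a Prometheus Histogram's .time() decorator covers the same ground; a hedged sketch with an assumed metric name:

```python
from shared.metrics import Histogram

CARRYFORWARD_SECONDS = Histogram(
    "carryforward_report_seconds", "Time spent generating carriedforward reports"
)  # assumed name, not part of this commit

@CARRYFORWARD_SECONDS.time()  # records each call's duration, like the removed metrics.timer
def generate_carryforward_report_timed(*args, **kwargs):
    ...
```
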
2 changes: 0 additions & 2 deletions shared/reports/filtered.py
@@ -3,7 +3,6 @@

from shared.config import get_config
from shared.helpers.numeric import ratio
from shared.metrics import metrics
from shared.reports.types import EMPTY, ReportTotals
from shared.utils.make_network_file import make_network_file
from shared.utils.match import match, match_any
@@ -258,7 +257,6 @@ def _iter_totals(self):
if res and res.lines > 0:
yield res

@metrics.timer("shared.reports.filtered._process_totals")
def _process_totals(self):
"""Runs through the file network to aggregate totals
returns <ReportTotals>
2 changes: 0 additions & 2 deletions shared/reports/readonly.py
@@ -6,7 +6,6 @@
from cc_rustyribs import FilterAnalyzer, SimpleAnalyzer, parse_report

from shared.helpers.flag import Flag
from shared.metrics import metrics
from shared.reports.resources import (
END_OF_HEADER,
Report,
@@ -140,7 +139,6 @@ def get(self, *args, **kwargs):
return self.inner_report.get(*args, **kwargs)

@sentry_sdk.trace
@metrics.timer("shared.reports.readonly._process_totals")
def _process_totals(self):
if self.inner_report.has_precalculated_totals():
return self.inner_report.totals
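
Note that _process_totals keeps its @sentry_sdk.trace decorator, so per-call timing still surfaces as a Sentry span even with the statsd timer removed. For illustration only (the function below is hypothetical):

```python
import sentry_sdk

@sentry_sdk.trace
def process_totals_example():
    # Runs as a child span of the active transaction, so its duration is still
    # recorded, which is roughly what the removed metrics.timer measured.
    ...
```
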
15 changes: 5 additions & 10 deletions shared/torngit/bitbucket.py
@@ -6,8 +6,6 @@
import httpx
from oauthlib import oauth1

# from shared.config import get_config
from shared.metrics import metrics
from shared.torngit.base import TokenType, TorngitBaseAdapter
from shared.torngit.enums import Endpoints
from shared.torngit.exceptions import (
@@ -91,31 +89,26 @@ async def api(
repo_slug=self.slug,
)
try:
with metrics.timer(f"{METRICS_PREFIX}.api.run") as timer:
res = await client.request(method.upper(), url, **kwargs)
res = await client.request(method.upper(), url, **kwargs)
logged_body = None
if res.status_code >= 300 and res.text is not None:
logged_body = res.text
log.log(
logging.WARNING if res.status_code >= 300 else logging.INFO,
"Bitbucket HTTP %s",
res.status_code,
extra=dict(time_taken=timer.ms, body=logged_body, **log_dict),
extra=dict(body=logged_body, **log_dict),
)
except (httpx.NetworkError, httpx.TimeoutException):
metrics.incr(f"{METRICS_PREFIX}.api.unreachable")
raise TorngitServerUnreachableError("Bitbucket was not able to be reached.")
if res.status_code == 599:
metrics.incr(f"{METRICS_PREFIX}.api.unreachable")
raise TorngitServerUnreachableError(
"Bitbucket was not able to be reached, server timed out."
)
elif res.status_code >= 500:
metrics.incr(f"{METRICS_PREFIX}.api.5xx")
raise TorngitServer5xxCodeError("Bitbucket is having 5xx issues")
elif res.status_code >= 300:
message = f"Bitbucket API: {res.reason_phrase}"
metrics.incr(f"{METRICS_PREFIX}.api.clienterror")
raise TorngitClientGeneralError(
res.status_code, response_data={"content": res.content}, message=message
)
@@ -890,7 +883,9 @@ async def get_repository(self, token=None):
),
)

async def get_repo_languages(self, token=None, language: str = None) -> List[str]:
async def get_repo_languages(
self, token=None, language: str | None = None
) -> list[str]:
"""
Gets the languages belonging to this repository. Bitbucket has no way to
track languages, so we'll return a list with the existing language
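
Because the metrics.timer context manager is gone, the time_taken=timer.ms field also disappears from the request log. If that field is ever wanted back without statsd, a plain monotonic clock is enough; a minimal sketch under that assumption:

```python
import time

import httpx

async def timed_request(client: httpx.AsyncClient, method: str, url: str, **kwargs):
    # Measure the call directly instead of relying on the statsd timer object.
    start = time.monotonic()
    res = await client.request(method.upper(), url, **kwargs)
    elapsed_ms = (time.monotonic() - start) * 1000
    return res, elapsed_ms  # elapsed_ms could go back into the log extra as time_taken
```
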
27 changes: 7 additions & 20 deletions shared/torngit/github.py
@@ -12,15 +12,10 @@
from httpx import Response

from shared.config import get_config
from shared.github import (
get_github_integration_token,
get_github_jwt_token,
)
from shared.github import get_github_integration_token, get_github_jwt_token
from shared.helpers.redis import get_redis_connection
from shared.metrics import Counter, metrics
from shared.rate_limits import (
set_entity_to_rate_limited,
)
from shared.metrics import Counter
from shared.rate_limits import set_entity_to_rate_limited
from shared.rollouts.features import INCLUDE_GITHUB_COMMENT_ACTIONS_BY_OWNER
from shared.torngit.base import TokenType, TorngitBaseAdapter
from shared.torngit.cache import torngit_cache
@@ -830,11 +825,10 @@ async def make_http_call(
for current_retry in range(1, max_number_retries + 1):
retry_reason = "retriable_status"
try:
with metrics.timer(f"{METRICS_PREFIX}.api.run") as timer:
res = await client.request(method, url, **kwargs)
if current_retry > 1:
# count retries without getting a url
self.count_and_get_url_template(url_name="make_http_call_retry")
res = await client.request(method, url, **kwargs)
if current_retry > 1:
# count retries without getting a url
self.count_and_get_url_template(url_name="make_http_call_retry")
logged_body = None
if res.status_code >= 300 and res.text is not None:
logged_body = res.text
@@ -844,7 +838,6 @@
res.status_code,
extra=dict(
current_retry=current_retry,
time_taken=timer.ms,
body=logged_body,
rl_remaining=res.headers.get("X-RateLimit-Remaining"),
rl_limit=res.headers.get("X-RateLimit-Limit"),
@@ -855,7 +848,6 @@
),
)
except (httpx.TimeoutException, httpx.NetworkError):
metrics.incr(f"{METRICS_PREFIX}.api.unreachable")
raise TorngitServerUnreachableError(
"GitHub was not able to be reached."
)
@@ -902,7 +894,6 @@ async def make_http_call(
is_primary_rate_limit = (
int(res.headers.get("X-RateLimit-Remaining", -1)) == 0
)
metrics.incr(f"{METRICS_PREFIX}.api.ratelimiterror")

# ! side effect: mark current token as rate limited
retry_after = res.headers.get("Retry-After")
@@ -938,22 +929,18 @@ async def make_http_call(
or current_retry >= max_number_retries # Last retry
):
if res.status_code == 599:
metrics.incr(f"{METRICS_PREFIX}.api.unreachable")
raise TorngitServerUnreachableError(
"Github was not able to be reached, server timed out."
)
elif res.status_code >= 500:
metrics.incr(f"{METRICS_PREFIX}.api.5xx")
raise TorngitServer5xxCodeError("Github is having 5xx issues")
elif res.status_code == 401:
message = f"Github API unauthorized error: {res.reason_phrase}"
metrics.incr(f"{METRICS_PREFIX}.api.unauthorizederror")
raise TorngitUnauthorizedError(
response_data=res.text, message=message
)
elif res.status_code >= 300:
message = f"Github API: {res.reason_phrase}"
metrics.incr(f"{METRICS_PREFIX}.api.clienterror")
raise TorngitClientGeneralError(
res.status_code, response_data=res.text, message=message
)
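
All of the per-branch metrics.incr(f"{METRICS_PREFIX}.api.*") counters are removed here. If equivalent error accounting is wanted later, a single labeled Counter from the remaining shared.metrics exports could carry every outcome class; the names below are assumptions:

```python
from shared.metrics import Counter

GITHUB_API_OUTCOMES = Counter(
    "github_api_outcomes_total",
    "GitHub API calls by outcome",
    ["outcome"],  # e.g. "unreachable", "5xx", "unauthorized", "clienterror", "ratelimit"
)

# In an error branch, instead of metrics.incr(f"{METRICS_PREFIX}.api.5xx"):
GITHUB_API_OUTCOMES.labels(outcome="5xx").inc()
```
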
23 changes: 9 additions & 14 deletions shared/torngit/gitlab.py
@@ -9,7 +9,7 @@
import httpx

from shared.config import get_config
from shared.metrics import Counter, metrics
from shared.metrics import Counter
from shared.torngit.base import TokenType, TorngitBaseAdapter
from shared.torngit.enums import Endpoints
from shared.torngit.exceptions import (
@@ -407,7 +407,7 @@ async def fetch_and_handle_errors(
url_path,
*,
body=None,
token: OauthConsumerToken = None,
token: OauthConsumerToken | None = None,
version=4,
**args,
):
@@ -437,30 +437,27 @@ async def fetch_and_handle_errors(
headers["Authorization"] = "Bearer %s" % (token or self.token)["key"]

try:
with metrics.timer(f"{METRICS_PREFIX}.api.run") as timer:
res = await client.request(
method.upper(), url, headers=headers, data=body
)
if current_retry > 1:
# count retries without getting a url
self.count_and_get_url_template("fetch_and_handle_errors_retry")
res = await client.request(
method.upper(), url, headers=headers, data=body
)
if current_retry > 1:
# count retries without getting a url
self.count_and_get_url_template("fetch_and_handle_errors_retry")
logged_body = None
if res.status_code >= 300 and res.text is not None:
logged_body = res.text
log.log(
logging.WARNING if res.status_code >= 300 else logging.INFO,
"GitLab HTTP %s",
res.status_code,
extra=dict(time_taken=timer.ms, body=logged_body, **_log),
extra=dict(body=logged_body, **_log),
)

if res.status_code == 599:
metrics.incr(f"{METRICS_PREFIX}.api.unreachable")
raise TorngitServerUnreachableError(
"Gitlab was not able to be reached, server timed out."
)
elif res.status_code >= 500:
metrics.incr(f"{METRICS_PREFIX}.api.5xx")
raise TorngitServer5xxCodeError("Gitlab is having 5xx issues")
elif (
res.status_code == 401
@@ -473,15 +470,13 @@
await self._on_token_refresh(token)
elif res.status_code >= 400:
message = f"Gitlab API: {res.status_code}"
metrics.incr(f"{METRICS_PREFIX}.api.clienterror")
raise TorngitClientGeneralError(
res.status_code, response_data=res.json(), message=message
)
else:
# Success case
return res
except (httpx.TimeoutException, httpx.NetworkError):
metrics.incr(f"{METRICS_PREFIX}.api.unreachable")
raise TorngitServerUnreachableError(
"GitLab was not able to be reached. Gateway 502. Please try again."
)
2 changes: 0 additions & 2 deletions shared/utils/test_utils/__init__.py
@@ -1,7 +1,5 @@
from .mock_config_helper import mock_config_helper
from .mock_metrics import mock_metrics

__all__ = [
"mock_metrics",
"mock_config_helper",
]
20 changes: 0 additions & 20 deletions shared/utils/test_utils/mock_metrics.py

This file was deleted.
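
With the mock_metrics test helper deleted and its export dropped from shared.utils.test_utils, tests that care about instrumentation can assert against the Prometheus registry directly. A small sketch using an illustrative counter:

```python
from prometheus_client import REGISTRY, Counter

UPLOADS = Counter("uploads", "Processed uploads")  # illustrative metric

def test_counter_increments():
    before = REGISTRY.get_sample_value("uploads_total") or 0.0
    UPLOADS.inc()
    assert REGISTRY.get_sample_value("uploads_total") == before + 1.0
```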
