Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Change loglevel of cancelled errors to info #2402

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion kafka/consumer/fetcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ def send_fetches(self):
log.debug("Sending FetchRequest to node %s", node_id)
future = self._client.send(node_id, request, wakeup=False)
future.add_callback(self._handle_fetch_response, request, time.time())
future.add_errback(log.error, 'Fetch to node %s failed: %s', node_id)
future.add_errback(self._handle_fetch_error, node_id)
futures.append(future)
self._fetch_futures.extend(futures)
self._clean_done_fetch_futures()
Expand Down Expand Up @@ -778,6 +778,14 @@ def _handle_fetch_response(self, request, send_time, response):
self._sensors.fetch_throttle_time_sensor.record(response.throttle_time_ms)
self._sensors.fetch_latency.record((time.time() - send_time) * 1000)

def _handle_fetch_error(self, node_id, exception):
    """Log a failed fetch request to *node_id*.

    Cancelled errors are treated as expected (the request was deliberately
    aborted), so they are logged at INFO; any other exception is a genuine
    ERROR.
    """
    if isinstance(exception, Errors.Cancelled):
        level = logging.INFO
    else:
        level = logging.ERROR
    log.log(level, 'Fetch to node %s failed: %s', node_id, exception)

def _parse_fetched_data(self, completed_fetch):
tp = completed_fetch.topic_partition
fetch_offset = completed_fetch.fetched_offset
Expand Down
18 changes: 18 additions & 0 deletions test/test_fetcher.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# pylint: skip-file
from __future__ import absolute_import
import logging

import pytest

Expand All @@ -12,6 +13,7 @@
CompletedFetch, ConsumerRecord, Fetcher, NoOffsetForPartitionError
)
from kafka.consumer.subscription_state import SubscriptionState
import kafka.errors as Errors
from kafka.future import Future
from kafka.metrics import Metrics
from kafka.protocol.fetch import FetchRequest, FetchResponse
Expand Down Expand Up @@ -378,6 +380,22 @@ def test__handle_fetch_response(fetcher, fetch_request, fetch_response, num_part
assert len(fetcher._completed_fetches) == num_partitions


@pytest.mark.parametrize(("exception", "log_level"), [
    (
        Errors.Cancelled(),
        logging.INFO
    ),
    (
        Errors.KafkaError(),
        logging.ERROR
    )
])
def test__handle_fetch_error(fetcher, caplog, exception, log_level):
    """_handle_fetch_error logs Cancelled at INFO and other errors at ERROR."""
    # caplog only captures records that are actually emitted; with the root
    # logger at its default WARNING level the INFO record would be dropped
    # and caplog.records would be empty. Lower the level for the call.
    with caplog.at_level(logging.DEBUG):
        fetcher._handle_fetch_error(3, exception)
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == logging.getLevelName(log_level)
    # Also pin the message so a silent formatting regression is caught.
    assert 'Fetch to node 3 failed' in record.getMessage()


def test__unpack_message_set(fetcher):
fetcher.config['check_crcs'] = False
tp = TopicPartition('foo', 0)
Expand Down