KAFKA-18592: Cleanup ReplicaManager #18621
```diff
@@ -1635,24 +1635,6 @@ class Partition(val topicPartition: TopicPartition,
     localLog.fetchOffsetSnapshot
   }
 
-  def legacyFetchOffsetsForTimestamp(timestamp: Long,
-                                     maxNumOffsets: Int,
-                                     isFromConsumer: Boolean,
-                                     fetchOnlyFromLeader: Boolean): Seq[Long] = inReadLock(leaderIsrUpdateLock) {
-    val localLog = localLogWithEpochOrThrow(Optional.empty(), fetchOnlyFromLeader)
-    val allOffsets = localLog.legacyFetchOffsetsBefore(timestamp, maxNumOffsets)
-
-    if (!isFromConsumer) {
-      allOffsets
-    } else {
-      val hw = localLog.highWatermark
-      if (allOffsets.exists(_ > hw))
-        hw +: allOffsets.dropWhile(_ > hw)
-      else
-        allOffsets
-    }
-  }
-
   def logStartOffset: Long = {
     inReadLock(leaderIsrUpdateLock) {
       leaderLogIfLocal.map(_.logStartOffset).getOrElse(-1)
```

Review thread on the removed `legacyFetchOffsetsBefore` call:

> Perhaps it can be removed from […]

> You are right.
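For context on the deletion above: when the request came from a consumer, the removed method clamped the returned offsets to the high watermark, so a consumer could never receive an offset beyond fully replicated data. A minimal, self-contained sketch of just that clamping step (plain Scala; the helper name is invented for illustration):

```scala
// Sketch of the consumer-side clamping the removed method performed.
// Offsets are assumed to be in descending order (newest first), which is how
// legacyFetchOffsetsBefore returned them.
def clampToHighWatermark(allOffsets: Seq[Long], hw: Long): Seq[Long] =
  if (allOffsets.exists(_ > hw))
    hw +: allOffsets.dropWhile(_ > hw) // offsets past the HW collapse to the HW itself
  else
    allOffsets

// clampToHighWatermark(Seq(120L, 90L, 0L), hw = 100L) == Seq(100L, 90L, 0L)
```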
```diff
@@ -53,7 +53,7 @@ import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, common}
 import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition, TopicOptionalIdPartition}
 import org.apache.kafka.server.metrics.KafkaMetricsGroup
 import org.apache.kafka.server.network.BrokerEndPoint
-import org.apache.kafka.server.purgatory.{DelayedOperationKey, DelayedOperationPurgatory, TopicPartitionOperationKey}
+import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey}
 import org.apache.kafka.server.share.fetch.{DelayedShareFetchKey, DelayedShareFetchPartitionKey}
 import org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData}
 import org.apache.kafka.server.util.{Scheduler, ShutdownableThread}
```
```diff
@@ -274,7 +274,6 @@ class ReplicaManager(val config: KafkaConfig,
                      delayedProducePurgatoryParam: Option[DelayedOperationPurgatory[DelayedProduce]] = None,
                      delayedFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedFetch]] = None,
                      delayedDeleteRecordsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedDeleteRecords]] = None,
-                     delayedElectLeaderPurgatoryParam: Option[DelayedOperationPurgatory[DelayedElectLeader]] = None,
                      delayedRemoteFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedRemoteFetch]] = None,
                      delayedRemoteListOffsetsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedRemoteListOffsets]] = None,
                      delayedShareFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedShareFetch]] = None,
```
```diff
@@ -298,9 +297,6 @@ class ReplicaManager(val config: KafkaConfig,
     new DelayedOperationPurgatory[DelayedDeleteRecords](
       "DeleteRecords", config.brokerId,
       config.deleteRecordsPurgatoryPurgeIntervalRequests))
-  val delayedElectLeaderPurgatory = delayedElectLeaderPurgatoryParam.getOrElse(
-    new DelayedOperationPurgatory[DelayedElectLeader](
-      "ElectLeader", config.brokerId))
   val delayedRemoteFetchPurgatory = delayedRemoteFetchPurgatoryParam.getOrElse(
     new DelayedOperationPurgatory[DelayedRemoteFetch](
       "RemoteFetch", config.brokerId))
```

Review thread on the removed `delayedElectLeaderPurgatory`:

> please remove […]

> Also, this removes the following metrics: […]. Please add them into […]

> Interesting, we still allow elect leader to be called. Why are these metrics no longer relevant?

> The zk broker waits for the metadata update before returning a response to the […]

> Thanks for the explanation.
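The `...PurgatoryParam`/`getOrElse` pairing being trimmed here exists so tests can inject a purgatory while production code falls through to a default built in the constructor. A stripped-down sketch of the pattern (the holder class is hypothetical; the two-argument `DelayedOperationPurgatory` constructor appears in this diff, and `DelayedProduce` is assumed to still live in `kafka.server`, as the unqualified references in this file suggest):

```scala
import kafka.server.DelayedProduce
import org.apache.kafka.server.purgatory.DelayedOperationPurgatory

// Hypothetical stripped-down holder showing ReplicaManager's injection pattern:
// tests pass Some(purgatory); production passes None and gets the default.
class ProducePurgatoryHolder(
    brokerId: Int,
    purgatoryParam: Option[DelayedOperationPurgatory[DelayedProduce]] = None) {

  val producePurgatory: DelayedOperationPurgatory[DelayedProduce] =
    purgatoryParam.getOrElse(
      new DelayedOperationPurgatory[DelayedProduce]("Produce", brokerId))
}
```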
```diff
@@ -387,13 +383,6 @@ class ReplicaManager(val config: KafkaConfig,
 
   def getLog(topicPartition: TopicPartition): Option[UnifiedLog] = logManager.getLog(topicPartition)
 
-  def hasDelayedElectionOperations: Boolean = delayedElectLeaderPurgatory.numDelayed != 0
-
-  def tryCompleteElection(key: DelayedOperationKey): Unit = {
-    val completed = delayedElectLeaderPurgatory.checkAndComplete(key)
-    debug("Request key %s unblocked %d ElectLeader.".format(key.keyLabel, completed))
-  }
-
   def startup(): Unit = {
     // start ISR expiration thread
     // A follower can lag behind leader for up to config.replicaLagTimeMaxMs x 1.5 before it is removed from ISR
```

Review thread on the removed `checkAndComplete` call:

> Could we remove the purgatory too?
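The deleted helper was a thin wrapper over the generic purgatory watch/complete pattern, which still drives every remaining purgatory. A hedged sketch of that pattern (the wrapper function is invented; it assumes `TopicPartitionOperationKey` accepts a `TopicPartition`, and that `DelayedOperation` sits in the same `org.apache.kafka.server.purgatory` package as the classes imported above):

```scala
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.server.purgatory.{DelayedOperation, DelayedOperationPurgatory, TopicPartitionOperationKey}

// Invented helper showing the watch/complete pattern tryCompleteElection wrapped:
// build the per-partition key, then ask the purgatory to complete any delayed
// operations watching it. checkAndComplete returns how many were completed.
def tryCompleteOperations[T <: DelayedOperation](
    purgatory: DelayedOperationPurgatory[T],
    tp: TopicPartition): Int =
  purgatory.checkAndComplete(new TopicPartitionOperationKey(tp))
```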
```diff
@@ -628,10 +617,6 @@ class ReplicaManager(val config: KafkaConfig,
     onlinePartition(topicPartition).flatMap(_.log)
   }
 
-  def getLogDir(topicPartition: TopicPartition): Option[String] = {
-    localLog(topicPartition).map(_.parentDir)
-  }
-
   def tryCompleteActions(): Unit = defaultActionQueue.tryCompleteActions()
 
   def addToActionQueue(action: Runnable): Unit = defaultActionQueue.add(action)
```
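`tryCompleteActions` and `addToActionQueue` in the context above are thin pass-throughs to the broker's default action queue; both types come from the `org.apache.kafka.server.{ActionQueue, DelayedActionQueue}` import earlier in this diff. A minimal usage sketch (assuming `DelayedActionQueue` has a no-argument constructor):

```scala
import org.apache.kafka.server.{ActionQueue, DelayedActionQueue}

object ActionQueueSketch extends App {
  // Enqueue a deferred follow-up action, then drain the queue — the same two
  // operations the pass-throughs above expose.
  val actionQueue: ActionQueue = new DelayedActionQueue()
  actionQueue.add(() => println("deferred follow-up work ran"))
  actionQueue.tryCompleteActions()
}
```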
```diff
@@ -1490,15 +1475,6 @@ class ReplicaManager(val config: KafkaConfig,
     partition.fetchOffsetForTimestamp(timestamp, isolationLevel, currentLeaderEpoch, fetchOnlyFromLeader, remoteLogManager)
   }
 
-  def legacyFetchOffsetsForTimestamp(topicPartition: TopicPartition,
-                                     timestamp: Long,
-                                     maxNumOffsets: Int,
-                                     isFromConsumer: Boolean,
-                                     fetchOnlyFromLeader: Boolean): Seq[Long] = {
-    val partition = getPartitionOrException(topicPartition)
-    partition.legacyFetchOffsetsForTimestamp(timestamp, maxNumOffsets, isFromConsumer, fetchOnlyFromLeader)
-  }
-
   /**
    * Returns [[LogReadResult]] with error if a task for RemoteStorageFetchInfo could not be scheduled successfully
    * else returns [[None]].
```

Review thread on the removed delegation to `Partition.legacyFetchOffsetsForTimestamp`:

> Could we remove it from partition also?
```diff
@@ -2525,7 +2501,6 @@ class ReplicaManager(val config: KafkaConfig,
     delayedRemoteListOffsetsPurgatory.shutdown()
     delayedProducePurgatory.shutdown()
     delayedDeleteRecordsPurgatory.shutdown()
-    delayedElectLeaderPurgatory.shutdown()
     delayedShareFetchPurgatory.shutdown()
     if (checkpointHW)
       checkpointHighWatermarks()
```
Review thread on the `*PurgatoryParam` constructor arguments:

> Is there a reason why you can't remove the field(s) associated with this setter (and others) as well? As far as I can see, it is only used as an argument for creating a ReplicaManager, and you could just leave the instantiation of the purgatory in the constructor of ReplicaManager. I am happy for this to be done in a follow-up as long as it has already been accounted for and it isn't just a miss.

> You are right. These fields are something we missed and they also need to be cleaned up. I will file a JIRA for this one.

> https://issues.apache.org/jira/browse/KAFKA-18630
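The follow-up filed as KAFKA-18630 would roughly invert the earlier injection sketch: delete the `Option` parameter and assign the purgatory directly, so it can no longer be swapped at construction time. A hypothetical "after" sketch (same invented holder as above; the three-argument constructor shape is taken from the `DeleteRecords` purgatory in this diff):

```scala
import kafka.server.DelayedProduce
import org.apache.kafka.server.purgatory.DelayedOperationPurgatory

// Hypothetical "after" state for the suggested cleanup: no injectable *Param
// argument; the purgatory is instantiated unconditionally in the constructor.
class ProducePurgatoryHolder(brokerId: Int, purgeIntervalRequests: Int) {
  val producePurgatory: DelayedOperationPurgatory[DelayedProduce] =
    new DelayedOperationPurgatory[DelayedProduce](
      "Produce", brokerId, purgeIntervalRequests)
}
```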