add a freshness based consumption status checker #9244
Changes from 1 commit
@@ -1440,6 +1440,17 @@ private void fetchLatestStreamOffset() {
     }
   }

+  public StreamPartitionMsgOffset fetchLatestStreamOffset(Long maxWaitTimeMs) {
Review comment: Seems we are always using 5s as the wait time. So we should either: …
Review comment: Looks like there is already a public method called … Reply: agreed on making it public.
+    try (StreamMetadataProvider metadataProvider = _streamConsumerFactory
+        .createPartitionMetadataProvider(_clientId, _partitionGroupId)) {
+      return metadataProvider.fetchStreamPartitionOffset(OffsetCriteria.LARGEST_OFFSET_CRITERIA, maxWaitTimeMs);
+    } catch (Exception e) {
+      _segmentLogger.warn("Cannot fetch latest stream offset for clientId {} and partitionGroupId {}", _clientId,
+          _partitionGroupId);
+    }
+    return null;
+  }
+
Review comment (on the warn call): Let's also log the exception. Reply: done.
   /*
    * set the following partition parameters in RT segment config builder:
    *  - partition column
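On the hard-coded 5s wait flagged above, a minimal sketch of the named-constant option (the constant name and the no-arg overload are hypothetical, not part of this commit):

```java
// Hypothetical: callers such as the freshness checker currently pass 5000L directly.
private static final long DEFAULT_FETCH_OFFSET_MAX_WAIT_MS = 5000L;

public StreamPartitionMsgOffset fetchLatestStreamOffset() {
  return fetchLatestStreamOffset(DEFAULT_FETCH_OFFSET_MAX_WAIT_MS);
}
```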
@@ -236,12 +236,24 @@ private void registerServiceStatusHandler() {
     boolean isOffsetBasedConsumptionStatusCheckerEnabled =
         _serverConf.getProperty(Server.CONFIG_OF_ENABLE_REALTIME_OFFSET_BASED_CONSUMPTION_STATUS_CHECKER,
             Server.DEFAULT_ENABLE_REALTIME_OFFSET_BASED_CONSUMPTION_STATUS_CHECKER);
+    boolean isFreshnessStatusCheckerEnabled =
+        _serverConf.getProperty(Server.CONFIG_OF_ENABLE_REALTIME_FRESHNESS_BASED_CONSUMPTION_STATUS_CHECKER,
+            Server.DEFAULT_ENABLE_REALTIME_FRESHNESS_BASED_CONSUMPTION_STATUS_CHECKER);
+    int realtimeMinFreshnessMs = _serverConf.getProperty(Server.CONFIG_OF_STARTUP_REALTIME_MIN_FRESHNESS_MS,
+        Server.DEFAULT_STARTUP_REALTIME_MIN_FRESHNESS_MS);

     // collect all resources which have this instance in the ideal state
     List<String> resourcesToMonitor = new ArrayList<>();

     Set<String> consumingSegments = new HashSet<>();
     boolean checkRealtime = realtimeConsumptionCatchupWaitMs > 0;
+    if (isFreshnessStatusCheckerEnabled && realtimeMinFreshnessMs <= 0) {
+      LOGGER.warn("Realtime min freshness {} must be > 0. Will not setup freshness based checker.",
+          realtimeMinFreshnessMs);
+    }
+    if (isFreshnessStatusCheckerEnabled && isOffsetBasedConsumptionStatusCheckerEnabled) {
+      LOGGER.warn("Offset and Freshness checkers both enabled. Will only setup freshness based checker.");
+    }
+    boolean checkRealtimeFreshness = realtimeMinFreshnessMs > 0;

Review comment: These condition checks are confusing. So, if my config has offsetBasedConsumptionStatusChecker = true, freshnessBasedConsumptionStatusCheckerEnabled = true and realtimeMinFreshness = -50, the logs will be very confusing. It seems simpler to do this: if both are set up, choose the freshness based check and use the default freshness value if the minFreshness is not configured correctly. Also, the check for the correct config value can be moved further down. Nit (line 298): the boolean … Reply: good call, it's definitely confusing. I've changed this to …

     for (String resourceName : _helixAdmin.getResourcesInCluster(_helixClusterName)) {
       // Only monitor table resources
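As a reference for where the review thread lands, a sketch of the suggested precedence — the freshness checker wins when both are enabled, and a non-positive min freshness falls back to the default rather than disabling the checker. This follows the reviewer's proposal, not necessarily the merged code:

```java
// Freshness based checker takes priority over the offset based checker when both are enabled.
boolean useFreshnessChecker = isFreshnessStatusCheckerEnabled;
boolean useOffsetChecker = isOffsetBasedConsumptionStatusCheckerEnabled && !useFreshnessChecker;
if (useFreshnessChecker && realtimeMinFreshnessMs <= 0) {
  // Reviewer-suggested fallback: use the default instead of skipping the checker on a bad value.
  realtimeMinFreshnessMs = Server.DEFAULT_STARTUP_REALTIME_MIN_FRESHNESS_MS;
}
```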
@@ -280,16 +292,34 @@ private void registerServiceStatusHandler() {
             _instanceId, resourcesToMonitor, minResourcePercentForStartup));
     boolean foundConsuming = !consumingSegments.isEmpty();
     if (checkRealtime && foundConsuming) {
-      Supplier<Integer> getNumConsumingSegmentsNotReachedTheirLatestOffset = null;
-      if (isOffsetBasedConsumptionStatusCheckerEnabled) {
+      // We specifically put the freshness based checker first to ensure it's the only one setup if both checkers
+      // are accidentally enabled together. The freshness based checker is a stricter version of the offset based
+      // checker. But in the end, both checkers are bounded in time by realtimeConsumptionCatchupWaitMs.
+      if (isFreshnessStatusCheckerEnabled) {
+        LOGGER.info("Setting up freshness based status checker");
+        FreshnessBasedConsumptionStatusChecker freshnessStatusChecker =
+            new FreshnessBasedConsumptionStatusChecker(_serverInstance.getInstanceDataManager(), consumingSegments,
+                (long) realtimeMinFreshnessMs);
+        Supplier<Integer> getNumConsumingSegmentsNotReachedMinFreshness =
+            freshnessStatusChecker::getNumConsumingSegmentsNotReachedMinFreshness;
+        serviceStatusCallbackListBuilder.add(
+            new ServiceStatus.RealtimeConsumptionCatchupServiceStatusCallback(_helixManager, _helixClusterName,
+                _instanceId, realtimeConsumptionCatchupWaitMs, getNumConsumingSegmentsNotReachedMinFreshness));
+      } else if (isOffsetBasedConsumptionStatusCheckerEnabled) {
+        LOGGER.info("Setting up offset based status checker");
         OffsetBasedConsumptionStatusChecker consumptionStatusChecker =
             new OffsetBasedConsumptionStatusChecker(_serverInstance.getInstanceDataManager(), consumingSegments);
-        getNumConsumingSegmentsNotReachedTheirLatestOffset =
+        Supplier<Integer> getNumConsumingSegmentsNotReachedTheirLatestOffset =
             consumptionStatusChecker::getNumConsumingSegmentsNotReachedTheirLatestOffset;
+        serviceStatusCallbackListBuilder.add(
+            new ServiceStatus.RealtimeConsumptionCatchupServiceStatusCallback(_helixManager, _helixClusterName,
+                _instanceId, realtimeConsumptionCatchupWaitMs, getNumConsumingSegmentsNotReachedTheirLatestOffset));
+      } else {
+        LOGGER.info("Setting up static time based status checker");
+        serviceStatusCallbackListBuilder.add(
+            new ServiceStatus.RealtimeConsumptionCatchupServiceStatusCallback(_helixManager, _helixClusterName,
+                _instanceId, realtimeConsumptionCatchupWaitMs, null));
       }
-      serviceStatusCallbackListBuilder.add(
-          new ServiceStatus.RealtimeConsumptionCatchupServiceStatusCallback(_helixManager, _helixClusterName,
-              _instanceId, realtimeConsumptionCatchupWaitMs, getNumConsumingSegmentsNotReachedTheirLatestOffset));
     }
     LOGGER.info("Registering service status handler");
     ServiceStatus.setServiceStatusCallback(_instanceId,
@@ -340,10 +370,10 @@ private void updateInstanceConfigIfNeeded(ServerConf serverConf) {

     // Update multi-stage query engine ports
     if (serverConf.isMultiStageServerEnabled()) {
-      updated |= updatePortIfNeeded(simpleFields,
-          Instance.MULTI_STAGE_QUERY_ENGINE_SERVICE_PORT_KEY, serverConf.getMultiStageServicePort());
-      updated |= updatePortIfNeeded(simpleFields,
-          Instance.MULTI_STAGE_QUERY_ENGINE_MAILBOX_PORT_KEY, serverConf.getMultiStageMailboxPort());
+      updated |= updatePortIfNeeded(simpleFields, Instance.MULTI_STAGE_QUERY_ENGINE_SERVICE_PORT_KEY,
+          serverConf.getMultiStageServicePort());
+      updated |= updatePortIfNeeded(simpleFields, Instance.MULTI_STAGE_QUERY_ENGINE_MAILBOX_PORT_KEY,
+          serverConf.getMultiStageMailboxPort());
     }

     // Update environment properties
@@ -0,0 +1,154 @@ (new file: FreshnessBasedConsumptionStatusChecker.java)
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.pinot.server.starter.helix;

import java.util.HashSet;
import java.util.Set;
import org.apache.pinot.common.utils.LLCSegmentName;
import org.apache.pinot.core.data.manager.InstanceDataManager;
import org.apache.pinot.core.data.manager.realtime.LLRealtimeSegmentDataManager;
import org.apache.pinot.segment.local.data.manager.SegmentDataManager;
import org.apache.pinot.segment.local.data.manager.TableDataManager;
import org.apache.pinot.spi.config.table.TableType;
import org.apache.pinot.spi.stream.LongMsgOffset;
import org.apache.pinot.spi.stream.StreamPartitionMsgOffset;
import org.apache.pinot.spi.utils.builder.TableNameBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This class is used at startup time to have a more accurate estimate of the catchup period in which no query
 * execution happens and consumers try to catch up to the latest messages available in streams.
 * To achieve this, every time the status check is called - {@link #getNumConsumingSegmentsNotReachedMinFreshness} -
 * we check, for each consuming segment, if either:
 *   - the segment's latest ingested offset has reached the latest offset currently available in the stream, or
 *   - the last ingested message is within {@link #_minFreshnessMs} of the current system time.
 */
public class FreshnessBasedConsumptionStatusChecker {
  private static final Logger LOGGER = LoggerFactory.getLogger(FreshnessBasedConsumptionStatusChecker.class);

  // constructor parameters
  private final InstanceDataManager _instanceDataManager;
  private final Set<String> _consumingSegments;
  private final Long _minFreshnessMs;

Review comment: (minor) Same for the other boxed Longs here; prefer primitive long. Suggested change: …
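The suggestion presumably swaps the boxed type for a primitive, along these lines (a reconstruction, not the reviewer's literal text):

```java
// Primitive long: null is never a meaningful min freshness, and unboxing a null Long would NPE.
private final long _minFreshnessMs;
```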
  // helper variable
  private final Set<String> _caughtUpSegments = new HashSet<>();

  public FreshnessBasedConsumptionStatusChecker(InstanceDataManager instanceDataManager, Set<String> consumingSegments,
      Long minFreshnessMs) {
    _instanceDataManager = instanceDataManager;
    _consumingSegments = consumingSegments;
    _minFreshnessMs = minFreshnessMs;
  }

  private boolean isOffsetCaughtUp(StreamPartitionMsgOffset currentOffset, StreamPartitionMsgOffset latestOffset) {
    if (currentOffset != null && latestOffset != null) {
      if (currentOffset.compareTo(latestOffset) == 0) {
        return true;
      }
      long currentOffsetLong = ((LongMsgOffset) currentOffset).getOffset();
      long latestOffsetLong = ((LongMsgOffset) latestOffset).getOffset();

      // Kafka's "latest" offset is actually the next available offset. Therefore it will be 1 ahead of the
      // current offset in the case we are caught up.
      // We expect currentOffset == latestOffset if no messages have ever been published. Both will be 0.
      // Otherwise, we never expect currentOffset > latestOffset, but we allow this to be caught up in case
      // it ever happens so we're not stuck starting up.
      return currentOffsetLong >= latestOffsetLong - 1;
    }
    return false;
  }

Review comment (on the LongMsgOffset casts): This we can't do. Not all streams have long offsets like Kafka does. Reply: my bad, I missed the type checking before it. We already do this in …

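To make the off-by-one convention concrete, a small illustrative snippet (not part of the PR; LongMsgOffset comes from the pinot-spi imports above):

```java
// A Kafka-style "latest" offset is the next offset to be assigned, so a fully
// caught-up consumer sits one behind it.
StreamPartitionMsgOffset current = new LongMsgOffset(41);
StreamPartitionMsgOffset latest = new LongMsgOffset(42);
// 41 >= 42 - 1 holds, so isOffsetCaughtUp would treat this consumer as caught up.
// On a never-written partition both offsets are 0 and the compareTo check returns true first.
```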
  protected Long now() {
    return System.currentTimeMillis();
  }

  public int getNumConsumingSegmentsNotReachedMinFreshness() {
    for (String segName : _consumingSegments) {
      if (_caughtUpSegments.contains(segName)) {
        continue;
      }
      TableDataManager tableDataManager = getTableDataManager(segName);
      if (tableDataManager == null) {
        LOGGER.info("TableDataManager is not yet setup for segment {}. Will check consumption status later", segName);
        continue;
      }
      SegmentDataManager segmentDataManager = null;
      try {
        segmentDataManager = tableDataManager.acquireSegment(segName);
        if (segmentDataManager == null) {
          LOGGER.info("SegmentDataManager is not yet setup for segment {}. Will check consumption status later",
              segName);
          continue;
        }
        if (!(segmentDataManager instanceof LLRealtimeSegmentDataManager)) {
          // There's a possibility that a consuming segment has converted to a committed segment. If that's the case,
          // segment data manager will not be of type LLRealtime.
          LOGGER.info("Segment {} is already committed and is considered caught up.", segName);
          _caughtUpSegments.add(segName);
          continue;
        }
        LLRealtimeSegmentDataManager rtSegmentDataManager = (LLRealtimeSegmentDataManager) segmentDataManager;
        Long now = now();

Review comment (on Long now): (minor) Change the boxed Long to a primitive long. Reply: good catch, done.

        Long latestIngestionTimestamp =
            rtSegmentDataManager.getSegment().getSegmentMetadata().getLatestIngestionTimestamp();
        Long freshnessMs = now - latestIngestionTimestamp;

        // We check latestIngestionTimestamp >= 0 because its default value when unknown is Long.MIN_VALUE
        if (latestIngestionTimestamp >= 0 && freshnessMs <= _minFreshnessMs) {
          LOGGER.info("Segment {} with freshness {}ms has caught up within min freshness {}", segName, freshnessMs,
              _minFreshnessMs);
          _caughtUpSegments.add(segName);
          continue;
        }

        // For stream partitions that see very low volume, it's possible we're already caught up but the last
        // ingested message is too old to pass the freshness check. We check this condition second to avoid
        // hitting the stream consumer to fetch the latest offset if we're already caught up.
        StreamPartitionMsgOffset currentOffset = rtSegmentDataManager.getCurrentOffset();
        StreamPartitionMsgOffset latestStreamOffset = rtSegmentDataManager.fetchLatestStreamOffset(5000L);
        if (isOffsetCaughtUp(currentOffset, latestStreamOffset)) {
          LOGGER.info("Segment {} with freshness {}ms has not caught up within min freshness {}. "
                  + "But the current ingested offset is equal to the latest available offset {}.", segName,
              freshnessMs, _minFreshnessMs, currentOffset);
          _caughtUpSegments.add(segName);
          continue;
        }

        LOGGER.info(
            "Segment {} with freshness {}ms has not caught up within min freshness {}. At offset {}. Latest offset {}.",
            segName, freshnessMs, _minFreshnessMs, currentOffset, latestStreamOffset);
      } finally {
        if (segmentDataManager != null) {
          tableDataManager.releaseSegment(segmentDataManager);
        }
      }
    }
    return _consumingSegments.size() - _caughtUpSegments.size();
  }

  private TableDataManager getTableDataManager(String segmentName) {
    LLCSegmentName llcSegmentName = new LLCSegmentName(segmentName);
    String tableName = llcSegmentName.getTableName();
    String tableNameWithType = TableNameBuilder.forType(TableType.REALTIME).tableNameWithType(tableName);
    return _instanceDataManager.getTableDataManager(tableNameWithType);
  }
}
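Putting it together, a minimal usage sketch of how the checker slots into startup polling. The segment set, the 10s min freshness, and the bare loop are illustrative only — in the server, consuming segments come from the Helix ideal state and polling is driven by RealtimeConsumptionCatchupServiceStatusCallback, bounded by realtimeConsumptionCatchupWaitMs:

```java
static void awaitRealtimeCatchup(InstanceDataManager instanceDataManager, Set<String> consumingSegments)
    throws InterruptedException {
  // 10s is an arbitrary illustrative value; the server reads the min freshness from
  // Server.CONFIG_OF_STARTUP_REALTIME_MIN_FRESHNESS_MS.
  FreshnessBasedConsumptionStatusChecker checker =
      new FreshnessBasedConsumptionStatusChecker(instanceDataManager, consumingSegments, 10_000L);
  // Each call re-checks only segments that have not yet caught up; caught-up segments are cached
  // in _caughtUpSegments, so the per-poll work shrinks as segments catch up.
  while (checker.getNumConsumingSegmentsNotReachedMinFreshness() > 0) {
    Thread.sleep(1000L);
  }
}
```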