ban prefix increment (#7442)
* ban prefix increment

* remove all preincrements from Java source code
richardstartin authored Sep 18, 2021
1 parent b23a1f3 commit 31ca84e
Showing 56 changed files with 160 additions and 130 deletions.
4 changes: 4 additions & 0 deletions config/checkstyle.xml
@@ -99,6 +99,10 @@
<module name="StringLiteralEquality"/>
<!-- Use 'L' with long literals -->
<module name="UpperEll"/>
<!-- ban prefix increment, decrement -->
<module name="IllegalToken">
<property name="tokens" value="INC,DEC"/>
</module>

<!-- IMPORTS -->

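Note on the rule itself: Checkstyle's IllegalToken module flags every occurrence of the listed grammar tokens, and in Checkstyle's grammar INC and DEC are the prefix forms (++i, --i) while the postfix forms are the separate POST_INC and POST_DEC tokens, so this configuration bans only prefix increment and decrement. A minimal sketch of what the new check rejects and accepts (the class and variable names are illustrative, not part of the Pinot sources):

// Hypothetical example, not part of the Pinot source tree.
public class PrefixIncrementExample {
  public static void main(String[] args) {
    int count = 0;
    // ++count;   // would now be flagged: INC (prefix increment) is an illegal token
    // --count;   // would now be flagged: DEC (prefix decrement) is an illegal token
    count++;      // allowed: POST_INC is not listed in the rule
    count--;      // allowed: POST_DEC is not listed in the rule
    System.out.println(count); // prints 0
  }
}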
@@ -70,7 +70,7 @@ int getMaxCountPerBucket(long now) {

int maxCount = 0;
// Skipping the end index here as its bucket hasn't fully gathered all the hits yet.
for (int i = startIndex; i != endIndex; i = (++i % _bucketCount)) {
for (int i = startIndex; i != endIndex; i = ((i + 1) % _bucketCount)) {
if (numTimeUnits - _bucketStartTime.get(i) < _bucketCount) {
maxCount = Math.max(_bucketHitCount.get(i), maxCount);
}
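This hunk is one of the few where the prefix operator's value was actually used: i = (++i % _bucketCount) bumps i and feeds the bumped value into the modulo before the assignment overwrites it, which is exactly (i + 1) % _bucketCount, so the new form only makes the wrap-around explicit. A small stand-alone check of that equivalence, assuming an arbitrary bucket count of 8:

// Illustrative check only; the bucket count is not taken from the Pinot code.
public class WrapAroundIndexCheck {
  public static void main(String[] args) {
    int bucketCount = 8;
    for (int start = 0; start < bucketCount; start++) {
      int a = start;
      int b = start;
      a = (++a % bucketCount);       // old form: prefix increment feeds the modulo
      b = ((b + 1) % bucketCount);   // new form: explicit wrap-around arithmetic
      if (a != b) {
        throw new AssertionError("forms diverge at " + start);
      }
    }
    System.out.println("old and new loop updates agree for all start indices");
  }
}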
@@ -97,7 +97,7 @@ public void sendPinotQueryUpdateToChildren(PinotQuery pinotQuery) {
@Override
public String toString(int indent) {
String str = "";
for (int i = 0; i < indent; ++i) {
for (int i = 0; i < indent; i++) {
str += " ";
}
str += toString();
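Most of the remaining hunks are of the shape above: the increment sits alone in a for statement's update clause, its value is never read, so ++i and i++ are interchangeable and the rewrite cannot change behavior. A throwaway sketch, assuming an arbitrary indent width of 4:

// Illustrative only; shows that the old and new update clauses iterate identically.
public class IndentLoopEquivalence {
  public static void main(String[] args) {
    int indent = 4;
    StringBuilder prefixForm = new StringBuilder();
    StringBuilder postfixForm = new StringBuilder();
    for (int i = 0; i < indent; ++i) {  // old style: value of ++i is discarded
      prefixForm.append(' ');
    }
    for (int i = 0; i < indent; i++) {  // new style: identical iteration count
      postfixForm.append(' ');
    }
    System.out.println(prefixForm.toString().equals(postfixForm.toString())); // true
  }
}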
@@ -84,7 +84,7 @@ public void sendPinotQueryUpdateToChildren(PinotQuery pinotQuery) {
@Override
public String toString(int indent) {
String str = "";
for (int i = 0; i < indent; ++i) {
for (int i = 0; i < indent; i++) {
str += " ";
}
str += toString();
@@ -125,25 +125,25 @@ public void testMultiGet() {
try {
getMethod = completionService.take().get();
if (getMethod.getStatusCode() >= 300) {
++errors;
errors++;
Assert.assertEquals(getMethod.getResponseBodyAsString(), ERROR_MSG);
} else {
++success;
success++;
Assert.assertEquals(getMethod.getResponseBodyAsString(), SUCCESS_MSG);
}
} catch (InterruptedException e) {
LOGGER.error("Interrupted", e);
++errors;
errors++;
} catch (ExecutionException e) {
if (Throwables.getRootCause(e) instanceof SocketTimeoutException) {
LOGGER.debug("Timeout");
++timeouts;
timeouts++;
} else {
LOGGER.error("Error", e);
++errors;
errors++;
}
} catch (IOException e) {
++errors;
errors++;
} finally {
if (getMethod != null) {
getMethod.releaseConnection();
@@ -48,7 +48,7 @@ private ZNRecord getTestInstanceZNRecord() {
Map<String, String> groupIdMap = new HashMap<>();
Map<String, String> partitionMap = new HashMap<>();

for (int i = 0; i < 10; ++i) {
for (int i = 0; i < 10; i++) {
groupIdMap.put("testRes" + i + "_REALTIME", "groupId" + i);
partitionMap.put("testRes" + i + "_REALTIME", "part" + i);
}
@@ -62,7 +62,7 @@ private InstanceZKMetadata getTestInstanceMetadata() {
instanceMetadata.setInstanceType("Server");
instanceMetadata.setInstanceName("localhost");
instanceMetadata.setInstancePort(1234);
for (int i = 0; i < 10; ++i) {
for (int i = 0; i < 10; i++) {
instanceMetadata.setGroupId("testRes" + i, "groupId" + i);
instanceMetadata.setPartition("testRes" + i, "part" + i);
}
@@ -68,7 +68,7 @@ public static boolean comparisonZNRecords(ZNRecord record1, ZNRecord record2) {
Arrays.sort(list1Array);
String[] list2Array = list2.toArray(new String[0]);
Arrays.sort(list2Array);
for (int i = 0; i < list1.size(); ++i) {
for (int i = 0; i < list1.size(); i++) {
if (list1Array[i] == null && list2Array[i] == null) {
continue;
}
@@ -212,7 +212,7 @@ private TlsConfig getTlsConfig(Config pinotConfig) {

private static <T> T doWithRetries(int retries, Function<Integer, T> caller) {
PinotException firstError = null;
for (int i = 0; i < retries; ++i) {
for (int i = 0; i < retries; i++) {
try {
return caller.apply(i);
} catch (PinotException e) {
@@ -658,7 +658,7 @@ private PinotResourceManagerResponse scaleUpBroker(Tenant tenant, String brokerT
LOGGER.error(message);
return PinotResourceManagerResponse.failure(message);
}
for (int i = 0; i < numberOfInstancesToAdd; ++i) {
for (int i = 0; i < numberOfInstancesToAdd; i++) {
String instanceName = unTaggedInstanceList.get(i);
retagInstance(instanceName, Helix.UNTAGGED_BROKER_INSTANCE, brokerTenantTag);
// Update idealState by adding new instance to table mapping.
@@ -732,7 +732,7 @@ private void addInstanceToBrokerIdealState(String brokerTenantTag, String instan
private PinotResourceManagerResponse scaleDownBroker(Tenant tenant, String brokerTenantTag,
List<String> instancesInClusterWithTag) {
int numberBrokersToUntag = instancesInClusterWithTag.size() - tenant.getNumberOfInstances();
for (int i = 0; i < numberBrokersToUntag; ++i) {
for (int i = 0; i < numberBrokersToUntag; i++) {
retagInstance(instancesInClusterWithTag.get(i), brokerTenantTag, Helix.UNTAGGED_BROKER_INSTANCE);
}
return PinotResourceManagerResponse.SUCCESS;
@@ -795,10 +795,10 @@ private PinotResourceManagerResponse updateIndependentServerTenant(Tenant server
List<String> unTaggedInstanceList) {
int incOffline = serverTenant.getOfflineInstances() - taggedOfflineServers.size();
int incRealtime = serverTenant.getRealtimeInstances() - taggedRealtimeServers.size();
for (int i = 0; i < incOffline; ++i) {
for (int i = 0; i < incOffline; i++) {
retagInstance(unTaggedInstanceList.get(i), Helix.UNTAGGED_SERVER_INSTANCE, offlineServerTag);
}
for (int i = incOffline; i < incOffline + incRealtime; ++i) {
for (int i = incOffline; i < incOffline + incRealtime; i++) {
String instanceName = unTaggedInstanceList.get(i);
retagInstance(instanceName, Helix.UNTAGGED_SERVER_INSTANCE, realtimeServerTag);
// TODO: update idealStates & instanceZkMetadata
@@ -813,14 +813,14 @@ private PinotResourceManagerResponse updateColocatedServerTenant(Tenant serverTe
int incRealtime = serverTenant.getRealtimeInstances() - taggedRealtimeServers.size();
taggedRealtimeServers.removeAll(taggedOfflineServers);
taggedOfflineServers.removeAll(taggedRealtimeServers);
for (int i = 0; i < incOffline; ++i) {
for (int i = 0; i < incOffline; i++) {
if (i < incInstances) {
retagInstance(unTaggedInstanceList.get(i), Helix.UNTAGGED_SERVER_INSTANCE, offlineServerTag);
} else {
_helixAdmin.addInstanceTag(_helixClusterName, taggedRealtimeServers.get(i - incInstances), offlineServerTag);
}
}
for (int i = incOffline; i < incOffline + incRealtime; ++i) {
for (int i = incOffline; i < incOffline + incRealtime; i++) {
if (i < incInstances) {
retagInstance(unTaggedInstanceList.get(i), Helix.UNTAGGED_SERVER_INSTANCE, realtimeServerTag);
// TODO: update idealStates & instanceZkMetadata
@@ -969,7 +969,7 @@ public PinotResourceManagerResponse createBrokerTenant(Tenant brokerTenant) {
return PinotResourceManagerResponse.failure(message);
}
String brokerTag = TagNameUtils.getBrokerTagForTenant(brokerTenant.getTenantName());
for (int i = 0; i < brokerTenant.getNumberOfInstances(); ++i) {
for (int i = 0; i < brokerTenant.getNumberOfInstances(); i++) {
retagInstance(unTaggedInstanceList.get(i), Helix.UNTAGGED_BROKER_INSTANCE, brokerTag);
}
return PinotResourceManagerResponse.SUCCESS;
@@ -194,7 +194,7 @@ private static void setupInstanceConfigForHighLevelConsumer(String realtimeTable
int replicaId = 0;

String groupId = getGroupIdFromRealtimeDataTable(realtimeTableName, streamConfig);
for (int i = 0; i < numInstancesPerReplica * numDataReplicas; ++i) {
for (int i = 0; i < numInstancesPerReplica * numDataReplicas; i++) {
String instance = instanceList.get(i);
InstanceZKMetadata instanceZKMetadata = ZKMetadataProvider.getInstanceZKMetadata(zkHelixPropertyStore, instance);
if (instanceZKMetadata == null) {
@@ -646,7 +646,8 @@ static SingleSegmentAssignment getNextSingleSegmentAssignment(Map<String, String
String instanceName = entry.getKey();
if (!nextInstanceStateMap.containsKey(instanceName)) {
nextInstanceStateMap.put(instanceName, entry.getValue());
if (--instancesToKeep == 0) {
instancesToKeep--;
if (instancesToKeep == 0) {
break;
}
}
@@ -661,7 +662,8 @@ static SingleSegmentAssignment getNextSingleSegmentAssignment(Map<String, String
String instanceName = entry.getKey();
if (!nextInstanceStateMap.containsKey(instanceName)) {
nextInstanceStateMap.put(instanceName, entry.getValue());
if (--instancesToAdd == 0) {
instancesToAdd--;
if (instancesToAdd == 0) {
break;
}
}
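Where the prefix operator's value fed a condition, as in if (--instancesToKeep == 0), the ban forces the two-step form above: decrement first, then test the updated value, with the decrement kept ahead of the comparison so the early exit still fires after the same number of entries. A hedged sketch of the pattern with toy data (the map contents and method names are illustrative, not the Pinot assignment logic):

import java.util.LinkedHashMap;
import java.util.Map;

public class TakeFirstNEntries {
  // Copy at most n entries from source into a new map; mirrors the rewritten
  // "decrement, then test" shape used in the segment-assignment code.
  static Map<String, String> takeFirst(Map<String, String> source, int n) {
    Map<String, String> result = new LinkedHashMap<>();
    int remaining = n;
    for (Map.Entry<String, String> entry : source.entrySet()) {
      result.put(entry.getKey(), entry.getValue());
      remaining--;                 // was: if (--remaining == 0) { break; }
      if (remaining == 0) {
        break;
      }
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> instances = new LinkedHashMap<>();
    instances.put("Server_1", "ONLINE");
    instances.put("Server_2", "ONLINE");
    instances.put("Server_3", "ONLINE");
    System.out.println(takeFirst(instances, 2)); // {Server_1=ONLINE, Server_2=ONLINE}
  }
}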
@@ -71,7 +71,7 @@ public void testInstanceListingAndCreation
counts[0] = instances.size();
for (int i = 0; i < counts[0]; i++) {
if (instances.get(i).asText().startsWith(Helix.PREFIX_OF_CONTROLLER_INSTANCE)) {
++counts[1];
counts[1]++;
}
}
}
@@ -61,7 +61,7 @@ public void testSegmentCrcApi
checkCrcRequest(segmentMetadataTable, 0);

// Upload Segments
for (int i = 0; i < 5; ++i) {
for (int i = 0; i < 5; i++) {
SegmentMetadata segmentMetadata = SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME);
ControllerTestUtils.getHelixResourceManager()
.addNewSegment(TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME), segmentMetadata, "downloadUrl");
@@ -72,7 +72,7 @@ public void testSegmentCrcApi()
checkCrcRequest(segmentMetadataTable, 5);

// Add more segments
for (int i = 0; i < 5; ++i) {
for (int i = 0; i < 5; i++) {
SegmentMetadata segmentMetadata = SegmentMetadataMockUtils.mockSegmentMetadata(TABLE_NAME);
ControllerTestUtils.getHelixResourceManager()
.addNewSegment(TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME), segmentMetadata, "downloadUrl");
@@ -89,21 +89,21 @@ public void setUp
s.updateMetadataMock();
s.start(URI_PATH, createSegmentMetadataHandler(200, s._segmentMetadata, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server1
s = new SegmentsServerMock("s2");
s.updateMetadataMock();
s.start(URI_PATH, createSegmentMetadataHandler(200, s._segmentMetadata, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server2
s = new SegmentsServerMock("s3");
s.updateMetadataMock();
s.start(URI_PATH, createSegmentMetadataHandler(404, s._segmentMetadata, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;
}

private String serverName(int counter) {
@@ -99,37 +99,37 @@ public Object answer(InvocationOnMock invocationOnMock
FakeSizeServer s = new FakeSizeServer(Arrays.asList("s2", "s3", "s6"));
s.start(URI_PATH, createHandler(200, s._sizes, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server1
s = new FakeSizeServer(Arrays.asList("s2", "s5"));
s.start(URI_PATH, createHandler(200, s._sizes, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server2
s = new FakeSizeServer(Arrays.asList("s3", "s6"));
s.start(URI_PATH, createHandler(404, s._sizes, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server3
s = new FakeSizeServer(Arrays.asList("r1", "r2"));
s.start(URI_PATH, createHandler(200, s._sizes, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server4
s = new FakeSizeServer(Arrays.asList("r2"));
s.start(URI_PATH, createHandler(200, s._sizes, 0));
_serverMap.put(serverName(counter), s);
++counter;
counter++;

// server5 ... timing out server
s = new FakeSizeServer(Arrays.asList("s1", "s3"));
s.start(URI_PATH, createHandler(200, s._sizes, TIMEOUT_MSEC * 100));
_serverMap.put(serverName(counter), s);
++counter;
counter++;
}

@AfterClass
@@ -293,7 +293,7 @@ private void validateTableSubTypeSize(String[] servers, TableSizeReader.TableSub
Assert.assertTrue(segmentDetails._serverInfo.containsKey(expectedServer));
if (expectedServer.equals("server2") || expectedServer.equals("server5")) {
hasErrors = true;
--numResponses;
numResponses--;
}
}
if (numResponses != 0) {
@@ -88,28 +88,32 @@ public void testInstanceToggle
for (String instanceName : ControllerTestUtils
.getHelixAdmin().getInstancesInClusterWithTag(ControllerTestUtils.getHelixClusterName(), SERVER_TAG_NAME)) {
toggleInstanceState(instanceName, "disable");
checkNumOnlineInstancesFromExternalView(OFFLINE_TABLE_NAME, --numEnabledInstances);
numEnabledInstances--;
checkNumOnlineInstancesFromExternalView(OFFLINE_TABLE_NAME, numEnabledInstances);
}

// Enable server instances
for (String instanceName : ControllerTestUtils
.getHelixAdmin().getInstancesInClusterWithTag(ControllerTestUtils.getHelixClusterName(), SERVER_TAG_NAME)) {
toggleInstanceState(instanceName, "ENABLE");
checkNumOnlineInstancesFromExternalView(OFFLINE_TABLE_NAME, ++numEnabledInstances);
numEnabledInstances++;
checkNumOnlineInstancesFromExternalView(OFFLINE_TABLE_NAME, numEnabledInstances);
}

// Disable broker instances
for (String instanceName : ControllerTestUtils
.getHelixAdmin().getInstancesInClusterWithTag(ControllerTestUtils.getHelixClusterName(), BROKER_TAG_NAME)) {
toggleInstanceState(instanceName, "Disable");
checkNumOnlineInstancesFromExternalView(CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, --numEnabledInstances);
numEnabledInstances--;
checkNumOnlineInstancesFromExternalView(CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, numEnabledInstances);
}

// Enable broker instances
for (String instanceName : ControllerTestUtils
.getHelixAdmin().getInstancesInClusterWithTag(ControllerTestUtils.getHelixClusterName(), BROKER_TAG_NAME)) {
toggleInstanceState(instanceName, "Enable");
checkNumOnlineInstancesFromExternalView(CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, ++numEnabledInstances);
numEnabledInstances++;
checkNumOnlineInstancesFromExternalView(CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, numEnabledInstances);
}

// Delete table
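The same ordering constraint applies when the operator sat inside an argument list, as with --numEnabledInstances above: the counter has to be updated on its own line before the call so the callee still receives the post-decrement value. A minimal sketch (expectEnabledInstances is a hypothetical stand-in for checkNumOnlineInstancesFromExternalView):

public class CounterAsArgumentExample {
  // Hypothetical stand-in for an assertion helper such as
  // checkNumOnlineInstancesFromExternalView(resource, expectedCount).
  static void expectEnabledInstances(int expected) {
    System.out.println("expecting " + expected + " enabled instances");
  }

  public static void main(String[] args) {
    int numEnabledInstances = 3;

    // Old form (now banned): expectEnabledInstances(--numEnabledInstances);
    // New form: mutate first, then pass the already-updated value.
    numEnabledInstances--;
    expectEnabledInstances(numEnabledInstances); // prints "expecting 2 enabled instances"

    numEnabledInstances++;
    expectEnabledInstances(numEnabledInstances); // prints "expecting 3 enabled instances"
  }
}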
@@ -66,7 +66,7 @@ public void testOfflineTableLifeCycle
.size(), ControllerTestUtils.NUM_BROKER_INSTANCES);

// Adding segments
for (int i = 0; i < 10; ++i) {
for (int i = 0; i < 10; i++) {
Assert.assertEquals(
ControllerTestUtils
.getHelixAdmin().getResourceIdealState(ControllerTestUtils.getHelixClusterName(), TABLE_NAME + "_OFFLINE")
@@ -128,7 +128,7 @@ public void testBasicAndConcurrentAddingAndDeletingSegments

// Concurrent segment deletion
ExecutorService addSegmentExecutor = Executors.newFixedThreadPool(3);
for (int i = 0; i < 3; ++i) {
for (int i = 0; i < 3; i++) {
addSegmentExecutor.execute(new Runnable() {
@Override
public void run() {
@@ -66,7 +66,7 @@ private void testDifferentTimeUnits(long pastTimeStamp, TimeUnit timeUnit, long
// Create metadata for 10 segments really old, that will be removed by the retention manager.
final int numOlderSegments = 10;
List<String> removedSegments = new ArrayList<>();
for (int i = 0; i < numOlderSegments; ++i) {
for (int i = 0; i < numOlderSegments; i++) {
SegmentMetadata segmentMetadata = mockSegmentMetadata(pastTimeStamp, pastTimeStamp, timeUnit);
SegmentZKMetadata segmentZKMetadata = new SegmentZKMetadata(segmentMetadata.getName());
ZKMetadataUtils
@@ -75,7 +75,7 @@ private void testDifferentTimeUnits(long pastTimeStamp, TimeUnit timeUnit, long
removedSegments.add(segmentZKMetadata.getSegmentName());
}
// Create metadata for 5 segments that will not be removed.
for (int i = 0; i < 5; ++i) {
for (int i = 0; i < 5; i++) {
SegmentMetadata segmentMetadata =
mockSegmentMetadata(dayAfterTomorrowTimeStamp, dayAfterTomorrowTimeStamp, timeUnit);
SegmentZKMetadata segmentZKMetadata = new SegmentZKMetadata(segmentMetadata.getName());
@@ -104,13 +104,13 @@ public void setUp
public void testSegmentLineageCleanup()
throws IOException, InterruptedException {
// Create metadata for original segments
for (int i = 0; i < 5; ++i) {
for (int i = 0; i < 5; i++) {
ControllerTestUtils.getHelixResourceManager().addNewSegment(OFFLINE_TABLE_NAME,
SegmentMetadataMockUtils.mockSegmentMetadata(OFFLINE_TABLE_NAME, "segment_" + i), "downloadUrl");
}

// Create metadata for merged segments.
for (int i = 0; i < 2; ++i) {
for (int i = 0; i < 2; i++) {
ControllerTestUtils.getHelixResourceManager().addNewSegment(OFFLINE_TABLE_NAME,
SegmentMetadataMockUtils.mockSegmentMetadata(OFFLINE_TABLE_NAME, "merged_" + i), "downloadUrl");
}
@@ -430,7 +430,8 @@ protected boolean consumeLoop
// We did not consume any rows. Update the partition-consuming metric only if we have been idling for a long
// time.
// Create a new stream consumer wrapper, in case we are stuck on something.
if (++consecutiveIdleCount > maxIdleCountBeforeStatUpdate) {
consecutiveIdleCount++;
if (consecutiveIdleCount > maxIdleCountBeforeStatUpdate) {
_serverMetrics.setValueOfTableGauge(_metricKeyName, ServerGauge.LLC_PARTITION_CONSUMING, 1);
consecutiveIdleCount = 0;
makeStreamConsumer("Idle for too long");
