
HDDS-125. Cleanup HDDS CheckStyle issues.
Contributed by Anu Engineer.
anuengineer committed May 29, 2018
1 parent 17aa40f commit 9502b47
Showing 24 changed files with 263 additions and 95 deletions.
@@ -41,7 +41,6 @@
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@@ -190,7 +190,7 @@ public void incrementCount(List<Long> txIDs) throws IOException {
try {
for(Long txID : txIDs) {
try {
byte [] deleteBlockBytes =
byte[] deleteBlockBytes =
deletedStore.get(Longs.toByteArray(txID));
if (deleteBlockBytes == null) {
LOG.warn("Delete txID {} not found", txID);
@@ -152,7 +152,8 @@ public ContainerInfo getContainer(final long containerID) throws
ContainerInfo containerInfo;
lock.lock();
try {
byte[] containerBytes = containerStore.get(Longs.toByteArray(containerID));
byte[] containerBytes = containerStore.get(
Longs.toByteArray(containerID));
if (containerBytes == null) {
throw new SCMException(
"Specified key does not exist. key : " + containerID,
@@ -229,7 +230,8 @@ public ContainerInfo allocateContainer(
containerStateManager.allocateContainer(
pipelineSelector, type, replicationFactor, owner);

byte[] containerIDBytes = Longs.toByteArray(containerInfo.getContainerID());
byte[] containerIDBytes = Longs.toByteArray(
containerInfo.getContainerID());
containerStore.put(containerIDBytes, containerInfo.getProtobuf()
.toByteArray());
} finally {
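Both hunks above key the container store by the container ID encoded with Guava's Longs helper and store the ContainerInfo protobuf bytes as the value. A small hedged sketch of that keying scheme (the container ID value below is made up for illustration):

import com.google.common.primitives.Longs;

/** Round-trip of the 8-byte big-endian key used by containerStore above. */
public final class ContainerKeySketch {
  public static void main(String[] args) {
    long containerID = 42L;                      // made-up ID, for illustration only
    byte[] key = Longs.toByteArray(containerID); // the byte[] handed to the store
    long decoded = Longs.fromByteArray(key);     // recovers the original ID
    System.out.println(decoded == containerID);  // prints "true"
  }
}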
@@ -230,18 +230,18 @@ public List<ContainerInfo> getAllContainers() {
*
* Container State Flow:
*
* [ALLOCATED]------->[CREATING]--------->[OPEN]---------->[CLOSING]------->[CLOSED]
* (CREATE) | (CREATED) (FINALIZE) (CLOSE) |
* | |
* | |
* |(TIMEOUT) (DELETE)|
* | |
* +------------------> [DELETING] <-------------------+
* |
* |
* (CLEANUP)|
* |
* [DELETED]
* [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]------->[CLOSED]
* (CREATE) | (CREATED) (FINALIZE) (CLOSE) |
* | |
* | |
* |(TIMEOUT) (DELETE)|
* | |
* +-------------> [DELETING] <-------------------+
* |
* |
* (CLEANUP)|
* |
* [DELETED]
*/
private void initializeStateMachine() {
stateMachine.addTransition(LifeCycleState.ALLOCATED,
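The flow diagram above is exactly the transition table that initializeStateMachine() registers. The following is a minimal, self-contained sketch of the same flow; the State and Event enums and the map-based addTransition here are illustrative stand-ins, not the HDDS StateMachine class the real method calls into:

import java.util.EnumMap;
import java.util.Map;

/** Illustrative restatement of the container lifecycle diagram above. */
public final class ContainerLifeCycleSketch {
  enum State { ALLOCATED, CREATING, OPEN, CLOSING, CLOSED, DELETING, DELETED }
  enum Event { CREATE, CREATED, FINALIZE, CLOSE, TIMEOUT, DELETE, CLEANUP }

  // (current state, event) -> next state
  private final Map<State, Map<Event, State>> transitions =
      new EnumMap<>(State.class);

  ContainerLifeCycleSketch() {
    addTransition(State.ALLOCATED, State.CREATING, Event.CREATE);
    addTransition(State.CREATING, State.OPEN, Event.CREATED);
    addTransition(State.OPEN, State.CLOSING, Event.FINALIZE);
    addTransition(State.CLOSING, State.CLOSED, Event.CLOSE);
    addTransition(State.CREATING, State.DELETING, Event.TIMEOUT);
    addTransition(State.CLOSED, State.DELETING, Event.DELETE);
    addTransition(State.DELETING, State.DELETED, Event.CLEANUP);
  }

  private void addTransition(State from, State to, Event event) {
    transitions.computeIfAbsent(from, s -> new EnumMap<>(Event.class))
        .put(event, to);
  }

  /** Returns the next state, or throws if the diagram has no such edge. */
  State move(State current, Event event) {
    Map<Event, State> byEvent = transitions.get(current);
    State next = (byEvent == null) ? null : byEvent.get(event);
    if (next == null) {
      throw new IllegalStateException(event + " is not legal in state " + current);
    }
    return next;
  }
}

With this sketch, move(State.ALLOCATED, Event.CREATE) returns CREATING, matching the first edge of the diagram, while an out-of-order event such as CLOSE in state ALLOCATED fails fast.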
@@ -45,15 +45,17 @@ public interface Mapping extends Closeable {
* The max size of the searching range cannot exceed the
* value of count.
*
* @param startContainerID start containerID, >=0, start searching at the head if 0.
* @param startContainerID start containerID, >=0,
* start searching at the head if 0.
* @param count count must be >= 0
* Usually the count will be replace with a very big
* value instead of being unlimited in case the db is very big.
*
* @return a list of container.
* @throws IOException
*/
List<ContainerInfo> listContainer(long startContainerID, int count) throws IOException;
List<ContainerInfo> listContainer(long startContainerID, int count)
throws IOException;

/**
* Allocates a new container for a given keyName and replication factor.
@@ -64,7 +66,8 @@ public interface Mapping extends Closeable {
* @throws IOException
*/
ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException;
HddsProtos.ReplicationFactor replicationFactor, String owner)
throws IOException;

/**
* Deletes a container from SCM.
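The listContainer() contract documented above lends itself to simple paging over the container database. A hedged usage sketch follows; the batch size, the helper class, and the advance-past-the-last-ID step are assumptions for illustration, and the imports for the HDDS Mapping and ContainerInfo types from the hunks above are omitted:

import java.io.IOException;
import java.util.List;

/** Hypothetical caller paging through all containers via Mapping.listContainer. */
final class ListContainersSketch {
  static void printAll(Mapping mapping) throws IOException {
    long start = 0;            // 0 means start searching at the head
    final int batchSize = 100; // count must be >= 0; keep each page bounded
    List<ContainerInfo> batch;
    do {
      batch = mapping.listContainer(start, batchSize);
      for (ContainerInfo info : batch) {
        System.out.println(info.getContainerID());
      }
      if (!batch.isEmpty()) {
        // resume just past the last container returned in this page
        start = batch.get(batch.size() - 1).getContainerID() + 1;
      }
    } while (batch.size() == batchSize);
  }
}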
@@ -31,7 +31,7 @@
@InterfaceAudience.Private
public interface SCMNodeStorageStatMXBean {
/**
* Get the capacity of the dataNode
* Get the capacity of the dataNode.
* @param datanodeID Datanode Id
* @return long
*/
@@ -52,7 +52,7 @@ public interface SCMNodeStorageStatMXBean {
long getUsedSpace(UUID datanodeId);

/**
* Returns the total capacity of all dataNodes
* Returns the total capacity of all dataNodes.
* @return long
*/
long getTotalCapacity();
@@ -56,7 +56,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
// NodeStorageInfo MXBean
private ObjectName scmNodeStorageInfoBean;
/**
* constructs the scmNodeStorageReportMap object
* constructs the scmNodeStorageReportMap object.
*/
public SCMNodeStorageStatMap(OzoneConfiguration conf) {
// scmNodeStorageReportMap = new ConcurrentHashMap<>();
@@ -73,6 +73,9 @@ public SCMNodeStorageStatMap(OzoneConfiguration conf) {
HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
}

/**
* Enum that Describes what we should do at various thresholds.
*/
public enum UtilizationThreshold {
NORMAL, WARN, CRITICAL;
}
@@ -107,8 +110,8 @@ public List<UUID> getDatanodeList(
* @param datanodeID -- Datanode UUID
* @param report - set if StorageReports.
*/
public void insertNewDatanode(UUID datanodeID, Set<StorageLocationReport> report)
throws SCMException {
public void insertNewDatanode(UUID datanodeID,
Set<StorageLocationReport> report) throws SCMException {
Preconditions.checkNotNull(report);
Preconditions.checkState(report.size() != 0);
Preconditions.checkNotNull(datanodeID);
@@ -142,8 +145,8 @@ private void unregisterMXBean() {
* @throws SCMException - if we don't know about this datanode, for new DN
* use insertNewDatanode.
*/
public void updateDatanodeMap(UUID datanodeID, Set<StorageLocationReport> report)
throws SCMException {
public void updateDatanodeMap(UUID datanodeID,
Set<StorageLocationReport> report) throws SCMException {
Preconditions.checkNotNull(datanodeID);
Preconditions.checkNotNull(report);
Preconditions.checkState(report.size() != 0);
@@ -301,7 +304,7 @@ public long getTotalFreeSpace() {
}

/**
* removes the dataNode from scmNodeStorageReportMap
* removes the dataNode from scmNodeStorageReportMap.
* @param datanodeID
* @throws SCMException in case the dataNode is not found in the map.
*/
@@ -339,11 +342,11 @@ private double truncateDecimals(double value) {
}

/**
* get the scmUsed ratio
* get the scmUsed ratio.
*/
public double getScmUsedratio(long scmUsed, long capacity) {
double scmUsedRatio =
truncateDecimals (scmUsed / (double) capacity);
truncateDecimals(scmUsed / (double) capacity);
return scmUsedRatio;
}
/**
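getScmUsedratio() above feeds the NORMAL/WARN/CRITICAL classification declared earlier in this file. A small self-contained sketch of that idea; the 0.75 and 0.95 cut-offs are illustrative stand-ins, not the defaults the real code reads from the HDDS_DATANODE_STORAGE_UTILIZATION_* configuration keys:

/** Illustrative stand-in for the threshold check in SCMNodeStorageStatMap. */
final class UtilizationSketch {
  enum UtilizationThreshold { NORMAL, WARN, CRITICAL }

  // Hypothetical cut-offs; the real values come from configuration.
  private static final double WARN_RATIO = 0.75;
  private static final double CRITICAL_RATIO = 0.95;

  /** Used-to-capacity ratio, mirroring getScmUsedratio(scmUsed, capacity). */
  static double usedRatio(long scmUsed, long capacity) {
    return scmUsed / (double) capacity;
  }

  static UtilizationThreshold classify(long scmUsed, long capacity) {
    double ratio = usedRatio(scmUsed, capacity);
    if (ratio >= CRITICAL_RATIO) {
      return UtilizationThreshold.CRITICAL;
    }
    if (ratio >= WARN_RATIO) {
      return UtilizationThreshold.WARN;
    }
    return UtilizationThreshold.NORMAL;
  }
}

Under these made-up cut-offs, classify(950, 1000) returns CRITICAL while classify(500, 1000) returns NORMAL.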
@@ -69,14 +69,14 @@ public ReportResultBuilder setStatus(
}

public ReportResultBuilder setFullVolumeSet(
Set<StorageLocationReport> fullVolumes) {
this.fullVolumes = fullVolumes;
Set<StorageLocationReport> fullVolumesSet) {
this.fullVolumes = fullVolumesSet;
return this;
}

public ReportResultBuilder setFailedVolumeSet(
Set<StorageLocationReport> failedVolumes) {
this.failedVolumes = failedVolumes;
Set<StorageLocationReport> failedVolumesSet) {
this.failedVolumes = failedVolumesSet;
return this;
}

@@ -92,7 +92,7 @@ public void updateDatanodeMap(UUID datanodeID, Set<ContainerID> containers)
}

/**
* Removes datanode Entry from the map
* Removes datanode Entry from the map.
* @param datanodeID - Datanode ID.
*/
public void removeDatanode(UUID datanodeID) {
@@ -170,8 +170,9 @@ public Pipeline getReplicationPipeline(ReplicationType replicationType,
throws IOException {
PipelineManager manager = getPipelineManager(replicationType);
Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
LOG.debug("Getting replication pipeline forReplicationType {} : ReplicationFactor {}",
replicationType.toString(), replicationFactor.toString());
LOG.debug("Getting replication pipeline forReplicationType {} :" +
" ReplicationFactor {}", replicationType.toString(),
replicationFactor.toString());
return manager.
getPipeline(replicationFactor, replicationType);
}
@@ -45,7 +45,6 @@
import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
@@ -87,7 +86,7 @@
* create a container, which then can be used to store data.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
public class StorageContainerManager extends ServiceRuntimeInfoImpl
public final class StorageContainerManager extends ServiceRuntimeInfoImpl
implements SCMMXBean {

private static final Logger LOG = LoggerFactory
@@ -39,7 +39,6 @@
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Arrays;
@@ -0,0 +1,23 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

/**
* Make checkstyle happy.
* */
package org.apache.hadoop.hdds.scm.block;
@@ -216,8 +216,10 @@ public void testFullContainerReport() throws IOException {

mapping.processContainerReports(crBuilder.build());

ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
Assert.assertEquals(100000000L, updatedContainer.getNumberOfKeys());
ContainerInfo updatedContainer =
mapping.getContainer(info.getContainerID());
Assert.assertEquals(100000000L,
updatedContainer.getNumberOfKeys());
Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
}

@@ -251,8 +253,10 @@ public void testContainerCloseWithContainerReport() throws IOException {

mapping.processContainerReports(crBuilder.build());

ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
Assert.assertEquals(500000000L, updatedContainer.getNumberOfKeys());
ContainerInfo updatedContainer =
mapping.getContainer(info.getContainerID());
Assert.assertEquals(500000000L,
updatedContainer.getNumberOfKeys());
Assert.assertEquals(5368705120L, updatedContainer.getUsedBytes());
NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
.getMatchingContainerIDs(
@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Make CheckStyle happy.
*/
package org.apache.hadoop.hdds.scm.container.closer;
@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Make CheckStyle Happy.
*/
package org.apache.hadoop.hdds.scm.container;
@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Make CheckStyle Happy.
*/
package org.apache.hadoop.hdds.scm.container.states;
(Remaining changed files of the 24 were not loaded in this view.)
