
Commit 324ecb6

Merge remote-tracking branch 'origin/master' into HDDS-12928
2 parents a90da37 + 7c957cc

162 files changed: +3313 −2161 lines

.github/workflows/ci.yml

Lines changed: 1 addition & 0 deletions
@@ -271,6 +271,7 @@ jobs:
     secrets: inherit
     with:
       java-version: ${{ needs.build-info.outputs.java-version }}
+      pre-script: sudo hostname localhost
       ratis-args: ${{ inputs.ratis_args }}
       script: integration
       script-args: -Ptest-${{ matrix.profile }} -Drocks_tools_native

.mvn/extensions.xml

Lines changed: 1 addition & 1 deletion
@@ -29,6 +29,6 @@
   <extension>
     <groupId>com.gradle</groupId>
     <artifactId>common-custom-user-data-maven-extension</artifactId>
-    <version>2.0.1</version>
+    <version>2.0.2</version>
   </extension>
 </extensions>

hadoop-hdds/client/pom.xml

Lines changed: 6 additions & 1 deletion
@@ -16,8 +16,9 @@
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.ozone</groupId>
-    <artifactId>hdds</artifactId>
+    <artifactId>hdds-hadoop-dependency-client</artifactId>
     <version>2.1.0-SNAPSHOT</version>
+    <relativePath>../hadoop-dependency-client</relativePath>
   </parent>

   <artifactId>hdds-client</artifactId>
@@ -47,6 +48,10 @@
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-lang3</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-common</artifactId>

hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java

Lines changed: 3 additions & 3 deletions
@@ -19,8 +19,8 @@

 import com.google.common.annotations.VisibleForTesting;
 import java.util.Map;
-import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -76,7 +76,7 @@ public final class ContainerClientMetrics {
   private MutableQuantiles[] datanodeHsyncLatencyNs;
   private final Map<PipelineID, MutableCounterLong> writeChunkCallsByPipeline;
   private final Map<PipelineID, MutableCounterLong> writeChunkBytesByPipeline;
-  private final Map<UUID, MutableCounterLong> writeChunksCallsByLeaders;
+  private final Map<DatanodeID, MutableCounterLong> writeChunksCallsByLeaders;
   private final MetricsRegistry registry;

   public static synchronized ContainerClientMetrics acquire() {
@@ -272,7 +272,7 @@ Map<PipelineID, MutableCounterLong> getWriteChunkCallsByPipeline() {
     return writeChunkCallsByPipeline;
   }

-  Map<UUID, MutableCounterLong> getWriteChunksCallsByLeaders() {
+  Map<DatanodeID, MutableCounterLong> getWriteChunksCallsByLeaders() {
     return writeChunksCallsByLeaders;
   }
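Note: this change swaps the raw java.util.UUID key for the interned DatanodeID. Below is a minimal sketch of the per-leader counter pattern the class relies on, using plain JDK types; LeaderId and AtomicLong are hypothetical stand-ins for DatanodeID and MutableCounterLong, not the Ozone classes.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// LeaderId is a hypothetical stand-in for DatanodeID: a value type with
// proper equals/hashCode, which is all a ConcurrentHashMap key needs.
record LeaderId(String value) {}

public class PerLeaderCounters {
  // One counter per pipeline leader, created lazily on first write.
  private final Map<LeaderId, AtomicLong> writeChunkCallsByLeader =
      new ConcurrentHashMap<>();

  public void recordWriteChunk(LeaderId leader) {
    writeChunkCallsByLeader
        .computeIfAbsent(leader, id -> new AtomicLong())
        .incrementAndGet();
  }

  public long callsFor(LeaderId leader) {
    AtomicLong counter = writeChunkCallsByLeader.get(leader);
    return counter == null ? 0 : counter.get();
  }
}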

hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java

Lines changed: 9 additions & 9 deletions
@@ -33,7 +33,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
-import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
@@ -42,6 +41,7 @@
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -88,9 +88,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
       LoggerFactory.getLogger(XceiverClientGrpc.class);
   private final Pipeline pipeline;
   private final ConfigurationSource config;
-  private final Map<UUID, XceiverClientProtocolServiceStub> asyncStubs;
+  private final Map<DatanodeID, XceiverClientProtocolServiceStub> asyncStubs;
   private final XceiverClientMetrics metrics;
-  private final Map<UUID, ManagedChannel> channels;
+  private final Map<DatanodeID, ManagedChannel> channels;
   private final Semaphore semaphore;
   private long timeout;
   private final SecurityConfig secConfig;
@@ -178,8 +178,8 @@ private synchronized void connectToDatanode(DatanodeDetails dn)
     ManagedChannel channel = createChannel(dn, port).build();
     XceiverClientProtocolServiceStub asyncStub =
         XceiverClientProtocolServiceGrpc.newStub(channel);
-    asyncStubs.put(dn.getUuid(), asyncStub);
-    channels.put(dn.getUuid(), channel);
+    asyncStubs.put(dn.getID(), asyncStub);
+    channels.put(dn.getID(), channel);
   }

   protected NettyChannelBuilder createChannel(DatanodeDetails dn, int port)
@@ -213,7 +213,7 @@ protected NettyChannelBuilder createChannel(DatanodeDetails dn, int port)
    */
   @VisibleForTesting
   public boolean isConnected(DatanodeDetails details) {
-    return isConnected(channels.get(details.getUuid()));
+    return isConnected(channels.get(details.getID()));
   }

   private boolean isConnected(ManagedChannel channel) {
@@ -567,7 +567,7 @@ public XceiverClientReply sendCommandAsync(
       ContainerCommandRequestProto request, DatanodeDetails dn)
       throws IOException, InterruptedException {
     checkOpen(dn);
-    UUID dnId = dn.getUuid();
+    DatanodeID dnId = dn.getID();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Send command {} to datanode {}",
           request.getCmdType(), dn.getIpAddress());
@@ -625,7 +625,7 @@ private synchronized void checkOpen(DatanodeDetails dn)
       throw new IOException("This channel is not connected.");
     }

-    ManagedChannel channel = channels.get(dn.getUuid());
+    ManagedChannel channel = channels.get(dn.getID());
     // If the channel doesn't exist for this specific datanode or the channel
     // is closed, just reconnect
     if (!isConnected(channel)) {
@@ -639,7 +639,7 @@ private void reconnect(DatanodeDetails dn)
     ManagedChannel channel;
     try {
       connectToDatanode(dn);
-      channel = channels.get(dn.getUuid());
+      channel = channels.get(dn.getID());
     } catch (Exception e) {
       throw new IOException("Error while connecting", e);
    }
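Note: every map in this client (stubs and channels) is now keyed by DatanodeID, so checkOpen and reconnect look up and replace entries under the same key. A hedged sketch of that cache-then-reconnect pattern follows; NodeId and Channel are hypothetical stand-ins, not the Ozone or gRPC classes.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// NodeId and Channel are hypothetical stand-ins for DatanodeID and the
// gRPC ManagedChannel; only the keying pattern mirrors the diff.
record NodeId(String value) {}

interface Channel {
  boolean isOpen();
}

public class ChannelCache {
  private final Map<NodeId, Channel> channels = new ConcurrentHashMap<>();

  // Dial the node and register the channel under its ID (placeholder body).
  private Channel connect(NodeId id) {
    Channel ch = () -> true;
    channels.put(id, ch);
    return ch;
  }

  // Mirrors checkOpen(): reuse the cached channel when it is still open,
  // otherwise reconnect under the same DatanodeID-style key.
  public synchronized Channel checkOpen(NodeId id) throws IOException {
    Channel ch = channels.get(id);
    if (ch == null || !ch.isOpen()) {
      try {
        ch = connect(id);
      } catch (RuntimeException e) {
        throw new IOException("Error while connecting", e);
      }
    }
    return ch;
  }
}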

hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java

Lines changed: 3 additions & 0 deletions
@@ -117,6 +117,9 @@ private synchronized void setReaderType() {
     int expected = expectedDataLocations(repConfig, getLength());
     int available = availableDataLocations(blockInfo.getPipeline(), expected);
     reconstructionReader = available < expected;
+    if (reconstructionReader) {
+      LOG.info("Data locations available: {} < expected: {}, using reconstruction read", available, expected);
+    }
   }

   private void createBlockReader() {
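Note: the new log line fires when fewer data locations are available than the EC layout expects. The sketch below shows why "expected" can be below the full data-block count, assuming cell-aligned striping where a short block only fills the leading data locations; the formula is an illustrative assumption, not the Ozone implementation of expectedDataLocations.

// The formula below is an assumption about cell-aligned EC striping, not
// the Ozone implementation: a block shorter than a full stripe only
// occupies its leading data locations.
public class ExpectedLocations {
  static int expectedDataLocations(long blockLength, int cellSize, int dataBlocks) {
    if (blockLength <= 0) {
      return 0;
    }
    long cellsNeeded = (blockLength + cellSize - 1) / cellSize; // ceiling division
    return (int) Math.min(cellsNeeded, dataBlocks);
  }

  public static void main(String[] args) {
    // RS-3-2 with 1 MiB cells: a 1.5 MiB block spans only 2 data locations,
    System.out.println(expectedDataLocations(1536 * 1024, 1024 * 1024, 3)); // 2
    // while a 10 MiB block needs all 3; fewer available than expected
    // triggers the reconstruction-read path logged in the diff.
    System.out.println(expectedDataLocations(10 * 1024 * 1024, 1024 * 1024, 3)); // 3
  }
}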

hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestContainerClientMetrics.java

Lines changed: 4 additions & 4 deletions
@@ -23,8 +23,8 @@
 import static org.mockito.Mockito.mock;

 import java.util.Collections;
-import java.util.UUID;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.junit.jupiter.api.BeforeEach;
@@ -45,9 +45,9 @@ public void setup() {
   public void testRecordChunkMetrics() {
     ContainerClientMetrics metrics = ContainerClientMetrics.acquire();
     PipelineID pipelineId1 = PipelineID.randomId();
-    UUID leaderId1 = UUID.randomUUID();
+    DatanodeID leaderId1 = DatanodeID.randomID();
     PipelineID pipelineId2 = PipelineID.randomId();
-    UUID leaderId2 = UUID.randomUUID();
+    DatanodeID leaderId2 = DatanodeID.randomID();
     PipelineID pipelineId3 = PipelineID.randomId();

     metrics.recordWriteChunk(createPipeline(pipelineId1, leaderId1), 10);
@@ -103,7 +103,7 @@ public void testAcquireAndRelease() {
     assertNotNull(ContainerClientMetrics.acquire());
   }

-  private Pipeline createPipeline(PipelineID piplineId, UUID leaderId) {
+  private Pipeline createPipeline(PipelineID piplineId, DatanodeID leaderId) {
     return Pipeline.newBuilder()
         .setId(piplineId)
         .setReplicationConfig(mock(ReplicationConfig.class))

hadoop-hdds/common/pom.xml

Lines changed: 6 additions & 5 deletions
@@ -16,8 +16,9 @@
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.ozone</groupId>
-    <artifactId>hdds</artifactId>
+    <artifactId>hdds-hadoop-dependency-client</artifactId>
     <version>2.1.0-SNAPSHOT</version>
+    <relativePath>../hadoop-dependency-client</relativePath>
   </parent>
   <artifactId>hdds-common</artifactId>
   <version>2.1.0-SNAPSHOT</version>
@@ -112,16 +113,16 @@
       <artifactId>commons-lang3</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.ozone</groupId>
-      <artifactId>hdds-annotation-processing</artifactId>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
-      <artifactId>hdds-config</artifactId>
+      <artifactId>hdds-annotation-processing</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
-      <artifactId>hdds-hadoop-dependency-client</artifactId>
+      <artifactId>hdds-config</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>

hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeID.java

Lines changed: 7 additions & 1 deletion
@@ -94,6 +94,10 @@ public static DatanodeID of(final UUID id) {
     return CACHE.computeIfAbsent(id, DatanodeID::new);
   }

+  public static DatanodeID of(final HddsProtos.UUID uuid) {
+    return of(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
+  }
+
   /**
    * Returns a random DatanodeID.
    */
@@ -114,7 +118,9 @@ private static HddsProtos.UUID toProto(final UUID id) {
   }

   // TODO: Remove this in follow-up Jira. (HDDS-12015)
-  UUID getUuid() {
+  // Exposing this temporarily to help with refactoring.
+  @Deprecated
+  public UUID getUuid() {
     return uuid;
   }
 }
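Note: the new of(HddsProtos.UUID) overload rebuilds the UUID from its two 64-bit halves and delegates to the interning cache shown in the first hunk. A self-contained sketch of that interning pattern follows; InternedId is a hypothetical stand-in for DatanodeID.

import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// InternedId is a hypothetical stand-in for DatanodeID. The cache yields
// one canonical instance per UUID; the two-long overload mirrors the new
// of(HddsProtos.UUID) factory, which unpacks most/least significant bits.
public final class InternedId {
  private static final ConcurrentMap<UUID, InternedId> CACHE = new ConcurrentHashMap<>();

  private final UUID uuid;

  private InternedId(UUID uuid) {
    this.uuid = uuid;
  }

  public static InternedId of(UUID id) {
    return CACHE.computeIfAbsent(id, InternedId::new);
  }

  public static InternedId of(long mostSigBits, long leastSigBits) {
    return of(new UUID(mostSigBits, leastSigBits));
  }

  public UUID getUuid() {
    return uuid;
  }

  public static void main(String[] args) {
    UUID raw = UUID.randomUUID();
    // Interning makes reference equality hold for equal UUIDs.
    System.out.println(of(raw) == of(raw.getMostSignificantBits(),
        raw.getLeastSignificantBits())); // true
  }
}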

hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java

Lines changed: 14 additions & 0 deletions
@@ -128,6 +128,20 @@ public class ScmConfig extends ReconfigurableConfig {
   )
   private Duration blockDeletionInterval = Duration.ofSeconds(60);

+  @Config(key = "hdds.scm.block.deletion.txn.dn.commit.map.limit",
+      defaultValue = "5000000",
+      type = ConfigType.INT,
+      tags = { ConfigTag.SCM },
+      description =
+          " This value indicates the size of the transactionToDNsCommitMap after which" +
+          " we will skip one round of scm block deleting interval."
+  )
+  private int transactionToDNsCommitMapLimit = 5000000;
+
+  public int getTransactionToDNsCommitMapLimit() {
+    return transactionToDNsCommitMapLimit;
+  }
+
   public Duration getBlockDeletionInterval() {
     return blockDeletionInterval;
   }
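Note: the config description suggests the limit acts as backpressure on the SCM block-deleting service. A hedged sketch of how such a gate might skip a round when the in-flight commit map grows past the threshold; the names are illustrative, not the SCMBlockDeletingService code.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative names only; this is not the SCM block-deleting service.
public class DeletionRoundGate {
  private final Map<Long, Object> transactionToDNsCommitMap = new ConcurrentHashMap<>();
  private final int commitMapLimit;

  public DeletionRoundGate(int commitMapLimit) {
    this.commitMapLimit = commitMapLimit; // default in the diff: 5000000
  }

  // Called once per block-deletion interval.
  public boolean runOneInterval() {
    if (transactionToDNsCommitMap.size() > commitMapLimit) {
      // Too many delete transactions still awaiting datanode acknowledgement:
      // skip this round so the map can drain, as the config description hints.
      return false;
    }
    // ... otherwise send the next batch of delete transactions to datanodes ...
    return true;
  }
}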
