Commit c64aab9

Support Kraft post Confluent Platform 7.4.0 (#7014)
Fixes #7010

Co-authored-by: Eddú Meléndez <eddu.melendez@gmail.com>
1 parent 3616ebf commit c64aab9

File tree

4 files changed: +63 -38 lines changed


docs/modules/kafka.md

+1 -2

@@ -34,8 +34,7 @@ If for some reason you want to use an externally running Zookeeper, then just pa
 
 ### Using Kraft mode
 
-The self-managed (Kraft) mode is available as a preview feature since version 3.0 (confluentinc/cp-kafka:7.0.x) and
-declared as a production ready in 3.3.1 (confluentinc/cp-kafka:7.3.x).
+KRaft mode was declared production ready in 3.3.1 (confluentinc/cp-kafka:7.3.x)"
 
 <!--codeinclude-->
 [Kraft mode](../../modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java) inside_block:withKraftMode
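For orientation, the withKraftMode snippet that codeinclude pulls in boils down to the sketch below. This is a minimal, self-contained illustration assembled from the tests touched by this commit (the 7.4.0 image tag, withKraft() and getBootstrapServers() all appear in the diffs further down); the class name is made up and the sketch is not part of the commit itself.

import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.utility.DockerImageName;

public class KafkaKraftExample {

    public static void main(String[] args) {
        // withKraft() requires a cp-kafka tag >= 7.0.0; 7.4.0 is the tag this commit adds support for.
        try (
            KafkaContainer kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.4.0"))
                .withKraft()
        ) {
            kafka.start();
            // Hand the bootstrap servers to any Kafka client (producer, consumer, AdminClient).
            System.out.println("Kafka (KRaft) listening at " + kafka.getBootstrapServers());
        }
    }
}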

examples/kafka-cluster/src/test/java/com/example/kafkacluster/KafkaContainerClusterTest.java

+12

@@ -51,6 +51,18 @@ public void testKafkaContainerKraftCluster() throws Exception {
         }
     }
 
+    @Test
+    public void testKafkaContainerKraftClusterAfterConfluentPlatform740() throws Exception {
+        try (KafkaContainerKraftCluster cluster = new KafkaContainerKraftCluster("7.4.0", 3, 2)) {
+            cluster.start();
+            String bootstrapServers = cluster.getBootstrapServers();
+
+            assertThat(cluster.getBrokers()).hasSize(3);
+
+            testKafkaFunctionality(bootstrapServers, 3, 2);
+        }
+    }
+
     protected void testKafkaFunctionality(String bootstrapServers, int partitions, int rf) throws Exception {
         try (
             AdminClient adminClient = AdminClient.create(

modules/kafka/src/main/java/org/testcontainers/containers/KafkaContainer.java

+38 -34

@@ -6,11 +6,10 @@
 import org.testcontainers.utility.ComparableVersion;
 import org.testcontainers.utility.DockerImageName;
 
-import java.io.IOException;
+import java.util.Objects;
 
 /**
  * This container wraps Confluent Kafka and Zookeeper (optionally)
- *
  */
 public class KafkaContainer extends GenericContainer<KafkaContainer> {
 
@@ -29,11 +28,13 @@ public class KafkaContainer extends GenericContainer<KafkaContainer> {
     // https://docs.confluent.io/platform/7.0.0/release-notes/index.html#ak-raft-kraft
     private static final String MIN_KRAFT_TAG = "7.0.0";
 
+    public static final String DEFAULT_CLUSTER_ID = "4L6g3nShT-eMCtK--X86sw";
+
     protected String externalZookeeperConnect = null;
 
     private boolean kraftEnabled = false;
 
-    private String clusterId;
+    private String clusterId = DEFAULT_CLUSTER_ID;
 
     /**
      * @deprecated use {@link KafkaContainer(DockerImageName)} instead
@@ -98,7 +99,7 @@ public KafkaContainer withKraft() {
             throw new IllegalStateException("Cannot configure Kraft mode when Zookeeper configured");
         }
         verifyMinKraftVersion();
-        kraftEnabled = true;
+        this.kraftEnabled = true;
         return self();
     }
 
@@ -115,7 +116,13 @@ private void verifyMinKraftVersion() {
         }
     }
 
+    private boolean isLessThanCP740() {
+        String actualVersion = DockerImageName.parse(getDockerImageName()).getVersionPart();
+        return new ComparableVersion(actualVersion).isLessThan("7.4.0");
+    }
+
     public KafkaContainer withClusterId(String clusterId) {
+        Objects.requireNonNull(clusterId, "clusterId cannot be null");
         this.clusterId = clusterId;
         return self();
     }
@@ -126,7 +133,7 @@ public String getBootstrapServers() {
 
     @Override
     protected void configure() {
-        if (kraftEnabled) {
+        if (this.kraftEnabled) {
             waitingFor(Wait.forLogMessage(".*Transitioning from RECOVERY to RUNNING.*", 1));
             configureKraft();
         } else {
@@ -136,31 +143,27 @@ protected void configure() {
     }
 
     protected void configureKraft() {
-        withEnv(
-            "KAFKA_NODE_ID",
-            getEnvMap().computeIfAbsent("KAFKA_NODE_ID", key -> getEnvMap().get("KAFKA_BROKER_ID"))
-        );
+        //CP 7.4.0
+        getEnvMap().computeIfAbsent("CLUSTER_ID", key -> clusterId);
+        getEnvMap().computeIfAbsent("KAFKA_NODE_ID", key -> getEnvMap().get("KAFKA_BROKER_ID"));
         withEnv(
             "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP",
             String.format("%s,CONTROLLER:PLAINTEXT", getEnvMap().get("KAFKA_LISTENER_SECURITY_PROTOCOL_MAP"))
         );
         withEnv("KAFKA_LISTENERS", String.format("%s,CONTROLLER://0.0.0.0:9094", getEnvMap().get("KAFKA_LISTENERS")));
 
         withEnv("KAFKA_PROCESS_ROLES", "broker,controller");
-        withEnv(
-            "KAFKA_CONTROLLER_QUORUM_VOTERS",
-            getEnvMap()
-                .computeIfAbsent(
-                    "KAFKA_CONTROLLER_QUORUM_VOTERS",
-                    key -> {
-                        return String.format(
-                            "%s@%s:9094",
-                            getEnvMap().get("KAFKA_NODE_ID"),
-                            getNetwork() != null ? getNetworkAliases().get(0) : "localhost"
-                        );
-                    }
-                )
-        );
+        getEnvMap()
+            .computeIfAbsent(
+                "KAFKA_CONTROLLER_QUORUM_VOTERS",
+                key -> {
+                    return String.format(
+                        "%s@%s:9094",
+                        getEnvMap().get("KAFKA_NODE_ID"),
+                        getNetwork() != null ? getNetworkAliases().get(0) : "localhost"
+                    );
+                }
+            );
         withEnv("KAFKA_CONTROLLER_LISTENER_NAMES", "CONTROLLER");
     }
 
@@ -186,27 +189,28 @@ protected void containerIsStarting(InspectContainerResponse containerInfo) {
             brokerAdvertisedListener(containerInfo)
         );
 
-        command += (kraftEnabled) ? commandKraft() : commandZookeeper();
+        if (this.kraftEnabled && isLessThanCP740()) {
+            // Optimization: skip the checks
+            command += "echo '' > /etc/confluent/docker/ensure \n";
+            command += commandKraft();
+        }
+
+        if (!this.kraftEnabled) {
+            // Optimization: skip the checks
+            command += "echo '' > /etc/confluent/docker/ensure \n";
+            command += commandZookeeper();
+        }
 
-        // Optimization: skip the checks
-        command += "echo '' > /etc/confluent/docker/ensure \n";
         // Run the original command
         command += "/etc/confluent/docker/run \n";
         copyFileToContainer(Transferable.of(command, 0777), STARTER_SCRIPT);
     }
 
     protected String commandKraft() {
         String command = "sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure\n";
-        try {
-            if (clusterId == null) {
-                clusterId = execInContainer("kafka-storage", "random-uuid").getStdout().trim();
-            }
-        } catch (IOException | InterruptedException e) {
-            logger().error("Failed to execute `kafka-storage random-uuid`. Exception message: {}", e.getMessage());
-        }
         command +=
             "echo 'kafka-storage format --ignore-formatted -t \"" +
-            clusterId +
+            this.clusterId +
             "\" -c /etc/kafka/kafka.properties' >> /etc/confluent/docker/configure\n";
         return command;
     }
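The practical upshot of the KafkaContainer changes above: the cluster id is no longer generated by running kafka-storage random-uuid inside the container. It now defaults to DEFAULT_CLUSTER_ID, is exported as the CLUSTER_ID environment variable, and withClusterId rejects null. For CP 7.4.0 and newer tags the commit stops appending commandKraft() entirely, which suggests those images handle the storage formatting themselves via CLUSTER_ID. A rough usage sketch follows; the class name is made up and the default constant is reused purely as a stand-in for a caller-supplied cluster id.

import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.utility.DockerImageName;

public class KafkaClusterIdExample {

    public static void main(String[] args) {
        try (
            KafkaContainer kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.4.0"))
                .withKraft()
                // Must be non-null now; shown with the library's own default value,
                // substitute a valid Kafka cluster id of your own if you need a specific one.
                .withClusterId(KafkaContainer.DEFAULT_CLUSTER_ID)
        ) {
            kafka.start();
        }
    }
}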

modules/kafka/src/test/java/org/testcontainers/containers/KafkaContainerTest.java

+12 -2

@@ -133,10 +133,20 @@ public void testWithHostExposedPortAndExternalNetwork() throws Exception {
     }
 
     @Test
-    public void testUsageKraft() throws Exception {
+    public void testUsageKraftBeforeConfluentPlatformVersion74() throws Exception {
         try (
-            // withKraftMode {
             KafkaContainer kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.0.1")).withKraft()
+        ) {
+            kafka.start();
+            testKafkaFunctionality(kafka.getBootstrapServers());
+        }
+    }
+
+    @Test
+    public void testUsageKraftAfterConfluentPlatformVersion74() throws Exception {
+        try (
+            // withKraftMode {
+            KafkaContainer kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.4.0")).withKraft()
             // }
         ) {
             kafka.start();
