@@ -6,10 +6,9 @@
 import org.testcontainers.utility.ComparableVersion;
 import org.testcontainers.utility.DockerImageName;
 
-import java.io.IOException;
+import java.util.Objects;
 
 /**
  * This container wraps Confluent Kafka and Zookeeper (optionally)
- *
  */
 public class KafkaContainer extends GenericContainer<KafkaContainer> {
@@ -29,11 +28,13 @@ public class KafkaContainer extends GenericContainer<KafkaContainer> {
     // https://docs.confluent.io/platform/7.0.0/release-notes/index.html#ak-raft-kraft
     private static final String MIN_KRAFT_TAG = "7.0.0";
 
+    public static final String DEFAULT_CLUSTER_ID = "4L6g3nShT-eMCtK--X86sw";
+
     protected String externalZookeeperConnect = null;
 
     private boolean kraftEnabled = false;
 
-    private String clusterId;
+    private String clusterId = DEFAULT_CLUSTER_ID;
 
     /**
      * @deprecated use {@link KafkaContainer(DockerImageName)} instead
@@ -98,7 +99,7 @@ public KafkaContainer withKraft() {
             throw new IllegalStateException("Cannot configure Kraft mode when Zookeeper configured");
         }
         verifyMinKraftVersion();
-        kraftEnabled = true;
+        this.kraftEnabled = true;
         return self();
     }
 
@@ -115,7 +116,13 @@ private void verifyMinKraftVersion() {
         }
     }
 
+    private boolean isLessThanCP740() {
+        String actualVersion = DockerImageName.parse(getDockerImageName()).getVersionPart();
+        return new ComparableVersion(actualVersion).isLessThan("7.4.0");
+    }
+
     public KafkaContainer withClusterId(String clusterId) {
+        Objects.requireNonNull(clusterId, "clusterId cannot be null");
         this.clusterId = clusterId;
         return self();
     }
@@ -126,7 +133,7 @@ public String getBootstrapServers() {
 
     @Override
     protected void configure() {
-        if (kraftEnabled) {
+        if (this.kraftEnabled) {
             waitingFor(Wait.forLogMessage(".*Transitioning from RECOVERY to RUNNING.*", 1));
             configureKraft();
         } else {
@@ -136,31 +143,27 @@ protected void configure() {
     }
 
     protected void configureKraft() {
-        withEnv(
-            "KAFKA_NODE_ID",
-            getEnvMap().computeIfAbsent("KAFKA_NODE_ID", key -> getEnvMap().get("KAFKA_BROKER_ID"))
-        );
+        // CP 7.4.0
+        getEnvMap().computeIfAbsent("CLUSTER_ID", key -> clusterId);
+        getEnvMap().computeIfAbsent("KAFKA_NODE_ID", key -> getEnvMap().get("KAFKA_BROKER_ID"));
         withEnv(
             "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP",
             String.format("%s,CONTROLLER:PLAINTEXT", getEnvMap().get("KAFKA_LISTENER_SECURITY_PROTOCOL_MAP"))
         );
         withEnv("KAFKA_LISTENERS", String.format("%s,CONTROLLER://0.0.0.0:9094", getEnvMap().get("KAFKA_LISTENERS")));
 
         withEnv("KAFKA_PROCESS_ROLES", "broker,controller");
-        withEnv(
-            "KAFKA_CONTROLLER_QUORUM_VOTERS",
-            getEnvMap()
-                .computeIfAbsent(
-                    "KAFKA_CONTROLLER_QUORUM_VOTERS",
-                    key -> {
-                        return String.format(
-                            "%s@%s:9094",
-                            getEnvMap().get("KAFKA_NODE_ID"),
-                            getNetwork() != null ? getNetworkAliases().get(0) : "localhost"
-                        );
-                    }
-                )
-        );
+        getEnvMap()
+            .computeIfAbsent(
+                "KAFKA_CONTROLLER_QUORUM_VOTERS",
+                key -> {
+                    return String.format(
+                        "%s@%s:9094",
+                        getEnvMap().get("KAFKA_NODE_ID"),
+                        getNetwork() != null ? getNetworkAliases().get(0) : "localhost"
+                    );
+                }
+            );
         withEnv("KAFKA_CONTROLLER_LISTENER_NAMES", "CONTROLLER");
     }
 
@@ -186,27 +189,28 @@ protected void containerIsStarting(InspectContainerResponse containerInfo) {
             brokerAdvertisedListener(containerInfo)
         );
 
-        command += (kraftEnabled) ? commandKraft() : commandZookeeper();
+        if (this.kraftEnabled && isLessThanCP740()) {
+            // Optimization: skip the checks
+            command += "echo '' > /etc/confluent/docker/ensure \n";
+            command += commandKraft();
+        }
+
+        if (!this.kraftEnabled) {
+            // Optimization: skip the checks
+            command += "echo '' > /etc/confluent/docker/ensure \n";
+            command += commandZookeeper();
+        }
 
-        // Optimization: skip the checks
-        command += "echo '' > /etc/confluent/docker/ensure \n";
         // Run the original command
         command += "/etc/confluent/docker/run \n";
         copyFileToContainer(Transferable.of(command, 0777), STARTER_SCRIPT);
     }
 
     protected String commandKraft() {
         String command = "sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure\n";
-        try {
-            if (clusterId == null) {
-                clusterId = execInContainer("kafka-storage", "random-uuid").getStdout().trim();
-            }
-        } catch (IOException | InterruptedException e) {
-            logger().error("Failed to execute `kafka-storage random-uuid`. Exception message: {}", e.getMessage());
-        }
         command +=
             "echo 'kafka-storage format --ignore-formatted -t \"" +
-            clusterId +
+            this.clusterId +
             "\" -c /etc/kafka/kafka.properties' >> /etc/confluent/docker/configure\n";
         return command;
     }
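
For reference, a minimal usage sketch under these changes (not part of the diff itself). It assumes a Confluent Platform image tag of 7.4.0 or newer; the image tag and cluster id below are illustrative only, and DEFAULT_CLUSTER_ID is used whenever withClusterId is not called.

    // Sketch: start Kafka in KRaft mode with an explicit cluster id.
    KafkaContainer kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:7.4.0"))
        .withKraft()
        .withClusterId("4L6g3nShT-eMCtK--X86sw");
    kafka.start();
    String bootstrapServers = kafka.getBootstrapServers(); // pass to the Kafka client under test
    kafka.stop();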