 import org.apache.beam.sdk.metrics.Gauge;
 import org.apache.beam.sdk.metrics.Histogram;
 import org.apache.beam.sdk.metrics.MetricName;
-import org.apache.beam.sdk.metrics.Metrics;
 import org.apache.beam.sdk.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -44,7 +43,7 @@ public interface KafkaMetrics {
   void updateKafkaMetrics();
 
   /*Used to update backlog metrics, which is later used to update metrics container in another thread*/
-  void recordBacklogBytes(String topic, int partitionId, long backlog);
+  // void recordBacklogBytes(String topic, int partitionId, long backlog);
 
   /** No-op implementation of {@code KafkaResults}. */
   class NoOpKafkaMetrics implements KafkaMetrics {
@@ -54,13 +53,13 @@ private NoOpKafkaMetrics() {}
     public void updateSuccessfulRpcMetrics(String topic, Duration elapsedTime) {}
 
     @Override
-    public void updateBacklogBytes(String topic, int partitionId, long elapsedTime) {}
+    public void updateBacklogBytes(String topic, int partitionId, long backlog) {}
 
     @Override
     public void updateKafkaMetrics() {}
 
-    @Override
-    public void recordBacklogBytes(String topic, int partitionId, long backlog) {};
+    // @Override
+    // public void recordBacklogBytes(String topic, int partitionId, long backlog) {};
 
     private static NoOpKafkaMetrics singleton = new NoOpKafkaMetrics();
@@ -89,14 +88,14 @@ abstract class KafkaMetricsImpl implements KafkaMetrics {
 
     abstract ConcurrentHashMap<String, ConcurrentLinkedQueue<Duration>> perTopicRpcLatencies();
 
-    abstract ConcurrentHashMap<String, Long> perTopicPartitionBacklogs();
+    abstract ConcurrentHashMap<MetricName, Long> perTopicPartitionBacklogs();
 
     abstract AtomicBoolean isWritable();
 
     public static KafkaMetricsImpl create() {
       return new AutoValue_KafkaMetrics_KafkaMetricsImpl(
           new ConcurrentHashMap<String, ConcurrentLinkedQueue<Duration>>(),
-          new ConcurrentHashMap<String, Long>(),
+          new ConcurrentHashMap<MetricName, Long>(),
           new AtomicBoolean(true));
     }
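
Reviewer note (not part of the patch): re-keying the map by MetricName works because MetricName is an AutoValue type with value-based equals/hashCode, so it is safe as a ConcurrentHashMap key. A minimal stand-alone sketch of why the old String round trip was lossy, using only MetricName.named and getName from the Beam SDK:

import java.util.concurrent.ConcurrentHashMap;
import org.apache.beam.sdk.metrics.MetricName;

public class BacklogKeySketch {
  public static void main(String[] args) {
    MetricName original = MetricName.named("KafkaSink", "backlog-topic-0");

    // Old scheme: only getName() was stored, and the namespace was
    // re-attached later with a hard-coded "KafkaSink".
    MetricName rebuilt = MetricName.named("KafkaSink", original.getName());

    // New scheme: the full MetricName is the key, so value equality
    // preserves both namespace and name through the map.
    ConcurrentHashMap<MetricName, Long> backlogs = new ConcurrentHashMap<>();
    backlogs.put(original, 1024L);
    System.out.println(backlogs.containsKey(rebuilt)); // true here only
    // because the namespace happened to be "KafkaSink" to begin with;
    // any other namespace would have been silently lost.
  }
}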
@@ -133,8 +132,9 @@ public void updateSuccessfulRpcMetrics(String topic, Duration elapsedTime) {
     @Override
     public void updateBacklogBytes(String topicName, int partitionId, long backlog) {
       if (isWritable().get()) {
-        String name = KafkaSinkMetrics.getMetricGaugeName(topicName, partitionId).getName();
-        perTopicPartitionBacklogs().put(name, backlog);
+        // Key by the full MetricName so the namespace isn't dropped and rebuilt later.
+        MetricName metricName = KafkaSinkMetrics.getMetricGaugeName(topicName, partitionId);
+        perTopicPartitionBacklogs().put(metricName, backlog);
       }
     }
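
Reviewer note: updateBacklogBytes records into the map only while isWritable() is true; per the interface comment, another thread later drains the map into the metrics container. The flush side is outside this hunk, but the gate pattern looks roughly like this self-contained sketch (GatedRecorder is a hypothetical class for illustration, not Beam API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

// Writers record only while the flag is set; the flushing thread flips the
// flag first, so a late write is dropped instead of racing the drain.
class GatedRecorder {
  private final ConcurrentHashMap<String, Long> values = new ConcurrentHashMap<>();
  private final AtomicBoolean isWritable = new AtomicBoolean(true);

  void record(String key, long value) {
    if (isWritable.get()) {
      values.put(key, value); // last write per key wins
    }
  }

  void flush() {
    // compareAndSet guarantees exactly one thread performs the drain.
    if (isWritable.compareAndSet(true, false)) {
      values.forEach((key, value) -> System.out.println(key + " = " + value));
      values.clear();
    }
  }
}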
@@ -161,10 +161,10 @@ private void recordRpcLatencyMetrics() {
 
     /** This is for creating gauges from backlog bytes recorded previously. */
     private void recordBacklogBytesInternal() {
-      for (Map.Entry<String, Long> backlogs : perTopicPartitionBacklogs().entrySet()) {
-        Gauge gauge =
-            KafkaSinkMetrics.createBacklogGauge(MetricName.named("KafkaSink", backlogs.getKey()));
-        gauge.set(backlogs.getValue());
+      for (Map.Entry<MetricName, Long> backlog : perTopicPartitionBacklogs().entrySet()) {
+        // TODO: a forEach lambda might read better here (see the sketch below).
+        Gauge gauge = KafkaSinkMetrics.createBacklogGauge(backlog.getKey());
+        gauge.set(backlog.getValue());
       }
     }
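
Reviewer note: the forEach variant mentioned in the TODO above would be behaviorally identical to the loop; sketched below under the assumption that createBacklogGauge accepts the stored MetricName, as the loop already implies:

    // Sketch: equivalent forEach form of recordBacklogBytesInternal();
    // purely a readability choice, no change in behavior.
    private void recordBacklogBytesInternal() {
      perTopicPartitionBacklogs()
          .forEach((metricName, backlogBytes) ->
              KafkaSinkMetrics.createBacklogGauge(metricName).set(backlogBytes));
    }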
@@ -177,12 +182,12 @@ private void recordBacklogBytesInternal() {
     * @param backlogBytes backlog for the topic Only included in the metric key if
     *     'supportsMetricsDeletion' is enabled.
     */
-    @Override
-    public void recordBacklogBytes(String topicName, int partitionId, long backlogBytes) {
-      Gauge perPartition =
-          Metrics.gauge(KafkaSinkMetrics.getMetricGaugeName(topicName, partitionId));
-      perPartition.set(backlogBytes);
-    }
+    // @Override
+    // public void recordBacklogBytes(String topicName, int partitionId, long backlogBytes) {
+    //   Gauge perPartition =
+    //       Metrics.gauge(KafkaSinkMetrics.getMetricGaugeName(topicName, partitionId));
+    //   perPartition.set(backlogBytes);
+    // }
 
     /**
      * Export all metrics recorded in this instance to the underlying {@code perWorkerMetrics}