diff --git a/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/client/GrpcClientDecorator.java b/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/client/GrpcClientDecorator.java
index 9fa7c98f0af..dc4c794efc6 100644
--- a/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/client/GrpcClientDecorator.java
+++ b/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/client/GrpcClientDecorator.java
@@ -3,9 +3,6 @@
 import static datadog.context.propagation.Propagators.defaultPropagator;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;

 import datadog.context.Context;
 import datadog.context.propagation.CarrierSetter;
@@ -14,6 +11,7 @@
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.naming.SpanNaming;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.InternalSpanTypes;
@@ -23,7 +21,6 @@
 import io.grpc.MethodDescriptor;
 import io.grpc.Status;
 import java.util.BitSet;
-import java.util.LinkedHashMap;
 import java.util.Set;
 import java.util.function.Function;
@@ -35,10 +32,8 @@ public class GrpcClientDecorator extends ClientDecorator {
   public static final CharSequence GRPC_MESSAGE = UTF8BytesString.create("grpc.message");

   private static DataStreamsContext createDsmContext() {
-    LinkedHashMap<String, String> result = new LinkedHashMap<>();
-    result.put(DIRECTION_TAG, DIRECTION_OUT);
-    result.put(TYPE_TAG, "grpc");
-    return DataStreamsContext.fromTags(result);
+    return DataStreamsContext.fromTags(
+        DataStreamsTags.create("grpc", DataStreamsTags.Direction.Outbound));
   }

   public static final GrpcClientDecorator DECORATE = new GrpcClientDecorator();
diff --git a/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/server/GrpcServerDecorator.java b/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/server/GrpcServerDecorator.java
index 40c1ec23736..944324f3b4d 100644
--- a/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/server/GrpcServerDecorator.java
+++ b/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/main/java/datadog/trace/instrumentation/armeria/grpc/server/GrpcServerDecorator.java
@@ -1,12 +1,9 @@
 package datadog.trace.instrumentation.armeria.grpc.server;

-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
-
 import datadog.trace.api.Config;
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.naming.SpanNaming;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.ErrorPriorities;
@@ -18,7 +15,6 @@
 import io.grpc.StatusException;
 import io.grpc.StatusRuntimeException;
 import java.util.BitSet;
-import java.util.LinkedHashMap;
 import java.util.function.Function;

 public class GrpcServerDecorator extends ServerDecorator {
@@ -33,15 +29,11 @@ public class GrpcServerDecorator extends ServerDecorator {
   public static final CharSequence COMPONENT_NAME = UTF8BytesString.create("armeria-grpc-server");
   public static final CharSequence GRPC_MESSAGE = UTF8BytesString.create("grpc.message");

-  private static final LinkedHashMap<String, String> createServerPathwaySortedTags() {
-    LinkedHashMap<String, String> result = new LinkedHashMap<>();
-    result.put(DIRECTION_TAG, DIRECTION_IN);
-    result.put(TYPE_TAG, "grpc");
-    return result;
+  private static DataStreamsTags createServerPathwaySortedTags() {
+    return DataStreamsTags.create("grpc", DataStreamsTags.Direction.Inbound);
   }

-  public static final LinkedHashMap<String, String> SERVER_PATHWAY_EDGE_TAGS =
-      createServerPathwaySortedTags();
+  public static final DataStreamsTags SERVER_PATHWAY_EDGE_TAGS = createServerPathwaySortedTags();

   public static final GrpcServerDecorator DECORATE = new GrpcServerDecorator();
   private static final Function NORMALIZE =
diff --git a/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/test/groovy/ArmeriaGrpcTest.groovy b/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/test/groovy/ArmeriaGrpcTest.groovy
index c8237c1d812..d7591a19bfe 100644
--- a/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/test/groovy/ArmeriaGrpcTest.groovy
+++ b/dd-java-agent/instrumentation/armeria/armeria-grpc-0.84/src/test/groovy/ArmeriaGrpcTest.groovy
@@ -254,14 +254,12 @@ abstract class ArmeriaGrpcTest extends VersionedNamingTestBase {
     if (isDataStreamsEnabled()) {
       StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
       verifyAll(first) {
-        edgeTags.containsAll(["direction:out", "type:grpc"])
-        edgeTags.size() == 2
+        tags.hasAllTags("direction:out", "type:grpc")
       }

       StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash }
       verifyAll(second) {
-        edgeTags.containsAll(["direction:in", "type:grpc"])
-        edgeTags.size() == 2
+        tags.hasAllTags("direction:in", "type:grpc")
       }
     }
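
Note: across this PR, hand-built LinkedHashMap<String, String> edge-tag maps (whose insertion order had to stay sorted by hand) are replaced by the immutable DataStreamsTags value type, and test assertions move from edgeTags list comparisons to tags.hasAllTags(...). A minimal sketch of the new producer-side pattern, using only the factory methods visible in this diff (the wrapper class and method names below are hypothetical):

    import datadog.trace.api.datastreams.DataStreamsContext;
    import datadog.trace.api.datastreams.DataStreamsTags;

    class DsmContextSketch {
      // Replaces: LinkedHashMap<String, String> result = new LinkedHashMap<>();
      //           result.put(DIRECTION_TAG, DIRECTION_OUT); result.put(TYPE_TAG, "grpc");
      static DataStreamsContext outboundGrpcContext() {
        DataStreamsTags tags = DataStreamsTags.create("grpc", DataStreamsTags.Direction.Outbound);
        return DataStreamsContext.fromTags(tags);
      }
    }
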
diff --git a/dd-java-agent/instrumentation/aws-java-eventbridge-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/eventbridge/EventBridgeInterceptor.java b/dd-java-agent/instrumentation/aws-java-eventbridge-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/eventbridge/EventBridgeInterceptor.java
index a7ca3e02bb7..30d8d02d356 100644
--- a/dd-java-agent/instrumentation/aws-java-eventbridge-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/eventbridge/EventBridgeInterceptor.java
+++ b/dd-java-agent/instrumentation/aws-java-eventbridge-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/eventbridge/EventBridgeInterceptor.java
@@ -2,18 +2,14 @@
 import static datadog.context.propagation.Propagators.defaultPropagator;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.BUS_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v2.eventbridge.TextMapInjectAdapter.SETTER;

 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.datastreams.PathwayContext;
 import datadog.trace.bootstrap.InstanceStore;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import java.util.ArrayList;
-import java.util.LinkedHashMap;
 import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,7 +85,9 @@ private String getTraceContextToInject(
     // Inject context
     datadog.context.Context context = span;
     if (traceConfig().isDataStreamsEnabled()) {
-      DataStreamsContext dsmContext = DataStreamsContext.fromTags(getTags(eventBusName));
+      DataStreamsTags tags =
+          DataStreamsTags.createWithBus(DataStreamsTags.Direction.Outbound, eventBusName);
+      DataStreamsContext dsmContext = DataStreamsContext.fromTags(tags);
       context = context.with(dsmContext);
     }
     defaultPropagator().inject(context, jsonBuilder, SETTER);
@@ -111,13 +109,4 @@ private String getTraceContextToInject(
     jsonBuilder.append('}');
     return jsonBuilder.toString();
   }
-
-  private LinkedHashMap<String, String> getTags(String eventBusName) {
-    LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-    sortedTags.put(DIRECTION_TAG, DIRECTION_OUT);
-    sortedTags.put(BUS_TAG, eventBusName);
-    sortedTags.put(TYPE_TAG, "bus");
-
-    return sortedTags;
-  }
 }
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1KinesisClientTest.groovy b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1KinesisClientTest.groovy
index 047f8cc2ec0..5b6c8d782e7 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1KinesisClientTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1KinesisClientTest.groovy
@@ -115,8 +115,7 @@ abstract class AWS1KinesisClientTest extends VersionedNamingTestBase {
         pathwayLatencyCount += group.pathwayLatency.count
         edgeLatencyCount += group.edgeLatency.count
         verifyAll(group) {
-          edgeTags.containsAll(["direction:" + dsmDirection, "topic:" + streamArn, "type:kinesis"])
-          edgeTags.size() == 3
+          tags.hasAllTags("direction:" + dsmDirection, "topic:" + streamArn, "type:kinesis")
         }
       }
       verifyAll {
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1SnsClientTest.groovy b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1SnsClientTest.groovy
index 28bd6dd4741..ca4ced2dbe0 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1SnsClientTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/dsmTest/groovy/AWS1SnsClientTest.groovy
@@ -96,8 +96,7 @@ abstract class AWS1SnsClientTest extends VersionedNamingTestBase {
         pathwayLatencyCount += group.pathwayLatency.count
         edgeLatencyCount += group.edgeLatency.count
         verifyAll(group) {
-          edgeTags.containsAll(["direction:" + dsmDirection, "topic:" + topicName, "type:sns"])
-          edgeTags.size() == 3
+          tags.hasAllTags("direction:" + dsmDirection, "topic:" + topicName, "type:sns")
         }
       }
       verifyAll {
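
Note: EventBridge bus names get a dedicated factory instead of a BUS_TAG map entry. A sketch of how the interceptor now builds its injection context, under the same assumptions (hypothetical helper class; DataStreamsTags.createWithBus and DataStreamsContext.fromTags are taken verbatim from the diff above):

    import datadog.trace.api.datastreams.DataStreamsContext;
    import datadog.trace.api.datastreams.DataStreamsTags;

    class EventBusTagsSketch {
      // Replaces the removed getTags(eventBusName) LinkedHashMap helper.
      static DataStreamsContext outboundBusContext(String eventBusName) {
        DataStreamsTags tags =
            DataStreamsTags.createWithBus(DataStreamsTags.Direction.Outbound, eventBusName);
        return DataStreamsContext.fromTags(tags);
      }
    }
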
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/AwsSdkClientDecorator.java b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/AwsSdkClientDecorator.java
index 9d73e248534..b164cf54349 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/AwsSdkClientDecorator.java
+++ b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/AwsSdkClientDecorator.java
@@ -3,7 +3,6 @@
 import static datadog.trace.api.datastreams.DataStreamsContext.create;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
 import static datadog.trace.bootstrap.instrumentation.api.ResourceNamePriorities.RPC_COMMAND_NAME;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;

 import com.amazonaws.AmazonWebServiceRequest;
 import com.amazonaws.AmazonWebServiceResponse;
@@ -15,6 +14,7 @@
 import datadog.trace.api.DDTags;
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.naming.SpanNaming;
 import datadog.trace.bootstrap.instrumentation.api.AgentScope;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
@@ -23,9 +23,7 @@
 import datadog.trace.bootstrap.instrumentation.api.Tags;
 import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString;
 import datadog.trace.bootstrap.instrumentation.decorator.HttpClientDecorator;
-import datadog.trace.core.datastreams.TagsProcessor;
 import java.net.URI;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.regex.Matcher;
@@ -255,17 +253,12 @@ && traceConfig().isDataStreamsEnabled()) {
     if (HttpMethodName.GET.name().equals(span.getTag(Tags.HTTP_METHOD))
         && ("GetObjectMetadataRequest".equalsIgnoreCase(awsOperation)
             || "GetObjectRequest".equalsIgnoreCase(awsOperation))) {
-      LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-
-      sortedTags.put(TagsProcessor.DIRECTION_TAG, TagsProcessor.DIRECTION_IN);
-      sortedTags.put(TagsProcessor.DATASET_NAME_TAG, key);
-      sortedTags.put(TagsProcessor.DATASET_NAMESPACE_TAG, bucket);
-      sortedTags.put(TagsProcessor.TOPIC_TAG, bucket);
-      sortedTags.put(TagsProcessor.TYPE_TAG, "s3");
-
+      DataStreamsTags tags =
+          DataStreamsTags.createWithDataset(
+              "s3", DataStreamsTags.Direction.Inbound, bucket, key, bucket);
       AgentTracer.get()
           .getDataStreamsMonitoring()
-          .setCheckpoint(span, create(sortedTags, 0, responseSize));
+          .setCheckpoint(span, create(tags, 0, responseSize));
     }

     if ("PutObjectRequest".equalsIgnoreCase(awsOperation)
@@ -275,18 +268,12 @@ && traceConfig().isDataStreamsEnabled()) {
       if (requestSize != null) {
         payloadSize = (long) requestSize;
       }
-
-      LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-
-      sortedTags.put(TagsProcessor.DIRECTION_TAG, DIRECTION_OUT);
-      sortedTags.put(TagsProcessor.DATASET_NAME_TAG, key);
-      sortedTags.put(TagsProcessor.DATASET_NAMESPACE_TAG, bucket);
-      sortedTags.put(TagsProcessor.TOPIC_TAG, bucket);
-      sortedTags.put(TagsProcessor.TYPE_TAG, "s3");
-
+      DataStreamsTags tags =
+          DataStreamsTags.createWithDataset(
+              "s3", DataStreamsTags.Direction.Outbound, bucket, key, bucket);
       AgentTracer.get()
          .getDataStreamsMonitoring()
-          .setCheckpoint(span, create(sortedTags, 0, payloadSize));
+          .setCheckpoint(span, create(tags, 0, payloadSize));
     }
   }
 }
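
Note: the S3 checkpoints use DataStreamsTags.createWithDataset. The argument order ("s3", direction, bucket, key, bucket) is copied from the call sites above; which positions mean dataset namespace versus topic is inferred from the removed DATASET_NAMESPACE_TAG/DATASET_NAME_TAG/TOPIC_TAG puts, not from the factory's declaration. A hypothetical helper covering both the Get (inbound, response size) and Put (outbound, request size) branches:

    import static datadog.trace.api.datastreams.DataStreamsContext.create;

    import datadog.trace.api.datastreams.DataStreamsTags;
    import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
    import datadog.trace.bootstrap.instrumentation.api.AgentTracer;

    class S3CheckpointSketch {
      static void checkpoint(
          AgentSpan span, DataStreamsTags.Direction direction, String bucket, String key, long size) {
        // bucket doubles as dataset namespace and topic; key is the dataset name.
        DataStreamsTags tags = DataStreamsTags.createWithDataset("s3", direction, bucket, key, bucket);
        AgentTracer.get().getDataStreamsMonitoring().setCheckpoint(span, create(tags, 0, size));
      }
    }
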
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/TracingRequestHandler.java b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/TracingRequestHandler.java
index 1e1db786bba..07b872a1551 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/TracingRequestHandler.java
+++ b/dd-java-agent/instrumentation/aws-java-sdk-1.11.0/src/main/java/datadog/trace/instrumentation/aws/v0/TracingRequestHandler.java
@@ -6,10 +6,6 @@
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.blackholeSpan;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v0.AwsSdkClientDecorator.AWS_LEGACY_TRACING;
 import static datadog.trace.instrumentation.aws.v0.AwsSdkClientDecorator.DECORATE;
@@ -20,14 +16,11 @@
 import com.amazonaws.handlers.RequestHandler2;
 import datadog.context.propagation.Propagators;
 import datadog.trace.api.Config;
-import datadog.trace.api.datastreams.AgentDataStreamsMonitoring;
-import datadog.trace.api.datastreams.DataStreamsContext;
-import datadog.trace.api.datastreams.PathwayContext;
+import datadog.trace.api.datastreams.*;
 import datadog.trace.bootstrap.ContextStore;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.AgentTracer;
 import java.util.Date;
-import java.util.LinkedHashMap;
 import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -116,16 +109,14 @@ && traceConfig().isDataStreamsEnabled()
       List records =
           GetterAccess.of(response.getAwsResponse()).getRecords(response.getAwsResponse());
       if (null != records) {
-        LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-        sortedTags.put(DIRECTION_TAG, DIRECTION_IN);
-        sortedTags.put(TOPIC_TAG, streamArn);
-        sortedTags.put(TYPE_TAG, "kinesis");
+        DataStreamsTags tags =
+            DataStreamsTags.create("kinesis", DataStreamsTags.Direction.Inbound, streamArn);
         for (Object record : records) {
           Date arrivalTime = GetterAccess.of(record).getApproximateArrivalTimestamp(record);
           AgentDataStreamsMonitoring dataStreamsMonitoring =
               AgentTracer.get().getDataStreamsMonitoring();
           PathwayContext pathwayContext = dataStreamsMonitoring.newPathwayContext();
-          DataStreamsContext context = create(sortedTags, arrivalTime.getTime(), 0);
+          DataStreamsContext context = create(tags, arrivalTime.getTime(), 0);
           pathwayContext.setCheckpoint(context, dataStreamsMonitoring::add);
           if (!span.context().getPathwayContext().isStarted()) {
             span.context().mergePathwayContext(pathwayContext);
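
Note: on the Kinesis consumer path the tags are now built once per response instead of once per record; only the approximate-arrival timestamp varies per record. A condensed sketch of the per-record checkpoint (hypothetical helper methods; the monitoring and pathway calls mirror the handler above):

    import static datadog.trace.api.datastreams.DataStreamsContext.create;

    import datadog.trace.api.datastreams.AgentDataStreamsMonitoring;
    import datadog.trace.api.datastreams.DataStreamsTags;
    import datadog.trace.api.datastreams.PathwayContext;
    import datadog.trace.bootstrap.instrumentation.api.AgentTracer;

    class KinesisCheckpointSketch {
      // Build the tags once; reuse them for every record in the batch.
      static DataStreamsTags inboundKinesisTags(String streamArn) {
        return DataStreamsTags.create("kinesis", DataStreamsTags.Direction.Inbound, streamArn);
      }

      static PathwayContext checkpointRecord(DataStreamsTags tags, long arrivalTimeMillis) {
        AgentDataStreamsMonitoring monitoring = AgentTracer.get().getDataStreamsMonitoring();
        PathwayContext pathwayContext = monitoring.newPathwayContext();
        pathwayContext.setCheckpoint(create(tags, arrivalTimeMillis, 0), monitoring::add);
        return pathwayContext;
      }
    }
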
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2KinesisDataStreamsTest.groovy b/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2KinesisDataStreamsTest.groovy
index 1e649cbb25e..39aba2e2225 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2KinesisDataStreamsTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2KinesisDataStreamsTest.groovy
@@ -156,8 +156,7 @@ abstract class Aws2KinesisDataStreamsTest extends VersionedNamingTestBase {
         pathwayLatencyCount += group.pathwayLatency.count
         edgeLatencyCount += group.edgeLatency.count
         verifyAll(group) {
-          edgeTags.containsAll(["direction:" + dsmDirection, "topic:arnprefix:stream/somestream", "type:kinesis"])
-          edgeTags.size() == 3
+          tags.hasAllTags("direction:" + dsmDirection, "topic:arnprefix:stream/somestream", "type:kinesis")
         }
       }
       verifyAll {
@@ -278,8 +277,7 @@ abstract class Aws2KinesisDataStreamsTest extends VersionedNamingTestBase {
         pathwayLatencyCount += group.pathwayLatency.count
         edgeLatencyCount += group.edgeLatency.count
         verifyAll(group) {
-          edgeTags.containsAll(["direction:" + dsmDirection, "topic:arnprefix:stream/somestream", "type:kinesis"])
-          edgeTags.size() == 3
+          tags.hasAllTags("direction:" + dsmDirection, "topic:arnprefix:stream/somestream", "type:kinesis")
         }
       }
       verifyAll {
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2SnsDataStreamsTest.groovy b/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2SnsDataStreamsTest.groovy
index 9147636660d..a312e4196fe 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2SnsDataStreamsTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/dsmTest/groovy/Aws2SnsDataStreamsTest.groovy
@@ -144,8 +144,7 @@ abstract class Aws2SnsDataStreamsTest extends VersionedNamingTestBase {
         pathwayLatencyCount += group.pathwayLatency.count
         edgeLatencyCount += group.edgeLatency.count
         verifyAll(group) {
-          edgeTags.containsAll(["direction:" + dsmDirection, "topic:mytopic", "type:sns"])
-          edgeTags.size() == 3
+          tags.hasAllTags("direction:" + dsmDirection, "topic:mytopic", "type:sns")
         }
       }
       verifyAll {
@@ -243,8 +242,7 @@ abstract class Aws2SnsDataStreamsTest extends VersionedNamingTestBase {
         pathwayLatencyCount += group.pathwayLatency.count
         edgeLatencyCount += group.edgeLatency.count
         verifyAll(group) {
-          edgeTags.containsAll(["direction:" + dsmDirection, "topic:mytopic", "type:sns"])
-          edgeTags.size() == 3
+          tags.hasAllTags("direction:" + dsmDirection, "topic:mytopic", "type:sns")
         }
       }
       verifyAll {
diff --git a/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/main/java/datadog/trace/instrumentation/aws/v2/AwsSdkClientDecorator.java b/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/main/java/datadog/trace/instrumentation/aws/v2/AwsSdkClientDecorator.java
index 5ac706df645..c5aa03bf64f 100644
--- a/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/main/java/datadog/trace/instrumentation/aws/v2/AwsSdkClientDecorator.java
+++ b/dd-java-agent/instrumentation/aws-java-sdk-2.2/src/main/java/datadog/trace/instrumentation/aws/v2/AwsSdkClientDecorator.java
@@ -2,11 +2,6 @@
 import static datadog.trace.api.datastreams.DataStreamsContext.create;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;

 import datadog.context.propagation.CarrierSetter;
 import datadog.trace.api.Config;
@@ -15,6 +10,7 @@
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
 import datadog.trace.api.datastreams.AgentDataStreamsMonitoring;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.datastreams.PathwayContext;
 import datadog.trace.api.naming.SpanNaming;
 import datadog.trace.bootstrap.InstanceStore;
@@ -25,7 +21,6 @@
 import datadog.trace.bootstrap.instrumentation.api.Tags;
 import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString;
 import datadog.trace.bootstrap.instrumentation.decorator.HttpClientDecorator;
-import datadog.trace.core.datastreams.TagsProcessor;
 import datadog.trace.payloadtags.PayloadTagsData;
 import java.net.URI;
 import java.time.Instant;
@@ -33,7 +28,6 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -338,10 +332,9 @@ public AgentSpan onSdkResponse(
           //noinspection unchecked
           List<SdkPojo> records = (List<SdkPojo>) recordsRaw;
           if (!records.isEmpty()) {
-            LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-            sortedTags.put(DIRECTION_TAG, DIRECTION_IN);
-            sortedTags.put(TOPIC_TAG, streamArn);
-            sortedTags.put(TYPE_TAG, "kinesis");
+            DataStreamsTags tags =
+                DataStreamsTags.create(
+                    "kinesis", DataStreamsTags.Direction.Inbound, streamArn);
             if (null == kinesisApproximateArrivalTimestampField) {
               Optional<SdkField<?>> maybeField =
                   records.get(0).sdkFields().stream()
@@ -363,7 +356,7 @@ public AgentSpan onSdkResponse(
                     AgentTracer.get().getDataStreamsMonitoring();
                 PathwayContext pathwayContext = dataStreamsMonitoring.newPathwayContext();
                 pathwayContext.setCheckpoint(
-                    create(sortedTags, arrivalTime.toEpochMilli(), 0),
+                    create(tags, arrivalTime.toEpochMilli(), 0),
                     dataStreamsMonitoring::add);
                 if (!span.context().getPathwayContext().isStarted()) {
                   span.context().mergePathwayContext(pathwayContext);
@@ -384,17 +377,12 @@ public AgentSpan onSdkResponse(
       if (key != null && bucket != null && awsOperation != null) {
         if ("GetObject".equalsIgnoreCase(awsOperation)) {
-          LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-
-          sortedTags.put(TagsProcessor.DIRECTION_TAG, TagsProcessor.DIRECTION_IN);
-          sortedTags.put(TagsProcessor.DATASET_NAME_TAG, key);
-          sortedTags.put(TagsProcessor.DATASET_NAMESPACE_TAG, bucket);
-          sortedTags.put(TagsProcessor.TOPIC_TAG, bucket);
-          sortedTags.put(TagsProcessor.TYPE_TAG, "s3");
-
+          DataStreamsTags tags =
+              DataStreamsTags.createWithDataset(
+                  "s3", DataStreamsTags.Direction.Inbound, bucket, key, bucket);
           AgentTracer.get()
               .getDataStreamsMonitoring()
-              .setCheckpoint(span, create(sortedTags, 0, responseSize));
+              .setCheckpoint(span, create(tags, 0, responseSize));
         }

         if ("PutObject".equalsIgnoreCase(awsOperation)) {
@@ -404,17 +392,12 @@ public AgentSpan onSdkResponse(
           if (requestSize != null) {
            payloadSize = (long) requestSize;
          }

-          LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-
-          sortedTags.put(TagsProcessor.DIRECTION_TAG, DIRECTION_OUT);
-          sortedTags.put(TagsProcessor.DATASET_NAME_TAG, key);
-          sortedTags.put(TagsProcessor.DATASET_NAMESPACE_TAG, bucket);
-          sortedTags.put(TagsProcessor.TOPIC_TAG, bucket);
-          sortedTags.put(TagsProcessor.TYPE_TAG, "s3");
-
+          DataStreamsTags tags =
+              DataStreamsTags.createWithDataset(
+                  "s3", DataStreamsTags.Direction.Outbound, bucket, key, bucket);
           AgentTracer.get()
               .getDataStreamsMonitoring()
-              .setCheckpoint(span, create(sortedTags, 0, payloadSize));
+              .setCheckpoint(span, create(tags, 0, payloadSize));
         }
       }
     }
diff --git a/dd-java-agent/instrumentation/aws-java-sns-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sns/SnsInterceptor.java b/dd-java-agent/instrumentation/aws-java-sns-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sns/SnsInterceptor.java
index 3d1333a777f..0b991bdef8e 100644
--- a/dd-java-agent/instrumentation/aws-java-sns-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sns/SnsInterceptor.java
+++ b/dd-java-agent/instrumentation/aws-java-sns-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sns/SnsInterceptor.java
@@ -2,10 +2,6 @@
 import static datadog.context.propagation.Propagators.defaultPropagator;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v1.sns.TextMapInjectAdapter.SETTER;

 import com.amazonaws.AmazonWebServiceRequest;
@@ -16,13 +12,13 @@
 import com.amazonaws.services.sns.model.PublishRequest;
 import datadog.context.Context;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.ContextStore;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.AgentTracer;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
-import java.util.LinkedHashMap;
 import java.util.Map;
@@ -114,12 +110,7 @@ private AgentSpan newSpan(AmazonWebServiceRequest request) {
     return span;
   }

-  private LinkedHashMap<String, String> getTags(String snsTopicName) {
-    LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-    sortedTags.put(DIRECTION_TAG, DIRECTION_OUT);
-    sortedTags.put(TOPIC_TAG, snsTopicName);
-    sortedTags.put(TYPE_TAG, "sns");
-
-    return sortedTags;
+  private DataStreamsTags getTags(String snsTopicName) {
+    return DataStreamsTags.create("sns", DataStreamsTags.Direction.Outbound, snsTopicName);
   }
 }
diff --git a/dd-java-agent/instrumentation/aws-java-sns-1.0/src/test/groovy/SnsClientTest.groovy b/dd-java-agent/instrumentation/aws-java-sns-1.0/src/test/groovy/SnsClientTest.groovy
index 1653ecfa586..efa7dfe4c8c 100644
--- a/dd-java-agent/instrumentation/aws-java-sns-1.0/src/test/groovy/SnsClientTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sns-1.0/src/test/groovy/SnsClientTest.groovy
@@ -194,10 +194,7 @@ abstract class SnsClientTest extends VersionedNamingTestBase {

     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
     verifyAll(first) {
-      edgeTags.contains("direction:out")
-      edgeTags.contains("topic:testtopic")
-      edgeTags.contains("type:sns")
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:out", "topic:testtopic", "type:sns")
     }
   }

diff --git a/dd-java-agent/instrumentation/aws-java-sns-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sns/SnsInterceptor.java b/dd-java-agent/instrumentation/aws-java-sns-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sns/SnsInterceptor.java
index 20143055b2a..3a10aa085e4 100644
--- a/dd-java-agent/instrumentation/aws-java-sns-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sns/SnsInterceptor.java
+++ b/dd-java-agent/instrumentation/aws-java-sns-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sns/SnsInterceptor.java
@@ -2,19 +2,15 @@
 import static datadog.context.propagation.Propagators.defaultPropagator;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v2.sns.TextMapInjectAdapter.SETTER;

 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.InstanceStore;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedHashMap;
 import java.util.Map;
 import software.amazon.awssdk.core.SdkBytes;
 import software.amazon.awssdk.core.SdkRequest;
@@ -106,12 +102,7 @@ public SdkRequest modifyRequest(
     return context.request();
   }

-  private LinkedHashMap<String, String> getTags(String snsTopicName) {
-    LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-    sortedTags.put(DIRECTION_TAG, DIRECTION_OUT);
-    sortedTags.put(TOPIC_TAG, snsTopicName);
-    sortedTags.put(TYPE_TAG, "sns");
-
-    return sortedTags;
+  private DataStreamsTags getTags(String snsTopicName) {
+    return DataStreamsTags.create("sns", DataStreamsTags.Direction.Outbound, snsTopicName);
   }
 }
diff --git a/dd-java-agent/instrumentation/aws-java-sns-2.0/src/test/groovy/SnsClientTest.groovy b/dd-java-agent/instrumentation/aws-java-sns-2.0/src/test/groovy/SnsClientTest.groovy
index 3e40aa138dc..4e4f83ccc84 100644
--- a/dd-java-agent/instrumentation/aws-java-sns-2.0/src/test/groovy/SnsClientTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sns-2.0/src/test/groovy/SnsClientTest.groovy
@@ -164,10 +164,7 @@ abstract class SnsClientTest extends VersionedNamingTestBase {

     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
     verifyAll(first) {
-      edgeTags.contains("direction:out")
-      edgeTags.contains("topic:testtopic")
-      edgeTags.contains("type:sns")
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:out", "topic:testtopic", "type:sns")
     }
   }

diff --git a/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/SqsInterceptor.java b/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/SqsInterceptor.java
index 4b353f12591..ef6c1e516a1 100644
--- a/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/SqsInterceptor.java
+++ b/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/SqsInterceptor.java
@@ -4,10 +4,6 @@
 import static datadog.trace.bootstrap.instrumentation.api.AgentPropagation.DSM_CONCERN;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.api.URIUtils.urlFileName;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v1.sqs.MessageAttributeInjector.SETTER;

 import com.amazonaws.AmazonWebServiceRequest;
@@ -21,11 +17,11 @@
 import datadog.context.propagation.Propagator;
 import datadog.context.propagation.Propagators;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.ContextStore;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -95,11 +91,7 @@ private AgentSpan newSpan(AmazonWebServiceRequest request) {
     return span;
   }

-  private static LinkedHashMap<String, String> getTags(String queueUrl) {
-    LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-    sortedTags.put(DIRECTION_TAG, DIRECTION_OUT);
-    sortedTags.put(TOPIC_TAG, urlFileName(queueUrl));
-    sortedTags.put(TYPE_TAG, "sqs");
-    return sortedTags;
+  private static DataStreamsTags getTags(String queueUrl) {
+    return DataStreamsTags.create("sqs", DataStreamsTags.Direction.Outbound, urlFileName(queueUrl));
   }
 }
diff --git a/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/TracingIterator.java b/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/TracingIterator.java
index 8eb154800c9..25a82dbac65 100644
--- a/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/TracingIterator.java
+++ b/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/main/java/datadog/trace/instrumentation/aws/v1/sqs/TracingIterator.java
@@ -6,10 +6,6 @@
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.closePrevious;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.api.URIUtils.urlFileName;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v1.sqs.MessageExtractAdapter.GETTER;
 import static datadog.trace.instrumentation.aws.v1.sqs.SqsDecorator.BROKER_DECORATE;
 import static datadog.trace.instrumentation.aws.v1.sqs.SqsDecorator.CONSUMER_DECORATE;
@@ -20,11 +16,11 @@
 import com.amazonaws.services.sqs.model.Message;
 import datadog.trace.api.Config;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpanContext;
 import datadog.trace.bootstrap.instrumentation.api.AgentTracer;
 import java.util.Iterator;
-import java.util.LinkedHashMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -89,11 +85,9 @@ protected void startNewMessageSpan(Message message) {
       }

       AgentSpan span = startSpan(SQS_INBOUND_OPERATION, batchContext);
-      LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-      sortedTags.put(DIRECTION_TAG, DIRECTION_IN);
-      sortedTags.put(TOPIC_TAG, urlFileName(queueUrl));
-      sortedTags.put(TYPE_TAG, "sqs");
-      AgentTracer.get().getDataStreamsMonitoring().setCheckpoint(span, create(sortedTags, 0, 0));
+      DataStreamsTags tags =
+          DataStreamsTags.create("sqs", DataStreamsTags.Direction.Inbound, urlFileName(queueUrl));
+      AgentTracer.get().getDataStreamsMonitoring().setCheckpoint(span, create(tags, 0, 0));

       CONSUMER_DECORATE.afterStart(span);
       CONSUMER_DECORATE.onConsume(span, queueUrl);
diff --git a/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/test/groovy/SqsClientTest.groovy b/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/test/groovy/SqsClientTest.groovy
index 9ae956e5068..bcf94da0e1c 100644
--- a/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/test/groovy/SqsClientTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sqs-1.0/src/test/groovy/SqsClientTest.groovy
@@ -17,6 +17,7 @@ import datadog.trace.api.DDSpanId
 import datadog.trace.api.DDSpanTypes
 import datadog.trace.api.DDTags
 import datadog.trace.api.config.GeneralConfig
+import datadog.trace.api.datastreams.DataStreamsTags
 import datadog.trace.api.naming.SpanNaming
 import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags
 import datadog.trace.bootstrap.instrumentation.api.Tags
@@ -172,14 +173,12 @@ abstract class SqsClientTest extends VersionedNamingTestBase {

     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
     verifyAll(first) {
-      edgeTags == ["direction:out", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:out", "topic:somequeue", "type:sqs")
     }

     StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash }
     verifyAll(second) {
-      edgeTags == ["direction:in", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:in", "topic:somequeue", "type:sqs")
     }
   }

@@ -629,8 +628,7 @@ class SqsClientV1DataStreamsForkedTest extends SqsClientTest {
     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == -2734507826469073289 }
     verifyAll(first) {
-      edgeTags == ["direction:in", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:in", "topic:somequeue", "type:sqs")
     }

     cleanup:
@@ -659,8 +657,7 @@ class SqsClientV1DataStreamsForkedTest extends SqsClientTest {
     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
     verifyAll(first) {
-      edgeTags == ["direction:in", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:in", "topic:somequeue", "type:sqs")
     }

     cleanup:
@@ -690,8 +687,10 @@ class SqsClientV1DataStreamsForkedTest extends SqsClientTest {
     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
     verifyAll(first) {
-      edgeTags == ["direction:in", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.direction == DataStreamsTags.DIRECTION_TAG + ":in"
+      tags.topic == DataStreamsTags.TOPIC_TAG + ":somequeue"
+      tags.type == DataStreamsTags.TYPE_TAG + ":sqs"
+      tags.nonNullSize() == 3
     }

     cleanup:
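
Note: most tests in this PR switch to the coarse hasAllTags(...) check, while the last SqsClientV1DataStreamsForkedTest block above shows the finer-grained surface: the tag-name constants (DIRECTION_TAG, TOPIC_TAG, TYPE_TAG) now live on DataStreamsTags, per-tag accessors evaluate to "key:value" strings, and nonNullSize() replaces edgeTags.size(). A Java rendering of the two equivalent styles (assuming the Groovy property access tags.direction resolves to a getter; assert is used where the Groovy tests use verifyAll):

    import datadog.trace.api.datastreams.DataStreamsTags;

    class TagAssertionSketch {
      static void check(DataStreamsTags tags) {
        // Coarse, set-style check:
        assert tags.hasAllTags("direction:in", "topic:somequeue", "type:sqs");
        // Per-tag check; the accessor name is assumed from the Groovy property:
        assert tags.getDirection().equals(DataStreamsTags.DIRECTION_TAG + ":in");
        assert tags.nonNullSize() == 3;
      }
    }
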
diff --git a/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/SqsInterceptor.java b/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/SqsInterceptor.java
index bc5729e49c1..399cd889a31 100644
--- a/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/SqsInterceptor.java
+++ b/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/SqsInterceptor.java
@@ -3,20 +3,16 @@
 import static datadog.trace.api.datastreams.PathwayContext.DATADOG_KEY;
 import static datadog.trace.bootstrap.instrumentation.api.AgentPropagation.DSM_CONCERN;
 import static datadog.trace.bootstrap.instrumentation.api.URIUtils.urlFileName;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v2.sqs.MessageAttributeInjector.SETTER;

 import datadog.context.propagation.Propagator;
 import datadog.context.propagation.Propagators;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.InstanceStore;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -95,15 +91,10 @@ public SdkRequest modifyRequest(

   private datadog.context.Context getContext(
       ExecutionAttributes executionAttributes, String queueUrl) {
     AgentSpan span = executionAttributes.getAttribute(SPAN_ATTRIBUTE);
-    DataStreamsContext dsmContext = DataStreamsContext.fromTags(getTags(queueUrl));
-    return span.with(dsmContext);
-  }
-  private LinkedHashMap<String, String> getTags(String queueUrl) {
-    LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-    sortedTags.put(DIRECTION_TAG, DIRECTION_OUT);
-    sortedTags.put(TOPIC_TAG, urlFileName(queueUrl));
-    sortedTags.put(TYPE_TAG, "sqs");
-    return sortedTags;
+    DataStreamsTags tags =
+        DataStreamsTags.create("sqs", DataStreamsTags.Direction.Outbound, urlFileName(queueUrl));
+    DataStreamsContext dsmContext = DataStreamsContext.fromTags(tags);
+    return span.with(dsmContext);
   }
 }
diff --git a/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/TracingIterator.java b/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/TracingIterator.java
index 2213ac64d6e..3a00836e44d 100644
--- a/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/TracingIterator.java
+++ b/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/main/java/datadog/trace/instrumentation/aws/v2/sqs/TracingIterator.java
@@ -6,10 +6,6 @@
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.closePrevious;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.api.URIUtils.urlFileName;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.aws.v2.sqs.MessageExtractAdapter.GETTER;
 import static datadog.trace.instrumentation.aws.v2.sqs.SqsDecorator.BROKER_DECORATE;
 import static datadog.trace.instrumentation.aws.v2.sqs.SqsDecorator.CONSUMER_DECORATE;
@@ -19,11 +15,11 @@
 import static java.util.concurrent.TimeUnit.MILLISECONDS;

 import datadog.trace.api.Config;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpanContext;
 import datadog.trace.bootstrap.instrumentation.api.AgentTracer;
 import java.util.Iterator;
-import java.util.LinkedHashMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import software.amazon.awssdk.services.sqs.model.Message;
@@ -91,11 +87,9 @@ protected void startNewMessageSpan(Message message) {
       }

       AgentSpan span = startSpan(SQS_INBOUND_OPERATION, batchContext);
-      LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
-      sortedTags.put(DIRECTION_TAG, DIRECTION_IN);
-      sortedTags.put(TOPIC_TAG, urlFileName(queueUrl));
-      sortedTags.put(TYPE_TAG, "sqs");
-      AgentTracer.get().getDataStreamsMonitoring().setCheckpoint(span, create(sortedTags, 0, 0));
+      DataStreamsTags tags =
+          DataStreamsTags.create("sqs", DataStreamsTags.Direction.Inbound, urlFileName(queueUrl));
+      AgentTracer.get().getDataStreamsMonitoring().setCheckpoint(span, create(tags, 0, 0));

       CONSUMER_DECORATE.afterStart(span);
       CONSUMER_DECORATE.onConsume(span, queueUrl, requestId);
diff --git a/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/test/groovy/SqsClientTest.groovy b/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/test/groovy/SqsClientTest.groovy
index c0a1085d6ad..fe196f12a6f 100644
--- a/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/test/groovy/SqsClientTest.groovy
+++ b/dd-java-agent/instrumentation/aws-java-sqs-2.0/src/test/groovy/SqsClientTest.groovy
@@ -173,14 +173,12 @@ abstract class SqsClientTest extends VersionedNamingTestBase {

     StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
     verifyAll(first) {
-      edgeTags == ["direction:out", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:out", "topic:somequeue", "type:sqs")
     }

     StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash }
     verifyAll(second) {
-      edgeTags == ["direction:in", "topic:somequeue", "type:sqs"]
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:in", "topic:somequeue", "type:sqs")
     }
   }

diff --git a/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PubSubDecorator.java b/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PubSubDecorator.java
index 3e61cd28753..a6287c22cef 100644
--- a/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PubSubDecorator.java
+++ b/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PubSubDecorator.java
@@ -2,10 +2,6 @@
 import static datadog.trace.bootstrap.instrumentation.api.AgentPropagation.extractContextAndGetSpanContext;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.SUBSCRIPTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;

 import com.google.protobuf.Timestamp;
 import com.google.pubsub.v1.PubsubMessage;
@@ -14,15 +10,10 @@
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.naming.SpanNaming;
-import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
-import datadog.trace.bootstrap.instrumentation.api.AgentSpanContext;
-import datadog.trace.bootstrap.instrumentation.api.AgentTracer;
-import datadog.trace.bootstrap.instrumentation.api.InternalSpanTypes;
-import datadog.trace.bootstrap.instrumentation.api.Tags;
-import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString;
+import datadog.trace.bootstrap.instrumentation.api.*;
 import datadog.trace.bootstrap.instrumentation.decorator.MessagingClientDecorator;
-import java.util.LinkedHashMap;
 import java.util.function.Function;
 import java.util.function.Supplier;
 import java.util.regex.Matcher;
@@ -133,10 +124,9 @@ public AgentSpan onConsume(final PubsubMessage message, final String subscriptio
         extractContextAndGetSpanContext(message, TextMapExtractAdapter.GETTER);
     final AgentSpan span = startSpan(PUBSUB_CONSUME, spanContext);
     final CharSequence parsedSubscription = extractSubscription(subscription);
-    final LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>(3);
-    sortedTags.put(DIRECTION_TAG, DIRECTION_IN);
-    sortedTags.put(SUBSCRIPTION_TAG, parsedSubscription.toString());
-    sortedTags.put(TYPE_TAG, "google-pubsub");
+    DataStreamsTags tags =
+        DataStreamsTags.createWithSubscription(
+            "google-pubsub", DataStreamsTags.Direction.Inbound, parsedSubscription.toString());
     final Timestamp publishTime = message.getPublishTime();
     // FIXME: use full nanosecond resolution when this method will accept nanos
     AgentTracer.get()
@@ -144,7 +134,7 @@ public AgentSpan onConsume(final PubsubMessage message, final String subscriptio
         .setCheckpoint(
             span,
             DataStreamsContext.create(
-                sortedTags,
+                tags,
                 publishTime.getSeconds() * 1_000 + publishTime.getNanos() / (int) 1e6,
                 message.getSerializedSize()));
     afterStart(span);
diff --git a/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PublisherInstrumentation.java b/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PublisherInstrumentation.java
index 87f8413d80b..7c51970ffc8 100644
--- a/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PublisherInstrumentation.java
+++ b/dd-java-agent/instrumentation/google-pubsub/src/main/java/datadog/trace/instrumentation/googlepubsub/PublisherInstrumentation.java
@@ -5,10 +5,6 @@
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.java.concurrent.ExcludeFilter.ExcludeType.RUNNABLE;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
 import static datadog.trace.instrumentation.googlepubsub.PubSubDecorator.PRODUCER_DECORATE;
 import static datadog.trace.instrumentation.googlepubsub.PubSubDecorator.PUBSUB_PRODUCE;
 import static datadog.trace.instrumentation.googlepubsub.TextMapInjectAdapter.SETTER;
@@ -23,11 +19,11 @@
 import datadog.trace.agent.tooling.Instrumenter;
 import datadog.trace.agent.tooling.InstrumenterModule;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.bootstrap.instrumentation.api.AgentScope;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.java.concurrent.ExcludeFilter;
 import java.util.Collection;
-import java.util.LinkedHashMap;
 import java.util.Map;
 import net.bytebuddy.asm.Advice;
@@ -75,13 +71,11 @@ public static AgentScope before(
       PRODUCER_DECORATE.afterStart(span);
       PRODUCER_DECORATE.onProduce(span, topicName);

-      LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>(3);
-      sortedTags.put(DIRECTION_TAG, DIRECTION_OUT);
-      sortedTags.put(TOPIC_TAG, topicName.toString());
-      sortedTags.put(TYPE_TAG, "google-pubsub");
-
+      DataStreamsTags tags =
+          DataStreamsTags.create(
+              "google-pubsub", DataStreamsTags.Direction.Outbound, topicName.toString());
       PubsubMessage.Builder builder = msg.toBuilder();
-      DataStreamsContext dsmContext = DataStreamsContext.fromTags(sortedTags);
+      DataStreamsContext dsmContext = DataStreamsContext.fromTags(tags);
       defaultPropagator().inject(span.with(dsmContext), builder, SETTER);
       msg = builder.build();
       return activateSpan(span);
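
Note: Pub/Sub is the one consumer in this PR that keys its inbound edge on the subscription rather than a topic, hence the separate createWithSubscription factory. A sketch of building the consume-side context (hypothetical helper; the millisecond rounding matches PubSubDecorator above):

    import datadog.trace.api.datastreams.DataStreamsContext;
    import datadog.trace.api.datastreams.DataStreamsTags;

    class PubSubConsumeSketch {
      // Consumer edges are keyed on subscription:..., not topic:...
      static DataStreamsContext consumeContext(
          String parsedSubscription, long publishTimeMillis, long payloadSize) {
        DataStreamsTags tags =
            DataStreamsTags.createWithSubscription(
                "google-pubsub", DataStreamsTags.Direction.Inbound, parsedSubscription);
        return DataStreamsContext.create(tags, publishTimeMillis, payloadSize);
      }
    }
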
diff --git a/dd-java-agent/instrumentation/google-pubsub/src/test/groovy/PubSubTest.groovy b/dd-java-agent/instrumentation/google-pubsub/src/test/groovy/PubSubTest.groovy
index cf24e0115bc..1564628cc86 100644
--- a/dd-java-agent/instrumentation/google-pubsub/src/test/groovy/PubSubTest.groovy
+++ b/dd-java-agent/instrumentation/google-pubsub/src/test/groovy/PubSubTest.groovy
@@ -1,3 +1,4 @@
+
 import static datadog.trace.agent.test.utils.TraceUtils.basicSpan

 import com.google.api.gax.core.NoCredentialsProvider
@@ -38,10 +39,6 @@
 import spock.lang.Shared

 import java.nio.charset.StandardCharsets
 import java.util.concurrent.CountDownLatch
-import java.util.function.Function
-import java.util.function.ToDoubleFunction
-import java.util.function.ToIntFunction
-import java.util.function.ToLongFunction

 abstract class PubSubTest extends VersionedNamingTestBase {
   private static final String PROJECT_ID = "dd-trace-java"
@@ -236,13 +233,11 @@ abstract class PubSubTest extends VersionedNamingTestBase {
     StatsGroup sendStat = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0}
     verifyAll (sendStat) {
-      edgeTags.containsAll(["direction:out" , "topic:test-topic", "type:google-pubsub"])
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:out" , "topic:test-topic", "type:google-pubsub")
     }
     StatsGroup receiveStat = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == sendStat.hash}
     verifyAll(receiveStat) {
-      edgeTags.containsAll(["direction:in" , "subscription:my-subscription", "type:google-pubsub"])
-      edgeTags.size() == 3
+      tags.hasAllTags("direction:in" , "subscription:my-subscription", "type:google-pubsub")
       pathwayLatency.count == 1
       pathwayLatency.minValue > 0.0
       edgeLatency.count == 1
diff --git a/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/client/GrpcClientDecorator.java b/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/client/GrpcClientDecorator.java
index 3e70978df16..c35da6a0e34 100644
--- a/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/client/GrpcClientDecorator.java
+++ b/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/client/GrpcClientDecorator.java
@@ -3,9 +3,6 @@
 import static datadog.context.propagation.Propagators.defaultPropagator;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan;
 import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;

 import datadog.context.Context;
 import datadog.context.propagation.CarrierSetter;
@@ -14,6 +11,7 @@
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
 import datadog.trace.api.datastreams.DataStreamsContext;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.naming.SpanNaming;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.InternalSpanTypes;
@@ -23,7 +21,6 @@
 import io.grpc.MethodDescriptor;
 import io.grpc.Status;
 import java.util.BitSet;
-import java.util.LinkedHashMap;
 import java.util.Set;
 import java.util.function.Function;
@@ -35,10 +32,8 @@ public class GrpcClientDecorator extends ClientDecorator {
   public static final CharSequence GRPC_MESSAGE = UTF8BytesString.create("grpc.message");

   private static DataStreamsContext createDsmContext() {
-    LinkedHashMap<String, String> result = new LinkedHashMap<>();
-    result.put(DIRECTION_TAG, DIRECTION_OUT);
-    result.put(TYPE_TAG, "grpc");
-    return DataStreamsContext.fromTags(result);
+    return DataStreamsContext.fromTags(
+        DataStreamsTags.create("grpc", DataStreamsTags.Direction.Outbound));
   }

   public static final GrpcClientDecorator DECORATE = new GrpcClientDecorator();
diff --git a/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/server/GrpcServerDecorator.java b/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/server/GrpcServerDecorator.java
index 905ba71ca40..cafd0d64ad7 100644
--- a/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/server/GrpcServerDecorator.java
+++ b/dd-java-agent/instrumentation/grpc-1.5/src/main/java/datadog/trace/instrumentation/grpc/server/GrpcServerDecorator.java
@@ -1,12 +1,9 @@
 package datadog.trace.instrumentation.grpc.server;

-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN;
-import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG;
-
 import datadog.trace.api.Config;
 import datadog.trace.api.cache.DDCache;
 import datadog.trace.api.cache.DDCaches;
+import datadog.trace.api.datastreams.DataStreamsTags;
 import datadog.trace.api.naming.SpanNaming;
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.ErrorPriorities;
@@ -18,7 +15,6 @@
 import io.grpc.StatusException;
 import io.grpc.StatusRuntimeException;
 import java.util.BitSet;
-import java.util.LinkedHashMap;
 import java.util.function.Function;

 public class GrpcServerDecorator extends ServerDecorator {
@@ -33,15 +29,11 @@ public class GrpcServerDecorator extends ServerDecorator {
   public static final CharSequence COMPONENT_NAME = UTF8BytesString.create("grpc-server");
   public static final CharSequence GRPC_MESSAGE = UTF8BytesString.create("grpc.message");

-  private static final LinkedHashMap<String, String> createServerPathwaySortedTags() {
-    LinkedHashMap<String, String> result = new LinkedHashMap<>();
-    result.put(DIRECTION_TAG, DIRECTION_IN);
-    result.put(TYPE_TAG, "grpc");
-    return result;
+  private static DataStreamsTags createServerPathwaySortedTags() {
+    return DataStreamsTags.create("grpc", DataStreamsTags.Direction.Inbound);
   }

-  public static final LinkedHashMap<String, String> SERVER_PATHWAY_EDGE_TAGS =
-      createServerPathwaySortedTags();
+  public static final DataStreamsTags SERVER_PATHWAY_EDGE_TAGS = createServerPathwaySortedTags();

   public static final GrpcServerDecorator DECORATE = new GrpcServerDecorator();
   private static final Function NORMALIZE =
diff --git a/dd-java-agent/instrumentation/grpc-1.5/src/test/groovy/GrpcTest.groovy b/dd-java-agent/instrumentation/grpc-1.5/src/test/groovy/GrpcTest.groovy
index 0b4d174b4dc..37d87ce30c5 100644
--- a/dd-java-agent/instrumentation/grpc-1.5/src/test/groovy/GrpcTest.groovy
+++ b/dd-java-agent/instrumentation/grpc-1.5/src/test/groovy/GrpcTest.groovy
@@ -1,3 +1,4 @@
+
 import static datadog.trace.agent.test.asserts.TagsAssert.codeOriginTags

 import static datadog.trace.api.config.TraceInstrumentationConfig.GRPC_SERVER_ERROR_STATUSES
@@ -243,14 +244,12 @@ abstract class GrpcTest extends VersionedNamingTestBase {
     if (isDataStreamsEnabled()) {
       StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
       verifyAll(first) {
-        edgeTags.containsAll(["direction:out", "type:grpc"])
-        edgeTags.size() == 2
+        tags.hasAllTags("direction:out", "type:grpc")
       }

       StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash }
       verifyAll(second) {
-        edgeTags.containsAll(["direction:in", "type:grpc"])
-        edgeTags.size() == 2
+        tags.hasAllTags("direction:in", "type:grpc")
       }
     }
diff --git a/dd-java-agent/instrumentation/kafka-clients-0.11/src/latestDepTest/groovy/KafkaClientTestBase.groovy b/dd-java-agent/instrumentation/kafka-clients-0.11/src/latestDepTest/groovy/KafkaClientTestBase.groovy
index 493e1af0967..14e5029357d 100644
--- a/dd-java-agent/instrumentation/kafka-clients-0.11/src/latestDepTest/groovy/KafkaClientTestBase.groovy
+++ b/dd-java-agent/instrumentation/kafka-clients-0.11/src/latestDepTest/groovy/KafkaClientTestBase.groovy
@@ -1,11 +1,8 @@
-import static datadog.trace.agent.test.utils.TraceUtils.basicSpan
-import static datadog.trace.agent.test.utils.TraceUtils.runUnderTrace
-import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.isAsyncPropagationEnabled
-
 import datadog.trace.agent.test.asserts.TraceAssert
 import datadog.trace.agent.test.naming.VersionedNamingTestBase
 import datadog.trace.api.Config
 import datadog.trace.api.DDTags
+import datadog.trace.api.datastreams.DataStreamsTags
 import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags
 import datadog.trace.bootstrap.instrumentation.api.Tags
 import datadog.trace.common.writer.ListWriter
@@ -21,15 +18,19 @@
 import org.junit.Rule
 import org.springframework.kafka.core.DefaultKafkaConsumerFactory
 import org.springframework.kafka.listener.KafkaMessageListenerContainer
 import org.springframework.kafka.listener.MessageListener
+import org.springframework.kafka.test.EmbeddedKafkaBroker
+import org.springframework.kafka.test.rule.EmbeddedKafkaRule
 import org.springframework.kafka.test.utils.ContainerTestUtils
 import org.springframework.kafka.test.utils.KafkaTestUtils
-import org.springframework.kafka.test.rule.EmbeddedKafkaRule
-import org.springframework.kafka.test.EmbeddedKafkaBroker
 import spock.lang.Shared

 import java.util.concurrent.LinkedBlockingQueue
 import java.util.concurrent.TimeUnit

+import static datadog.trace.agent.test.utils.TraceUtils.basicSpan
+import static datadog.trace.agent.test.utils.TraceUtils.runUnderTrace
+import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.isAsyncPropagationEnabled
+
 abstract class KafkaClientTestBase extends VersionedNamingTestBase {
   static final SHARED_TOPIC = "shared.topic"
@@ -233,37 +234,31 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase {
     if (isDataStreamsEnabled()) {
       StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 }
       verifyAll(first) {
-        edgeTags == ["direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka"]
-        edgeTags.size() == 4
+        tags.hasAllTags("direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka")
       }

       StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash }
       verifyAll(second) {
-        edgeTags == [
+        tags.hasAllTags(
           "direction:in",
           "group:sender",
           "kafka_cluster_id:$clusterId",
           "topic:$SHARED_TOPIC".toString(),
           "type:kafka"
-        ]
-        edgeTags.size() == 5
+        )
       }

-      List<String> produce = [
-        "kafka_cluster_id:$clusterId",
-        "partition:" + received.partition(),
-        "topic:" + SHARED_TOPIC,
-        "type:kafka_produce"
-      ]
-      List<String> commit = [
-        "consumer_group:sender",
-        "kafka_cluster_id:$clusterId",
-        "partition:" + received.partition(),
-        "topic:" + SHARED_TOPIC,
-        "type:kafka_commit"
-      ]
-      verifyAll(TEST_DATA_STREAMS_WRITER.backlogs) {
contains(new AbstractMap.SimpleEntry<List<String>, Long>(commit, 1).toString()) - contains(new AbstractMap.SimpleEntry<List<String>, Long>(produce, 0).toString()) + def sorted = new ArrayList(TEST_DATA_STREAMS_WRITER.backlogs).sort() + verifyAll(sorted) { + size() == 2 + get(0).hasAllTags("consumer_group:sender", + "kafka_cluster_id:$clusterId", + "partition:" + received.partition(), + "topic:" + SHARED_TOPIC, + "type:kafka_commit") + get(1).hasAllTags("kafka_cluster_id:$clusterId", + "partition:" + received.partition(), + "topic:" + SHARED_TOPIC, + "type:kafka_produce") } } diff --git a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/ConsumerCoordinatorInstrumentation.java b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/ConsumerCoordinatorInstrumentation.java index 2479889d88c..67712ff706f 100644 --- a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/ConsumerCoordinatorInstrumentation.java +++ b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/ConsumerCoordinatorInstrumentation.java @@ -2,20 +2,15 @@ import static datadog.trace.agent.tooling.bytebuddy.matcher.ClassLoaderMatchers.hasClassNamed; import static datadog.trace.agent.tooling.bytebuddy.matcher.NameMatchers.named; -import static datadog.trace.core.datastreams.TagsProcessor.CONSUMER_GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.PARTITION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static net.bytebuddy.matcher.ElementMatchers.*; import com.google.auto.service.AutoService; import datadog.trace.agent.tooling.Instrumenter; import datadog.trace.agent.tooling.InstrumenterModule; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.InstrumentationContext; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.Map; import net.bytebuddy.asm.Advice; import net.bytebuddy.matcher.ElementMatcher; @@ -105,17 +100,15 @@ public static void trackCommitOffset( if (entry.getKey() == null || entry.getValue() == null) { continue; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(CONSUMER_GROUP_TAG, consumerGroup); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(PARTITION_TAG, String.valueOf(entry.getKey().partition())); - sortedTags.put(TOPIC_TAG, entry.getKey().topic()); - sortedTags.put(TYPE_TAG, "kafka_commit"); - AgentTracer.get() - .getDataStreamsMonitoring() - .trackBacklog(sortedTags, entry.getValue().offset()); + + DataStreamsTags tags = + DataStreamsTags.createWithPartition( + "kafka_commit", + entry.getKey().topic(), + String.valueOf(entry.getKey().partition()), + clusterId, + consumerGroup); + AgentTracer.get().getDataStreamsMonitoring().trackBacklog(tags, entry.getValue().offset()); } } diff --git a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerCallback.java b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerCallback.java index d1aa1cd9ec1..83962b9c56e 100644 ---
a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerCallback.java +++ b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerCallback.java @@ -1,16 +1,12 @@ package datadog.trace.instrumentation.kafka_clients; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.PARTITION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_clients.KafkaDecorator.PRODUCER_DECORATE; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; -import java.util.LinkedHashMap; import javax.annotation.Nullable; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.RecordMetadata; @@ -49,13 +45,14 @@ public void onCompletion(final RecordMetadata metadata, final Exception exceptio if (metadata == null) { return; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(PARTITION_TAG, String.valueOf(metadata.partition())); - sortedTags.put(TOPIC_TAG, metadata.topic()); - sortedTags.put(TYPE_TAG, "kafka_produce"); - AgentTracer.get().getDataStreamsMonitoring().trackBacklog(sortedTags, metadata.offset()); + + DataStreamsTags tags = + DataStreamsTags.createWithPartition( + "kafka_produce", + metadata.topic(), + String.valueOf(metadata.partition()), + clusterId, + null); + AgentTracer.get().getDataStreamsMonitoring().trackBacklog(tags, metadata.offset()); } } diff --git a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerInstrumentation.java b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerInstrumentation.java index a85ee9ce95d..7bb21a7e893 100644 --- a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerInstrumentation.java +++ b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/KafkaProducerInstrumentation.java @@ -8,11 +8,6 @@ import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activeSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_clients.KafkaDecorator.KAFKA_PRODUCE; import static datadog.trace.instrumentation.kafka_clients.KafkaDecorator.PRODUCER_DECORATE; import static 
datadog.trace.instrumentation.kafka_clients.KafkaDecorator.TIME_IN_QUEUE_ENABLED; @@ -31,13 +26,13 @@ import datadog.trace.agent.tooling.InstrumenterModule; import datadog.trace.api.Config; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.api.datastreams.StatsPoint; import datadog.trace.bootstrap.InstrumentationContext; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags; -import java.util.LinkedHashMap; import java.util.Map; import net.bytebuddy.asm.Advice; import net.bytebuddy.matcher.ElementMatcher; @@ -145,13 +140,9 @@ public static AgentScope onEnter( && !Config.get().isKafkaClientPropagationDisabledForTopic(record.topic())) { setter = TextMapInjectAdapter.SETTER; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_OUT); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(TOPIC_TAG, record.topic()); - sortedTags.put(TYPE_TAG, "kafka"); + DataStreamsTags tags = + DataStreamsTags.createWithClusterId( + "kafka", DataStreamsTags.Direction.Outbound, record.topic(), clusterId); try { defaultPropagator().inject(span, record.headers(), setter); if (STREAMING_CONTEXT.isDisabledForTopic(record.topic()) @@ -160,7 +151,7 @@ public static AgentScope onEnter( // message size. // The stats are saved in the pathway context and sent in PayloadSizeAdvice. Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(sortedTags); + DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(tags); dsmPropagator.inject(span.with(dsmContext), record.headers(), setter); AvroSchemaExtractor.tryExtractProducer(record, span); } @@ -179,7 +170,7 @@ record = if (STREAMING_CONTEXT.isDisabledForTopic(record.topic()) || STREAMING_CONTEXT.isSinkTopic(record.topic())) { Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(sortedTags); + DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(tags); dsmPropagator.inject(span.with(dsmContext), record.headers(), setter); AvroSchemaExtractor.tryExtractProducer(record, span); } @@ -213,7 +204,7 @@ public static void onEnter(@Advice.Argument(value = 0) int estimatedPayloadSize) // create new stats including the payload size StatsPoint updated = new StatsPoint( - saved.getEdgeTags(), + saved.getTags(), saved.getHash(), saved.getParentHash(), saved.getAggregationHash(), diff --git a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/TracingIterator.java b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/TracingIterator.java index 30e40b40f79..832a5097bb5 100644 --- a/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/TracingIterator.java +++ b/dd-java-agent/instrumentation/kafka-clients-0.11/src/main/java/datadog/trace/instrumentation/kafka_clients/TracingIterator.java @@ -7,12 +7,6 @@ import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.closePrevious; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan; import static
datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_clients.KafkaDecorator.BROKER_DECORATE; import static datadog.trace.instrumentation.kafka_clients.KafkaDecorator.KAFKA_DELIVER; import static datadog.trace.instrumentation.kafka_clients.KafkaDecorator.TIME_IN_QUEUE_ENABLED; @@ -26,12 +20,12 @@ import datadog.context.propagation.Propagators; import datadog.trace.api.Config; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.AgentSpanContext; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags; import java.util.Iterator; -import java.util.LinkedHashMap; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,21 +95,15 @@ protected void startNewRecordSpan(ConsumerRecord val) { // spans are written out together by TraceStructureWriter when running in strict mode } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_IN); - sortedTags.put(GROUP_TAG, group); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(TOPIC_TAG, val.topic()); - sortedTags.put(TYPE_TAG, "kafka"); - + DataStreamsTags tags = + DataStreamsTags.create( + "kafka", DataStreamsTags.Direction.Inbound, val.topic(), group, clusterId); final long payloadSize = traceConfig().isDataStreamsEnabled() ? 
computePayloadSizeBytes(val) : 0; if (STREAMING_CONTEXT.isDisabledForTopic(val.topic())) { AgentTracer.get() .getDataStreamsMonitoring() - .setCheckpoint(span, create(sortedTags, val.timestamp(), payloadSize)); + .setCheckpoint(span, create(tags, val.timestamp(), payloadSize)); } else { // when we're in a streaming context we want to consume only from source topics if (STREAMING_CONTEXT.isSourceTopic(val.topic())) { @@ -124,7 +112,7 @@ protected void startNewRecordSpan(ConsumerRecord val) { // some other instance of the application, breaking the context propagation // for DSM users Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = create(sortedTags, val.timestamp(), payloadSize); + DataStreamsContext dsmContext = create(tags, val.timestamp(), payloadSize); dsmPropagator.inject(span.with(dsmContext), val.headers(), SETTER); } } diff --git a/dd-java-agent/instrumentation/kafka-clients-0.11/src/test/groovy/KafkaClientTestBase.groovy b/dd-java-agent/instrumentation/kafka-clients-0.11/src/test/groovy/KafkaClientTestBase.groovy index 569538197d4..9870627fda3 100644 --- a/dd-java-agent/instrumentation/kafka-clients-0.11/src/test/groovy/KafkaClientTestBase.groovy +++ b/dd-java-agent/instrumentation/kafka-clients-0.11/src/test/groovy/KafkaClientTestBase.groovy @@ -1,3 +1,5 @@ +import datadog.trace.api.datastreams.DataStreamsTags + import static datadog.trace.agent.test.utils.TraceUtils.basicSpan import static datadog.trace.agent.test.utils.TraceUtils.runUnderTrace import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activeSpan @@ -239,7 +241,7 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { TEST_DATA_STREAMS_WRITER.waitForGroups(2) // wait for produce offset 0, commit offset 0 on partition 0 and 1, and commit offset 1 on 1 partition. 
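// Note on the hunk below: waitForBacklogs(4) drops to waitForBacklogs(3),
// which suggests backlogs are now keyed by their DataStreamsTags value, so a
// second commit on the same partition replaces the earlier entry instead of
// appending one. For the consumer path in TracingIterator above, the inbound
// shape is now (a sketch, assuming the statically imported create(...) is
// DataStreamsContext.create, as the surrounding hunks indicate):
//
//   DataStreamsTags tags = DataStreamsTags.create(
//       "kafka", DataStreamsTags.Direction.Inbound, val.topic(), group, clusterId);
//   AgentTracer.get().getDataStreamsMonitoring()
//       .setCheckpoint(span, DataStreamsContext.create(tags, val.timestamp(), payloadSize));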
- TEST_DATA_STREAMS_WRITER.waitForBacklogs(4) + TEST_DATA_STREAMS_WRITER.waitForBacklogs(3) } then: @@ -283,37 +285,42 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == [ + tags.hasAllTags( "direction:in", "group:sender", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 5 + ) } - List produce = [ - "kafka_cluster_id:$clusterId", - "partition:" + received.partition(), - "topic:" + SHARED_TOPIC, - "type:kafka_produce" - ] - List commit = [ - "consumer_group:sender", - "kafka_cluster_id:$clusterId", - "partition:" + received.partition(), - "topic:" + SHARED_TOPIC, - "type:kafka_commit" - ] - verifyAll(TEST_DATA_STREAMS_WRITER.backlogs) { - contains(new AbstractMap.SimpleEntry<List<String>, Long>(commit, 1).toString()) - contains(new AbstractMap.SimpleEntry<List<String>, Long>(produce, 0).toString()) + def items = new ArrayList(TEST_DATA_STREAMS_WRITER.backlogs).sort { it.type + it.partition} + verifyAll(items) { + size() == 3 + get(0).hasAllTags( + "consumer_group:sender", + "kafka_cluster_id:$clusterId", + "partition:0", + "topic:" + SHARED_TOPIC, + "type:kafka_commit" + ) + get(1).hasAllTags( + "consumer_group:sender", + "kafka_cluster_id:$clusterId", + "partition:1", + "topic:" + SHARED_TOPIC, + "type:kafka_commit" + ) + get(2).hasAllTags( + "kafka_cluster_id:$clusterId", + "partition:" + received.partition(), + "topic:" + SHARED_TOPIC, + "type:kafka_produce" + ) } } @@ -396,7 +403,7 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { TEST_DATA_STREAMS_WRITER.waitForGroups(2) // wait for produce offset 0, commit offset 0 on partition 0 and 1, and commit offset 1 on 1 partition.
- TEST_DATA_STREAMS_WRITER.waitForBacklogs(4) + TEST_DATA_STREAMS_WRITER.waitForBacklogs(3) } then: @@ -431,42 +438,47 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == [ + tags.hasAllTags( "direction:out", "kafka_cluster_id:$clusterId".toString(), "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 4 + ) } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == [ + tags.hasAllTags( "direction:in", "group:sender", "kafka_cluster_id:$clusterId".toString(), "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 5 + ) } - List produce = [ - "kafka_cluster_id:$clusterId".toString(), - "partition:" + received.partition(), - "topic:" + SHARED_TOPIC, - "type:kafka_produce" - ] - List commit = [ - "consumer_group:sender", - "kafka_cluster_id:$clusterId".toString(), - "partition:" + received.partition(), - "topic:" + SHARED_TOPIC, - "type:kafka_commit" - ] - verifyAll(TEST_DATA_STREAMS_WRITER.backlogs) { - contains(new AbstractMap.SimpleEntry<List<String>, Long>(commit, 1).toString()) - contains(new AbstractMap.SimpleEntry<List<String>, Long>(produce, 0).toString()) + def items = new ArrayList(TEST_DATA_STREAMS_WRITER.backlogs).sort {it.type + it.partition} + verifyAll(items) { + size() == 3 + get(0).hasAllTags( + "consumer_group:sender", + "kafka_cluster_id:$clusterId".toString(), + "partition:0", + "topic:" + SHARED_TOPIC, + "type:kafka_commit" + ) + get(1).hasAllTags( + "consumer_group:sender", + "kafka_cluster_id:$clusterId".toString(), + "partition:1", + "topic:" + SHARED_TOPIC, + "type:kafka_commit" + ) + get(2).hasAllTags( + "kafka_cluster_id:$clusterId".toString(), + "partition:" + received.partition(), + "topic:" + SHARED_TOPIC, + "type:kafka_produce" + ) } } @@ -921,20 +933,18 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == [ + tags.hasAllTags( "direction:in", "group:sender", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 5 + ) } } diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ConsumerCoordinatorAdvice.java b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ConsumerCoordinatorAdvice.java index ecec8bd1e77..0592aa22855 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ConsumerCoordinatorAdvice.java +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ConsumerCoordinatorAdvice.java @@ -1,14 +1,8 @@ package datadog.trace.instrumentation.kafka_clients38; -import static datadog.trace.core.datastreams.TagsProcessor.CONSUMER_GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG;
-import static datadog.trace.core.datastreams.TagsProcessor.PARTITION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; - +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.InstrumentationContext; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; -import java.util.LinkedHashMap; import java.util.Map; import net.bytebuddy.asm.Advice; import org.apache.kafka.clients.Metadata; @@ -52,17 +46,14 @@ public static void trackCommitOffset( if (entry.getKey() == null || entry.getValue() == null) { continue; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(CONSUMER_GROUP_TAG, consumerGroup); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(PARTITION_TAG, String.valueOf(entry.getKey().partition())); - sortedTags.put(TOPIC_TAG, entry.getKey().topic()); - sortedTags.put(TYPE_TAG, "kafka_commit"); - AgentTracer.get() - .getDataStreamsMonitoring() - .trackBacklog(sortedTags, entry.getValue().offset()); + DataStreamsTags tags = + DataStreamsTags.createWithPartition( + "kafka_commit", + entry.getKey().topic(), + String.valueOf(entry.getKey().partition()), + clusterId, + consumerGroup); + AgentTracer.get().getDataStreamsMonitoring().trackBacklog(tags, entry.getValue().offset()); } } diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/DDOffsetCommitCallback.java b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/DDOffsetCommitCallback.java index 65b2b94b26b..59b4677b7b3 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/DDOffsetCommitCallback.java +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/DDOffsetCommitCallback.java @@ -1,14 +1,8 @@ package datadog.trace.instrumentation.kafka_clients38; -import static datadog.trace.core.datastreams.TagsProcessor.CONSUMER_GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.PARTITION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; - +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.InstrumentationContext; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; -import java.util.LinkedHashMap; import java.util.Map; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.consumer.OffsetAndMetadata; @@ -34,27 +28,26 @@ public void onComplete(Map map, Exception e) if (entry.getKey() == null || entry.getValue() == null) { continue; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); + String consumerGroup = null; + String clusterId = null; + if (kafkaConsumerInfo != null) { - String consumerGroup = kafkaConsumerInfo.getConsumerGroup().get(); + consumerGroup = kafkaConsumerInfo.getConsumerGroup().get(); Metadata consumerMetadata = kafkaConsumerInfo.getmetadata().get(); - String clusterId = null; if (consumerMetadata != null) { clusterId = InstrumentationContext.get(Metadata.class, String.class).get(consumerMetadata); } - sortedTags.put(CONSUMER_GROUP_TAG, consumerGroup); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, 
clusterId); - } } - sortedTags.put(PARTITION_TAG, String.valueOf(entry.getKey().partition())); - sortedTags.put(TOPIC_TAG, entry.getKey().topic()); - sortedTags.put(TYPE_TAG, "kafka_commit"); - AgentTracer.get() - .getDataStreamsMonitoring() - .trackBacklog(sortedTags, entry.getValue().offset()); + DataStreamsTags tags = + DataStreamsTags.createWithPartition( + "kafka_commit", + entry.getKey().topic(), + String.valueOf(entry.getKey().partition()), + clusterId, + consumerGroup); + AgentTracer.get().getDataStreamsMonitoring().trackBacklog(tags, entry.getValue().offset()); } } } diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/KafkaProducerCallback.java b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/KafkaProducerCallback.java index c6252206ab2..58c6bbbb8a1 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/KafkaProducerCallback.java +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/KafkaProducerCallback.java @@ -1,16 +1,12 @@ package datadog.trace.instrumentation.kafka_clients38; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.PARTITION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_clients38.KafkaDecorator.PRODUCER_DECORATE; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; -import java.util.LinkedHashMap; import org.apache.kafka.clients.producer.Callback; import org.apache.kafka.clients.producer.RecordMetadata; @@ -48,13 +44,13 @@ public void onCompletion(final RecordMetadata metadata, final Exception exceptio if (metadata == null) { return; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(PARTITION_TAG, String.valueOf(metadata.partition())); - sortedTags.put(TOPIC_TAG, metadata.topic()); - sortedTags.put(TYPE_TAG, "kafka_produce"); - AgentTracer.get().getDataStreamsMonitoring().trackBacklog(sortedTags, metadata.offset()); + DataStreamsTags tags = + DataStreamsTags.createWithPartition( + "kafka_produce", + metadata.topic(), + String.valueOf(metadata.partition()), + clusterId, + null); + AgentTracer.get().getDataStreamsMonitoring().trackBacklog(tags, metadata.offset()); } } diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/PayloadSizeAdvice.java b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/PayloadSizeAdvice.java index 8d0a8b6a7b0..7db4e95e711 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/PayloadSizeAdvice.java +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/PayloadSizeAdvice.java @@ -20,7 +20,7 @@ public static void 
onEnter(@Advice.Argument(value = 0) int estimatedPayloadSize) // create new stats including the payload size StatsPoint updated = new StatsPoint( - saved.getEdgeTags(), + saved.getTags(), saved.getHash(), saved.getParentHash(), saved.getAggregationHash(), diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ProducerAdvice.java b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ProducerAdvice.java index 0756f585b52..c37d99796da 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ProducerAdvice.java +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/ProducerAdvice.java @@ -6,11 +6,6 @@ import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activeSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_clients38.KafkaDecorator.KAFKA_PRODUCE; import static datadog.trace.instrumentation.kafka_clients38.KafkaDecorator.PRODUCER_DECORATE; import static datadog.trace.instrumentation.kafka_clients38.KafkaDecorator.TIME_IN_QUEUE_ENABLED; @@ -20,11 +15,11 @@ import datadog.context.propagation.Propagators; import datadog.trace.api.Config; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.InstrumentationContext; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags; -import java.util.LinkedHashMap; import net.bytebuddy.asm.Advice; import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.producer.Callback; @@ -67,13 +62,9 @@ public static AgentScope onEnter( && !Config.get().isKafkaClientPropagationDisabledForTopic(record.topic())) { setter = TextMapInjectAdapter.SETTER; } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_OUT); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(TOPIC_TAG, record.topic()); - sortedTags.put(TYPE_TAG, "kafka"); + DataStreamsTags tags = + DataStreamsTags.create( + "kafka", DataStreamsTags.Direction.Outbound, record.topic(), null, clusterId); try { defaultPropagator().inject(span, record.headers(), setter); if (STREAMING_CONTEXT.isDisabledForTopic(record.topic()) @@ -82,7 +73,7 @@ public static AgentScope onEnter( // message size. // The stats are saved in the pathway context and sent in PayloadSizeAdvice. 
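// Note: the five-argument create(...) above leaves the group slot null on the
// produce side, mirroring the createWithClusterId(...) call in the
// kafka-clients-0.11 ProducerAdvice earlier in this diff. The checkpoint
// itself is deferred: the context is built without one here, and
// PayloadSizeAdvice rebuilds the StatsPoint once the payload size is known,
// now reading saved.getTags() where it previously read saved.getEdgeTags().
// A sketch of the inject shape, assuming fromTagsWithoutCheckpoint is the
// DataStreamsContext factory behind the static import:
//
//   DataStreamsContext dsmContext = DataStreamsContext.fromTagsWithoutCheckpoint(tags);
//   Propagators.forConcern(DSM_CONCERN).inject(span.with(dsmContext), record.headers(), setter);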
Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(sortedTags); + DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(tags); dsmPropagator.inject(span.with(dsmContext), record.headers(), setter); AvroSchemaExtractor.tryExtractProducer(record, span); } @@ -101,7 +92,7 @@ record = if (STREAMING_CONTEXT.isDisabledForTopic(record.topic()) || STREAMING_CONTEXT.isSinkTopic(record.topic())) { Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(sortedTags); + DataStreamsContext dsmContext = fromTagsWithoutCheckpoint(tags); dsmPropagator.inject(span.with(dsmContext), record.headers(), setter); AvroSchemaExtractor.tryExtractProducer(record, span); } diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/TracingIterator.java b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/TracingIterator.java index b43534af550..75ef16c865a 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/TracingIterator.java +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/main/java17/datadog/trace/instrumentation/kafka_clients38/TracingIterator.java @@ -7,12 +7,6 @@ import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.closePrevious; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.KAFKA_CLUSTER_ID_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_clients38.TextMapExtractAdapter.GETTER; import static datadog.trace.instrumentation.kafka_clients38.TextMapInjectAdapter.SETTER; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -21,6 +15,7 @@ import datadog.context.propagation.Propagators; import datadog.trace.api.Config; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.AgentSpanContext; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; @@ -28,7 +23,6 @@ import datadog.trace.instrumentation.kafka_common.StreamingContext; import datadog.trace.instrumentation.kafka_common.Utils; import java.util.Iterator; -import java.util.LinkedHashMap; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,21 +95,15 @@ protected void startNewRecordSpan(ConsumerRecord val) { // spans are written out together by TraceStructureWriter when running in strict mode } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_IN); - sortedTags.put(GROUP_TAG, group); - if (clusterId != null) { - sortedTags.put(KAFKA_CLUSTER_ID_TAG, clusterId); - } - sortedTags.put(TOPIC_TAG, val.topic()); - sortedTags.put(TYPE_TAG, "kafka"); - + DataStreamsTags tags = + 
DataStreamsTags.create( + "kafka", DataStreamsTags.Direction.Inbound, val.topic(), group, clusterId); final long payloadSize = traceConfig().isDataStreamsEnabled() ? Utils.computePayloadSizeBytes(val) : 0; if (StreamingContext.STREAMING_CONTEXT.isDisabledForTopic(val.topic())) { AgentTracer.get() .getDataStreamsMonitoring() - .setCheckpoint(span, create(sortedTags, val.timestamp(), payloadSize)); + .setCheckpoint(span, create(tags, val.timestamp(), payloadSize)); } else { // when we're in a streaming context we want to consume only from source topics if (StreamingContext.STREAMING_CONTEXT.isSourceTopic(val.topic())) { @@ -124,7 +112,7 @@ protected void startNewRecordSpan(ConsumerRecord val) { // some other instance of the application, breaking the context propagation // for DSM users Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = create(sortedTags, val.timestamp(), payloadSize); + DataStreamsContext dsmContext = create(tags, val.timestamp(), payloadSize); dsmPropagator.inject(span.with(dsmContext), val.headers(), SETTER); } } diff --git a/dd-java-agent/instrumentation/kafka-clients-3.8/src/test/groovy/KafkaClientTestBase.groovy b/dd-java-agent/instrumentation/kafka-clients-3.8/src/test/groovy/KafkaClientTestBase.groovy index 9e71a218b7a..665e2df70d8 100644 --- a/dd-java-agent/instrumentation/kafka-clients-3.8/src/test/groovy/KafkaClientTestBase.groovy +++ b/dd-java-agent/instrumentation/kafka-clients-3.8/src/test/groovy/KafkaClientTestBase.groovy @@ -2,6 +2,7 @@ import datadog.trace.agent.test.asserts.TraceAssert import datadog.trace.agent.test.naming.VersionedNamingTestBase import datadog.trace.api.Config import datadog.trace.api.DDTags +import datadog.trace.api.datastreams.DataStreamsTags import datadog.trace.bootstrap.instrumentation.api.InstrumentationTags import datadog.trace.bootstrap.instrumentation.api.Tags import datadog.trace.common.writer.ListWriter @@ -10,11 +11,7 @@ import datadog.trace.core.datastreams.StatsGroup import org.apache.kafka.clients.consumer.ConsumerConfig import org.apache.kafka.clients.consumer.ConsumerRecord import org.apache.kafka.clients.consumer.KafkaConsumer -import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.clients.producer.Producer -import org.apache.kafka.clients.producer.ProducerConfig -import org.apache.kafka.clients.producer.ProducerRecord -import org.apache.kafka.clients.producer.RecordMetadata +import org.apache.kafka.clients.producer.* import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.serialization.StringSerializer import org.junit.Rule @@ -29,15 +26,12 @@ import org.springframework.kafka.test.rule.EmbeddedKafkaRule import org.springframework.kafka.test.utils.ContainerTestUtils import org.springframework.kafka.test.utils.KafkaTestUtils - import java.util.concurrent.ExecutionException import java.util.concurrent.Future - -import static datadog.trace.agent.test.asserts.TagsAssert.codeOriginTags - import java.util.concurrent.LinkedBlockingQueue import java.util.concurrent.TimeUnit +import static datadog.trace.agent.test.asserts.TagsAssert.codeOriginTags import static datadog.trace.agent.test.utils.TraceUtils.basicSpan import static datadog.trace.agent.test.utils.TraceUtils.runUnderTrace import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activeSpan @@ -260,36 +254,35 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { 
it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == [ + tags.hasAllTags( "direction:in", "group:sender", "kafka_cluster_id:$clusterId", "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 5 + ) } - List produce = [ - "kafka_cluster_id:$clusterId", - "partition:"+received.partition(), - "topic:"+SHARED_TOPIC, - "type:kafka_produce" - ] - List commit = [ - "consumer_group:sender", - "kafka_cluster_id:$clusterId", - "partition:"+received.partition(), - "topic:$SHARED_TOPIC", - "type:kafka_commit" - ] - verifyAll(TEST_DATA_STREAMS_WRITER.backlogs) { - contains(new AbstractMap.SimpleEntry<List<String>, Long>(commit, 1).toString()) - contains(new AbstractMap.SimpleEntry<List<String>, Long>(produce, 0).toString()) + + def sorted = new ArrayList(TEST_DATA_STREAMS_WRITER.backlogs).sort{ it.type } + verifyAll(sorted) { + size() == 2 + get(0).hasAllTags( + "consumer_group:sender", + "kafka_cluster_id:$clusterId", + "partition:"+received.partition(), + "topic:$SHARED_TOPIC", + "type:kafka_commit" + ) + get(1).hasAllTags( + "kafka_cluster_id:$clusterId", + "partition:"+received.partition(), + "topic:"+SHARED_TOPIC, + "type:kafka_produce" + ) } } @@ -412,42 +405,40 @@ abstract class KafkaClientTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == [ + tags.hasAllTags( "direction:out", "kafka_cluster_id:$clusterId".toString(), "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 4 + ) } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == [ + tags.hasAllTags( "direction:in", "group:sender", "kafka_cluster_id:$clusterId".toString(), "topic:$SHARED_TOPIC".toString(), "type:kafka" - ] - edgeTags.size() == 5 + ) } - List produce = [ - "kafka_cluster_id:$clusterId".toString(), - "partition:"+received.partition(), - "topic:"+SHARED_TOPIC, - "type:kafka_produce" - ] - List commit = [ - "consumer_group:sender", - "kafka_cluster_id:$clusterId".toString(), - "partition:"+received.partition(), - "topic:"+SHARED_TOPIC, - "type:kafka_commit" - ] - verifyAll(TEST_DATA_STREAMS_WRITER.backlogs) { - contains(new AbstractMap.SimpleEntry<List<String>, Long>(commit, 1).toString()) - contains(new AbstractMap.SimpleEntry<List<String>, Long>(produce, 0).toString()) + def items = new ArrayList(TEST_DATA_STREAMS_WRITER.backlogs).sort {it.type} + verifyAll(items) { + size() == 2 + get(0).hasAllTags( + "consumer_group:sender", + "kafka_cluster_id:$clusterId".toString(), + "partition:"+received.partition(), + "topic:"+SHARED_TOPIC, + "type:kafka_commit" + ) + get(1).hasAllTags( + "kafka_cluster_id:$clusterId".toString(), + "partition:"+received.partition(), + "topic:"+SHARED_TOPIC, + "type:kafka_produce" + ) } } diff --git a/dd-java-agent/instrumentation/kafka-connect-0.11/src/test/groovy/ConnectWorkerInstrumentationTest.groovy b/dd-java-agent/instrumentation/kafka-connect-0.11/src/test/groovy/ConnectWorkerInstrumentationTest.groovy index 244a5213ff2..031242c592e 100644 --- a/dd-java-agent/instrumentation/kafka-connect-0.11/src/test/groovy/ConnectWorkerInstrumentationTest.groovy +++
b/dd-java-agent/instrumentation/kafka-connect-0.11/src/test/groovy/ConnectWorkerInstrumentationTest.groovy @@ -13,12 +13,12 @@ import org.apache.kafka.common.utils.Time import org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy import org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy import org.apache.kafka.connect.runtime.Herder -import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo -import org.apache.kafka.connect.runtime.standalone.StandaloneConfig -import org.apache.kafka.connect.runtime.standalone.StandaloneHerder import org.apache.kafka.connect.runtime.Worker import org.apache.kafka.connect.runtime.WorkerConfig import org.apache.kafka.connect.runtime.isolation.Plugins +import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo +import org.apache.kafka.connect.runtime.standalone.StandaloneConfig +import org.apache.kafka.connect.runtime.standalone.StandaloneHerder import org.apache.kafka.connect.storage.FileOffsetBackingStore import org.apache.kafka.connect.util.Callback import org.springframework.kafka.test.EmbeddedKafkaBroker @@ -153,21 +153,16 @@ class ConnectWorkerInstrumentationTest extends AgentTestRunner { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - assert [ - "direction:out", - "topic:test-topic", - "type:kafka" - ].every( tag -> edgeTags.contains(tag) ) + tags.hasAllTags( + "direction:out", + "topic:test-topic", + "type:kafka" + ) } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - assert [ - "direction:in", - "group:test-consumer-group", - "topic:test-topic", - "type:kafka" - ].every( tag -> edgeTags.contains(tag) ) + tags.hasAllTags("direction:in", "group:test-consumer-group", "topic:test-topic", "type:kafka") } TEST_DATA_STREAMS_WRITER.getServices().contains('file-source-connector') @@ -285,21 +280,12 @@ class ConnectWorkerInstrumentationTest extends AgentTestRunner { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - assert [ - "direction:out", - "topic:test-topic", - "type:kafka" - ].every( tag -> edgeTags.contains(tag) ) + tags.hasAllTags("direction:out", "topic:test-topic", "type:kafka", "kafka_cluster_id:" + clusterId) } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - assert [ - "direction:in", - "group:connect-file-sink-connector", - "topic:test-topic", - "type:kafka" - ].every( tag -> edgeTags.contains(tag) ) + tags.hasAllTags("direction:in", "group:connect-file-sink-connector", "topic:test-topic", "type:kafka", "kafka_cluster_id:" + clusterId) } TEST_DATA_STREAMS_WRITER.getServices().contains('file-sink-connector') diff --git a/dd-java-agent/instrumentation/kafka-streams-0.11/src/latestDepTest/groovy/KafkaStreamsTest.groovy b/dd-java-agent/instrumentation/kafka-streams-0.11/src/latestDepTest/groovy/KafkaStreamsTest.groovy index 095d479e2d9..436fdf6d68e 100644 --- a/dd-java-agent/instrumentation/kafka-streams-0.11/src/latestDepTest/groovy/KafkaStreamsTest.groovy +++ b/dd-java-agent/instrumentation/kafka-streams-0.11/src/latestDepTest/groovy/KafkaStreamsTest.groovy @@ -226,31 +226,25 @@ class KafkaStreamsTest extends AgentTestRunner { if (isDataStreamsEnabled()) { StatsGroup originProducerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(originProducerPoint) { - edgeTags == ["direction:out", "topic:$STREAM_PENDING", "type:kafka"] - 
edgeTags.size() == 3 + tags.hasAllTags("direction:out", "topic:$STREAM_PENDING", "type:kafka") } StatsGroup kafkaStreamsConsumerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == originProducerPoint.hash } verifyAll(kafkaStreamsConsumerPoint) { - edgeTags == [ - "direction:in", + tags.hasAllTags("direction:in", "group:test-application", "topic:$STREAM_PENDING".toString(), - "type:kafka" - ] - edgeTags.size() == 4 + "type:kafka") } StatsGroup kafkaStreamsProducerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == kafkaStreamsConsumerPoint.hash } verifyAll(kafkaStreamsProducerPoint) { - edgeTags == ["direction:out", "topic:$STREAM_PROCESSED", "type:kafka"] - edgeTags.size() == 3 + tags.hasAllTags("direction:out", "topic:$STREAM_PROCESSED", "type:kafka") } StatsGroup finalConsumerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == kafkaStreamsProducerPoint.hash } verifyAll(finalConsumerPoint) { - edgeTags == ["direction:in", "group:sender", "topic:$STREAM_PROCESSED".toString(), "type:kafka"] - edgeTags.size() == 4 + tags.hasAllTags("direction:in", "group:sender", "topic:$STREAM_PROCESSED".toString(), "type:kafka") } } diff --git a/dd-java-agent/instrumentation/kafka-streams-0.11/src/main/java/datadog/trace/instrumentation/kafka_streams/KafkaStreamTaskInstrumentation.java b/dd-java-agent/instrumentation/kafka-streams-0.11/src/main/java/datadog/trace/instrumentation/kafka_streams/KafkaStreamTaskInstrumentation.java index f477f40a14b..ffd6a4df88e 100644 --- a/dd-java-agent/instrumentation/kafka-streams-0.11/src/main/java/datadog/trace/instrumentation/kafka_streams/KafkaStreamTaskInstrumentation.java +++ b/dd-java-agent/instrumentation/kafka-streams-0.11/src/main/java/datadog/trace/instrumentation/kafka_streams/KafkaStreamTaskInstrumentation.java @@ -7,11 +7,6 @@ import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.kafka_common.StreamingContext.STREAMING_CONTEXT; import static datadog.trace.instrumentation.kafka_common.Utils.computePayloadSizeBytes; import static datadog.trace.instrumentation.kafka_streams.KafkaStreamsDecorator.BROKER_DECORATE; @@ -38,13 +33,13 @@ import datadog.trace.agent.tooling.InstrumenterModule; import datadog.trace.api.Config; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.InstrumentationContext; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.AgentSpanContext; import datadog.trace.bootstrap.instrumentation.api.AgentTracer; import datadog.trace.instrumentation.kafka_clients.TracingIterableDelegator; -import java.util.LinkedHashMap; import java.util.Map; import net.bytebuddy.asm.Advice; import org.apache.kafka.clients.consumer.ConsumerRecord; @@ -250,28 +245,25 @@ public static void start( // The queueSpan will be finished after 
inner span has been activated to ensure that // spans are written out together by TraceStructureWriter when running in strict mode } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_IN); + + String applicationId = null; if (streamTaskContext != null) { - String applicationId = streamTaskContext.getApplicationId(); - if (applicationId != null) { - // Kafka Streams uses the application ID as the consumer group.id. - sortedTags.put(GROUP_TAG, applicationId); - } + applicationId = streamTaskContext.getApplicationId(); } - sortedTags.put(TOPIC_TAG, record.topic()); - sortedTags.put(TYPE_TAG, "kafka"); + DataStreamsTags tags = + DataStreamsTags.createWithGroup( + "kafka", DataStreamsTags.Direction.Inbound, applicationId, record.topic()); final long payloadSize = traceConfig().isDataStreamsEnabled() ? computePayloadSizeBytes(record.value) : 0; if (STREAMING_CONTEXT.isDisabledForTopic(record.topic())) { AgentTracer.get() .getDataStreamsMonitoring() - .setCheckpoint(span, create(sortedTags, record.timestamp, payloadSize)); + .setCheckpoint(span, create(tags, record.timestamp, payloadSize)); } else { if (STREAMING_CONTEXT.isSourceTopic(record.topic())) { Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = create(sortedTags, record.timestamp, payloadSize); + DataStreamsContext dsmContext = create(tags, record.timestamp, payloadSize); dsmPropagator.inject(span.with(dsmContext), record, SR_SETTER); } } @@ -327,17 +319,13 @@ public static void start( // spans are written out together by TraceStructureWriter when running in strict mode } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_IN); + String applicationId = null; if (streamTaskContext != null) { - String applicationId = streamTaskContext.getApplicationId(); - if (applicationId != null) { - // Kafka Streams uses the application ID as the consumer group.id. 
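// Note: in both advice methods here the group handling is inverted. The old
// code only put GROUP_TAG when applicationId was non-null; the new code passes
// applicationId (possibly null) straight into the factory, so skipping null
// values presumably becomes the factory's job. A sketch of the resulting shape:
//
//   String applicationId =
//       streamTaskContext != null ? streamTaskContext.getApplicationId() : null;
//   DataStreamsTags tags = DataStreamsTags.createWithGroup(
//       "kafka", DataStreamsTags.Direction.Inbound, applicationId, record.topic());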
- sortedTags.put(GROUP_TAG, applicationId); - } + applicationId = streamTaskContext.getApplicationId(); } - sortedTags.put(TOPIC_TAG, record.topic()); - sortedTags.put(TYPE_TAG, "kafka"); + DataStreamsTags tags = + DataStreamsTags.createWithGroup( + "kafka", DataStreamsTags.Direction.Inbound, applicationId, record.topic()); long payloadSize = 0; // we have to go through Object to get the RecordMetadata here because the class of `record` @@ -350,11 +338,11 @@ public static void start( if (STREAMING_CONTEXT.isDisabledForTopic(record.topic())) { AgentTracer.get() .getDataStreamsMonitoring() - .setCheckpoint(span, create(sortedTags, record.timestamp(), payloadSize)); + .setCheckpoint(span, create(tags, record.timestamp(), payloadSize)); } else { if (STREAMING_CONTEXT.isSourceTopic(record.topic())) { Propagator dsmPropagator = Propagators.forConcern(DSM_CONCERN); - DataStreamsContext dsmContext = create(sortedTags, record.timestamp(), payloadSize); + DataStreamsContext dsmContext = create(tags, record.timestamp(), payloadSize); dsmPropagator.inject(span.with(dsmContext), record, PR_SETTER); } } diff --git a/dd-java-agent/instrumentation/kafka-streams-0.11/src/test/groovy/KafkaStreamsTestBase.groovy b/dd-java-agent/instrumentation/kafka-streams-0.11/src/test/groovy/KafkaStreamsTestBase.groovy index 1267187681b..c7f2b5d20c6 100644 --- a/dd-java-agent/instrumentation/kafka-streams-0.11/src/test/groovy/KafkaStreamsTestBase.groovy +++ b/dd-java-agent/instrumentation/kafka-streams-0.11/src/test/groovy/KafkaStreamsTestBase.groovy @@ -289,31 +289,25 @@ abstract class KafkaStreamsTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup originProducerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(originProducerPoint) { - edgeTags == ["direction:out", "topic:$STREAM_PENDING", "type:kafka"] - edgeTags.size() == 3 + tags.hasAllTags("direction:out", "topic:$STREAM_PENDING", "type:kafka") } StatsGroup kafkaStreamsConsumerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == originProducerPoint.hash } verifyAll(kafkaStreamsConsumerPoint) { - edgeTags == [ - "direction:in", + tags.hasAllTags("direction:in", "group:test-application", "topic:$STREAM_PENDING".toString(), - "type:kafka" - ] - edgeTags.size() == 4 + "type:kafka") } StatsGroup kafkaStreamsProducerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == kafkaStreamsConsumerPoint.hash } verifyAll(kafkaStreamsProducerPoint) { - edgeTags == ["direction:out", "topic:$STREAM_PROCESSED", "type:kafka"] - edgeTags.size() == 3 + tags.hasAllTags("direction:out", "topic:$STREAM_PROCESSED", "type:kafka") } StatsGroup finalConsumerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == kafkaStreamsProducerPoint.hash } verifyAll(finalConsumerPoint) { - edgeTags == ["direction:in", "group:sender", "topic:$STREAM_PROCESSED".toString(), "type:kafka"] - edgeTags.size() == 4 + tags.hasAllTags("direction:in", "group:sender", "topic:$STREAM_PROCESSED".toString(), "type:kafka") } } diff --git a/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitChannelInstrumentation.java b/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitChannelInstrumentation.java index 1c762039653..0308399aee9 100644 --- a/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitChannelInstrumentation.java +++ 
b/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitChannelInstrumentation.java @@ -9,11 +9,6 @@ import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activateSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.noopSpan; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.startSpan; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.EXCHANGE_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.HAS_ROUTING_KEY_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.instrumentation.rabbitmq.amqp.RabbitDecorator.CLIENT_DECORATE; import static datadog.trace.instrumentation.rabbitmq.amqp.RabbitDecorator.CONSUMER_DECORATE; import static datadog.trace.instrumentation.rabbitmq.amqp.RabbitDecorator.OPERATION_AMQP_COMMAND; @@ -41,12 +36,12 @@ import datadog.trace.agent.tooling.InstrumenterModule; import datadog.trace.api.Config; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.CallDepthThreadLocalMap; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import java.io.IOException; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.Map; import net.bytebuddy.asm.Advice; import net.bytebuddy.description.type.TypeDescription; @@ -190,13 +185,13 @@ public static AgentScope setResourceNameAddHeaders( if (TIME_IN_QUEUE_ENABLED) { RabbitDecorator.injectTimeInQueueStart(headers); } - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_OUT); - sortedTags.put(EXCHANGE_TAG, exchange); - sortedTags.put( - HAS_ROUTING_KEY_TAG, routingKey == null || routingKey.isEmpty() ? 
"false" : "true"); - sortedTags.put(TYPE_TAG, "rabbitmq"); - DataStreamsContext dsmContext = DataStreamsContext.fromTags(sortedTags); + DataStreamsTags tags = + DataStreamsTags.createWithExchange( + "rabbitmq", + DataStreamsTags.Direction.Outbound, + exchange, + routingKey != null && !routingKey.isEmpty()); + DataStreamsContext dsmContext = DataStreamsContext.fromTags(tags); defaultPropagator().inject(span.with(dsmContext), headers, SETTER); props = new AMQP.BasicProperties( diff --git a/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitDecorator.java b/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitDecorator.java index c3d1797c4fc..fb4ac5e72dc 100644 --- a/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitDecorator.java +++ b/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/main/java/datadog/trace/instrumentation/rabbitmq/amqp/RabbitDecorator.java @@ -9,16 +9,13 @@ import static datadog.trace.bootstrap.instrumentation.api.InstrumentationTags.AMQP_QUEUE; import static datadog.trace.bootstrap.instrumentation.api.InstrumentationTags.AMQP_ROUTING_KEY; import static datadog.trace.bootstrap.instrumentation.api.InstrumentationTags.RECORD_QUEUE_TIME_MS; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import com.rabbitmq.client.AMQP; import com.rabbitmq.client.Command; import com.rabbitmq.client.Consumer; import com.rabbitmq.client.Envelope; import datadog.trace.api.Config; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.api.naming.SpanNaming; import datadog.trace.bootstrap.instrumentation.api.AgentScope; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; @@ -29,7 +26,6 @@ import datadog.trace.bootstrap.instrumentation.api.Tags; import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString; import datadog.trace.bootstrap.instrumentation.decorator.MessagingClientDecorator; -import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; @@ -250,13 +246,11 @@ public static AgentScope startReceivingSpan( } if (null != headers) { - LinkedHashMap sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_IN); - sortedTags.put(TOPIC_TAG, queue); - sortedTags.put(TYPE_TAG, "rabbitmq"); + DataStreamsTags tags = + DataStreamsTags.create("rabbitmq", DataStreamsTags.Direction.Inbound, queue); AgentTracer.get() .getDataStreamsMonitoring() - .setCheckpoint(span, create(sortedTags, produceMillis, 0)); + .setCheckpoint(span, create(tags, produceMillis, 0)); } CONSUMER_DECORATE.afterStart(span); diff --git a/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/test/groovy/RabbitMQTest.groovy b/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/test/groovy/RabbitMQTest.groovy index b47668d272a..c3c69a167fd 100644 --- a/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/test/groovy/RabbitMQTest.groovy +++ b/dd-java-agent/instrumentation/rabbitmq-amqp-2.7/src/test/groovy/RabbitMQTest.groovy @@ -171,14 +171,12 @@ abstract class RabbitMQTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { 
it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "exchange:" + exchangeName, "has_routing_key:true", "type:rabbitmq"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:" + exchangeName, "has_routing_key:true", "type:rabbitmq") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == ["direction:in", "topic:" + queueName, "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:" + queueName, "type:rabbitmq") } } @@ -225,14 +223,12 @@ abstract class RabbitMQTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "exchange:", "has_routing_key:true", "type:rabbitmq"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:", "has_routing_key:true", "type:rabbitmq") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == ["direction:in", "topic:" + queueName, "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:" + queueName, "type:rabbitmq") } } } @@ -322,15 +318,13 @@ abstract class RabbitMQTestBase extends VersionedNamingTestBase { List<StatsGroup> producerPoints = TEST_DATA_STREAMS_WRITER.groups.findAll { it.parentHash == 0 } producerPoints.each { producerPoint -> verifyAll(producerPoint) { - edgeTags == ["direction:out", "exchange:" + exchangeName, "has_routing_key:false", "type:rabbitmq"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:" + exchangeName, "has_routing_key:false", "type:rabbitmq") } } StatsGroup consumerPoint = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == producerPoints.get(0).hash } verifyAll(consumerPoint) { - edgeTags == ["direction:in", "topic:" + queueName, "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:" + queueName, "type:rabbitmq") } } @@ -414,14 +408,12 @@ abstract class RabbitMQTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "exchange:" + exchangeName, "has_routing_key:false", "type:rabbitmq"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:" + exchangeName, "has_routing_key:false", "type:rabbitmq") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == ["direction:in", "topic:" + queueName, "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:" + queueName, "type:rabbitmq") } } @@ -499,14 +491,12 @@ abstract class RabbitMQTestBase extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(["direction:out", "exchange:", "has_routing_key:true", "type:rabbitmq"]) - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:", "has_routing_key:true", "type:rabbitmq") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == ["direction:in", "topic:some-routing-queue", "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:some-routing-queue", "type:rabbitmq") } } } @@ -583,14 +573,12 @@ abstract class RabbitMQTestBase extends
VersionedNamingTestBase { if (isDataStreamsEnabled() && !noParent) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "exchange:" + exchangeName, "has_routing_key:true", "type:rabbitmq"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:" + exchangeName, "has_routing_key:true", "type:rabbitmq") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == ["direction:in", "topic:" + queueName, "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:" + queueName, "type:rabbitmq") } } @@ -679,14 +667,12 @@ abstract class RabbitMQTestBase extends VersionedNamingTestBase { // assert with retries in case DSM data is split in more groups that take a bit longer to arrive. StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags == ["direction:out", "exchange:" + exchangeName, "has_routing_key:true", "type:rabbitmq"] - edgeTags.size() == 4 + tags.hasAllTags("direction:out", "exchange:" + exchangeName, "has_routing_key:true", "type:rabbitmq") } StatsGroup second = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == first.hash } verifyAll(second) { - edgeTags == ["direction:in", "topic:" + queueName, "type:rabbitmq"] - edgeTags.size() == 3 + tags.hasAllTags("direction:in", "topic:" + queueName, "type:rabbitmq") } } } diff --git a/dd-java-agent/instrumentation/spark/src/main/java/datadog/trace/instrumentation/spark/AbstractDatadogSparkListener.java b/dd-java-agent/instrumentation/spark/src/main/java/datadog/trace/instrumentation/spark/AbstractDatadogSparkListener.java index a4a340792e7..5ac7f524ce7 100644 --- a/dd-java-agent/instrumentation/spark/src/main/java/datadog/trace/instrumentation/spark/AbstractDatadogSparkListener.java +++ b/dd-java-agent/instrumentation/spark/src/main/java/datadog/trace/instrumentation/spark/AbstractDatadogSparkListener.java @@ -1,16 +1,13 @@ package datadog.trace.instrumentation.spark; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.traceConfig; -import static datadog.trace.core.datastreams.TagsProcessor.CONSUMER_GROUP_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.PARTITION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import datadog.trace.api.Config; import datadog.trace.api.DDTags; import datadog.trace.api.DDTraceId; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.api.sampling.PrioritySampling; import datadog.trace.api.sampling.SamplingMechanism; import datadog.trace.bootstrap.InstanceStore; @@ -32,7 +29,6 @@ import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -1317,20 +1313,14 @@ private static void reportKafkaOffsets( JsonNode topicNode = jsonNode.get(topic); // iterate through reported partitions Iterator<String> allPartitions = topicNode.fieldNames(); - // dsm tags - LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>(); - sortedTags.put(CONSUMER_GROUP_TAG, appName); - // will be overwritten - sortedTags.put(PARTITION_TAG, ""); - sortedTags.put(TOPIC_TAG, topic); - sortedTags.put(TYPE_TAG, "kafka_commit"); - while (allPartitions.hasNext())
{ String partition = allPartitions.next(); - sortedTags.put(PARTITION_TAG, partition); + DataStreamsTags tags = + DataStreamsTags.createWithPartition( + "kafka_commit", topic, partition, null, appName); AgentTracer.get() .getDataStreamsMonitoring() - .trackBacklog(sortedTags, topicNode.get(partition).asLong()); + .trackBacklog(tags, topicNode.get(partition).asLong()); } } } catch (Throwable e) { diff --git a/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpClientTest.groovy b/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpClientTest.groovy index f08d8a556c0..19f87288b02 100644 --- a/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpClientTest.groovy +++ b/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpClientTest.groovy @@ -36,9 +36,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { protected static final int READ_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(5) as int protected static final BASIC_AUTH_KEY = "custom_authorization_header" protected static final BASIC_AUTH_VAL = "plain text auth token" - protected static final DSM_EDGE_TAGS = DataStreamsContext.forHttpClient().sortedTags().collect { key, value -> - return key + ":" + value - } + protected static final DSM_EDGE_TAGS = DataStreamsContext.forHttpClient().tags() @AutoCleanup @Shared @@ -178,8 +176,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -221,8 +218,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -269,8 +265,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -304,8 +299,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -342,8 +336,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -381,8 +374,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -415,8 +407,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - 
edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -450,8 +441,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -495,8 +485,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } } @@ -543,8 +532,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -580,8 +568,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -615,8 +602,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -674,8 +660,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -759,8 +744,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -788,8 +772,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } @@ -822,8 +805,7 @@ abstract class HttpClientTest extends VersionedNamingTestBase { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + getTags() == DSM_EDGE_TAGS } } diff --git a/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpServerTest.groovy b/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpServerTest.groovy index 69760e48eb9..ed077d10621 100644 --- a/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpServerTest.groovy +++ b/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/base/HttpServerTest.groovy @@ -106,10 +106,7 @@ import static org.junit.Assume.assumeTrue abstract class 
HttpServerTest extends WithHttpServer { public static final Logger SERVER_LOGGER = LoggerFactory.getLogger("http-server") - protected static final DSM_EDGE_TAGS = DataStreamsContext.forHttpServer().sortedTags().collect { - key, value -> - return key + ":" + value - } + protected static final DSM_EDGE_TAGS = DataStreamsContext.forHttpServer().tags() static { try { ((ch.qos.logback.classic.Logger) SERVER_LOGGER).setLevel(Level.DEBUG) @@ -640,8 +637,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -684,8 +680,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -730,8 +725,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -759,7 +753,7 @@ abstract class HttpServerTest extends WithHttpServer { assertTraces(1) { trace(spanCount(SUCCESS)) { sortSpansByStart() - serverSpan(it, null, null, method, SUCCESS, tags) + serverSpan(it, null, null, method, SUCCESS, spanTags) if (hasHandlerSpan()) { handlerSpan(it) } @@ -774,13 +768,12 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } where: - method | body | header | value | tags + method | body | header | value | spanTags 'GET' | null | 'x-datadog-test-both-header' | 'foo' | ['both_header_tag': 'foo'] 'GET' | null | 'x-datadog-test-request-header' | 'bar' | ['request_header_tag': 'bar'] } @@ -794,7 +787,7 @@ abstract class HttpServerTest extends WithHttpServer { def body = null def header = IG_RESPONSE_HEADER def mapping = 'mapped_response_header_tag' - def tags = ['mapped_response_header_tag': "$IG_RESPONSE_HEADER_VALUE"] + def spanTags = ['mapped_response_header_tag': "$IG_RESPONSE_HEADER_VALUE"] injectSysConfig(HTTP_SERVER_TAG_QUERY_STRING, "true") injectSysConfig(RESPONSE_HEADER_TAGS, "$header:$mapping") @@ -813,7 +806,7 @@ abstract class HttpServerTest extends WithHttpServer { assertTraces(1) { trace(spanCount(endpoint)) { sortSpansByStart() - serverSpan(it, null, null, method, endpoint, tags) + serverSpan(it, null, null, method, endpoint, spanTags) if (hasHandlerSpan()) { handlerSpan(it, endpoint) } @@ -828,8 +821,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -871,8 +863,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == 
DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -926,8 +917,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -974,8 +964,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1003,8 +992,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1047,8 +1035,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -1094,8 +1081,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1135,8 +1121,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1178,8 +1163,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1220,8 +1204,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1262,8 +1245,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1303,8 +1285,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1358,8 +1339,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -1405,8 +1385,7 @@ abstract class HttpServerTest 
extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1438,8 +1417,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1471,8 +1449,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1506,8 +1483,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1539,8 +1515,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1588,8 +1563,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } @@ -1632,8 +1606,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1671,8 +1644,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1708,8 +1680,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1765,8 +1736,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } } @@ -1818,8 +1788,7 @@ abstract class HttpServerTest extends WithHttpServer { if (isDataStreamsEnabled()) { StatsGroup first = TEST_DATA_STREAMS_WRITER.groups.find { it.parentHash == 0 } verifyAll(first) { - edgeTags.containsAll(DSM_EDGE_TAGS) - edgeTags.size() == DSM_EDGE_TAGS.size() + tags == DSM_EDGE_TAGS } } diff --git a/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/datastreams/RecordingDatastreamsPayloadWriter.groovy 
b/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/datastreams/RecordingDatastreamsPayloadWriter.groovy index cedcf14724b..b963a0a08bc 100644 --- a/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/datastreams/RecordingDatastreamsPayloadWriter.groovy +++ b/dd-java-agent/testing/src/main/groovy/datadog/trace/agent/test/datastreams/RecordingDatastreamsPayloadWriter.groovy @@ -1,5 +1,6 @@ package datadog.trace.agent.test.datastreams +import datadog.trace.api.datastreams.DataStreamsTags import datadog.trace.core.datastreams.DatastreamsPayloadWriter import datadog.trace.core.datastreams.StatsBucket import datadog.trace.core.datastreams.StatsGroup @@ -16,7 +17,7 @@ class RecordingDatastreamsPayloadWriter implements DatastreamsPayloadWriter { private final List<StatsGroup> groups = [] @SuppressWarnings('UnusedPrivateField') - private final Set<String> backlogs = [] + private final Set<DataStreamsTags> backlogs = [] private final Set<String> serviceNameOverrides = [] @@ -28,8 +29,8 @@ class RecordingDatastreamsPayloadWriter implements DatastreamsPayloadWriter { data.each { this.@groups.addAll(it.groups) } for (StatsBucket bucket : data) { if (bucket.backlogs != null) { - for (Map.Entry<List<String>, Long> backlog : bucket.backlogs) { - this.@backlogs.add(backlog.toString()) + for (Map.Entry<DataStreamsTags, Long> backlog : bucket.backlogs) { + this.@backlogs.add(backlog.getKey()) } } } @@ -47,7 +48,7 @@ class RecordingDatastreamsPayloadWriter implements DatastreamsPayloadWriter { Collections.unmodifiableList(new ArrayList<>(this.@groups)) } - synchronized List<String> getBacklogs() { + synchronized List<DataStreamsTags> getBacklogs() { Collections.unmodifiableList(new ArrayList<>(this.@backlogs)) } diff --git a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultDataStreamsMonitoring.java b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultDataStreamsMonitoring.java index d92fac69e87..58093fae2d2 100644 --- a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultDataStreamsMonitoring.java +++ b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultDataStreamsMonitoring.java @@ -3,12 +3,6 @@ import static datadog.communication.ddagent.DDAgentFeaturesDiscovery.V01_DATASTREAMS_ENDPOINT; import static datadog.trace.api.datastreams.DataStreamsContext.fromTags; import static datadog.trace.bootstrap.instrumentation.api.AgentTracer.activeSpan; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_IN; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_OUT; -import static datadog.trace.core.datastreams.TagsProcessor.DIRECTION_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.MANUAL_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TOPIC_TAG; -import static datadog.trace.core.datastreams.TagsProcessor.TYPE_TAG; import static datadog.trace.util.AgentThreadFactory.AgentThread.DATA_STREAMS_MONITORING; import static datadog.trace.util.AgentThreadFactory.THREAD_JOIN_TIMOUT_MS; import static datadog.trace.util.AgentThreadFactory.newAgentThread; @@ -19,12 +13,7 @@ import datadog.trace.api.Config; import datadog.trace.api.TraceConfig; import datadog.trace.api.WellKnownTags; -import datadog.trace.api.datastreams.Backlog; -import datadog.trace.api.datastreams.DataStreamsContext; -import datadog.trace.api.datastreams.InboxItem; -import datadog.trace.api.datastreams.NoopPathwayContext; -import datadog.trace.api.datastreams.PathwayContext; -import datadog.trace.api.datastreams.StatsPoint; +import datadog.trace.api.datastreams.*; import
datadog.trace.api.experimental.DataStreamsContextCarrier; import datadog.trace.api.time.TimeSource; import datadog.trace.bootstrap.instrumentation.api.AgentSpan; @@ -36,11 +25,9 @@ import datadog.trace.core.DDSpan; import datadog.trace.core.DDTraceCoreInfo; import datadog.trace.util.AgentTaskScheduler; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -57,9 +44,9 @@ public class DefaultDataStreamsMonitoring implements DataStreamsMonitoring, Even static final long FEATURE_CHECK_INTERVAL_NANOS = TimeUnit.MINUTES.toNanos(5); private static final StatsPoint REPORT = - new StatsPoint(Collections.emptyList(), 0, 0, 0, 0, 0, 0, 0, null); + new StatsPoint(DataStreamsTags.EMPTY, 0, 0, 0, 0, 0, 0, 0, null); private static final StatsPoint POISON_PILL = - new StatsPoint(Collections.emptyList(), 0, 0, 0, 0, 0, 0, 0, null); + new StatsPoint(DataStreamsTags.EMPTY, 0, 0, 0, 0, 0, 0, 0, null); private final Map<Long, StatsBucket> timeToBucket = new HashMap<>(); private final MpscArrayQueue<InboxItem> inbox = new MpscArrayQueue<>(1024); @@ -136,6 +123,9 @@ public DefaultDataStreamsMonitoring( this.propagator = new DataStreamsPropagator(this, this.timeSource, this.hashOfKnownTags, serviceNameOverride); + // configure global tags behavior + DataStreamsTags.setGlobalBaseHash(this.hashOfKnownTags); + DataStreamsTags.setServiceNameOverride(serviceNameOverride); } @Override @@ -223,15 +213,7 @@ public void mergePathwayContextIntoSpan(AgentSpan span, DataStreamsContextCarrie } } - public void trackBacklog(LinkedHashMap<String, String> sortedTags, long value) { - List<String> tags = new ArrayList<>(sortedTags.size()); - for (Map.Entry<String, String> entry : sortedTags.entrySet()) { - String tag = TagsProcessor.createTag(entry.getKey(), entry.getValue()); - if (tag == null) { - continue; - } - tags.add(tag); - } + public void trackBacklog(DataStreamsTags tags, long value) { inbox.offer(new Backlog(tags, value, timeSource.getCurrentTimeNanos(), getThreadServiceName())); } @@ -245,6 +227,11 @@ public void setCheckpoint(AgentSpan span, DataStreamsContext context) { @Override public void setConsumeCheckpoint(String type, String source, DataStreamsContextCarrier carrier) { + setConsumeCheckpoint(type, source, carrier, true); + } + + public void setConsumeCheckpoint( + String type, String source, DataStreamsContextCarrier carrier, Boolean isManual) { if (type == null || type.isEmpty() || source == null || source.isEmpty()) { log.warn("setConsumeCheckpoint should be called with non-empty type and source"); return; @@ -257,13 +244,14 @@ public void setConsumeCheckpoint(String type, String source, DataStreamsContextC } mergePathwayContextIntoSpan(span, carrier); - LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG, DIRECTION_IN); - sortedTags.put(MANUAL_TAG, "true"); - sortedTags.put(TOPIC_TAG, source); - sortedTags.put(TYPE_TAG, type); + DataStreamsTags tags; + if (isManual) { + tags = DataStreamsTags.createManual(type, DataStreamsTags.Direction.Inbound, source); + } else { + tags = DataStreamsTags.create(type, DataStreamsTags.Direction.Inbound, source); + } - setCheckpoint(span, fromTags(sortedTags)); + setCheckpoint(span, fromTags(tags)); } public void setProduceCheckpoint( @@ -278,16 +266,14 @@ public void setProduceCheckpoint( log.warn("SetProduceCheckpoint is called with no active span"); return; } - - LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>(); - sortedTags.put(DIRECTION_TAG,
DIRECTION_OUT); + DataStreamsTags tags; if (manualCheckpoint) { - sortedTags.put(MANUAL_TAG, "true"); + tags = DataStreamsTags.createManual(type, DataStreamsTags.Direction.Outbound, target); + } else { + tags = DataStreamsTags.create(type, DataStreamsTags.Direction.Outbound, target); } - sortedTags.put(TOPIC_TAG, target); - sortedTags.put(TYPE_TAG, type); - DataStreamsContext dsmContext = fromTags(sortedTags); + DataStreamsContext dsmContext = fromTags(tags); this.propagator.inject( span.with(dsmContext), carrier, DataStreamsContextCarrierAdapter.INSTANCE); } diff --git a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultPathwayContext.java b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultPathwayContext.java index 3d5a5266bf6..9fe9af7e9e6 100644 --- a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultPathwayContext.java +++ b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/DefaultPathwayContext.java @@ -12,19 +12,13 @@ import datadog.trace.api.ProcessTags; import datadog.trace.api.WellKnownTags; import datadog.trace.api.datastreams.DataStreamsContext; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.api.datastreams.PathwayContext; import datadog.trace.api.datastreams.StatsPoint; import datadog.trace.api.time.TimeSource; import datadog.trace.util.FNV64Hash; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import java.util.Base64; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -51,23 +45,7 @@ public class DefaultPathwayContext implements PathwayContext { // state variables used to memoize the pathway hash with // direction != current direction private long closestOppositeDirectionHash; - private String previousDirection; - - private static final Set<String> hashableTagKeys = - new HashSet<>( - Arrays.asList( - TagsProcessor.GROUP_TAG, - TagsProcessor.TYPE_TAG, - TagsProcessor.DIRECTION_TAG, - TagsProcessor.TOPIC_TAG, - TagsProcessor.EXCHANGE_TAG)); - - private static final Set<String> extraAggregationTagKeys = - new HashSet<>( - Arrays.asList( - TagsProcessor.DATASET_NAME_TAG, - TagsProcessor.DATASET_NAMESPACE_TAG, - TagsProcessor.MANUAL_TAG)); + private DataStreamsTags.Direction previousDirection; public DefaultPathwayContext( TimeSource timeSource, long hashOfKnownTags, String serviceNameOverride) { @@ -109,14 +87,6 @@ public synchronized void setCheckpoint( long startNanos = timeSource.getCurrentTimeNanos(); long nanoTicks = timeSource.getNanoTicks(); - // So far, each tag key has only one tag value, so we're initializing the capacity to match - // the number of tag keys for now. We should revisit this later if it's no longer the case.
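Here the per-checkpoint string assembly (deleted in the hunk just below) gives way to hashes precomputed on the tags value object itself. A minimal sketch of the two shapes side by side; the type and topic values are illustrative only, and the factory and accessor names are taken from the + lines of this diff:

    // Old path (deleted below): prefixed strings rebuilt on every checkpoint.
    LinkedHashMap<String, String> sortedTags = new LinkedHashMap<>();
    sortedTags.put("direction", "out"); // DIRECTION_TAG, DIRECTION_OUT
    sortedTags.put("topic", "orders");  // TOPIC_TAG ("orders" is a made-up example)
    sortedTags.put("type", "kafka");    // TYPE_TAG

    // New path: one immutable value object carrying both hashes.
    DataStreamsTags tags =
        DataStreamsTags.create("kafka", DataStreamsTags.Direction.Outbound, "orders");
    long nodeHash = tags.getHash();                   // replaces the PathwayHashBuilder deleted below
    long aggregationSeed = tags.getAggregationHash(); // replaces the DataSetHashBuilder deleted below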
- LinkedHashMap<String, String> sortedTags = context.sortedTags(); - List<String> allTags = new ArrayList<>(sortedTags.size()); - PathwayHashBuilder pathwayHashBuilder = - new PathwayHashBuilder(hashOfKnownTags, serviceNameOverride); - DataSetHashBuilder aggregationHashBuilder = new DataSetHashBuilder(); - if (!started) { long defaultTimestamp = context.defaultTimestamp(); if (defaultTimestamp == 0) { @@ -135,43 +105,32 @@ log.debug("Started {}", this); } - for (Map.Entry<String, String> entry : sortedTags.entrySet()) { - String tag = TagsProcessor.createTag(entry.getKey(), entry.getValue()); - if (tag == null) { - continue; - } - if (hashableTagKeys.contains(entry.getKey())) { - pathwayHashBuilder.addTag(tag); - } - if (extraAggregationTagKeys.contains(entry.getKey())) { - aggregationHashBuilder.addValue(tag); - } - allTags.add(tag); - } - - long nodeHash = generateNodeHash(pathwayHashBuilder); + // generate node hash + long nodeHash = context.tags().getHash(); // loop protection - a node should not be chosen as parent // for a sequential node with the same direction, as this // will cause a `cardinality explosion` for hash / parentHash tag values - if (sortedTags.containsKey(TagsProcessor.DIRECTION_TAG)) { - String direction = sortedTags.get(TagsProcessor.DIRECTION_TAG); - if (direction.equals(previousDirection)) { - hash = closestOppositeDirectionHash; - } else { - previousDirection = direction; - closestOppositeDirectionHash = hash; - } + DataStreamsTags.Direction direction = context.tags().getDirectionValue(); + if (direction == previousDirection && previousDirection != null) { + hash = closestOppositeDirectionHash; + } else { + previousDirection = direction; + closestOppositeDirectionHash = hash; } long newHash = generatePathwayHash(nodeHash, hash); - long aggregationHash = aggregationHashBuilder.addValue(newHash); + long aggregationHash = + FNV64Hash.continueHash( + context.tags().getAggregationHash(), + DataStreamsTags.longToBytes(newHash), + FNV64Hash.Version.v1); long pathwayLatencyNano = nanoTicks - pathwayStartNanoTicks; long edgeLatencyNano = nanoTicks - edgeStartNanoTicks; StatsPoint point = new StatsPoint( - allTags, + context.tags(), newHash, hash, aggregationHash, @@ -184,7 +143,6 @@ hash = newHash; pointConsumer.accept(point); - log.debug("Checkpoint set {}, hash source: {}", this, pathwayHashBuilder); } @Override @@ -290,7 +248,7 @@ private static DefaultPathwayContext decode( long pathwayStartMillis = VarEncodingHelper.decodeSignedVarLong(input); long pathwayStartNanos = TimeUnit.MILLISECONDS.toNanos(pathwayStartMillis); - // Convert the start time to the current JVM's nanoclock + // Convert the start time to the current JVM's nano clock long nowNanos = timeSource.getCurrentTimeNanos(); long nanosSinceStart = nowNanos - pathwayStartNanos; long nowNanoTicks = timeSource.getNanoTicks(); @@ -310,51 +268,6 @@ private static DefaultPathwayContext decode( serviceNameOverride); } - static class DataSetHashBuilder { - private long currentHash = 0L; - - public long addValue(String val) { - currentHash = FNV64Hash.generateHash(currentHash + val, FNV64Hash.Version.v1); - return currentHash; - } - - public long addValue(long val) { - byte[] b = - new byte[] { - (byte) val, - (byte) (val >> 8), - (byte) (val >> 16), - (byte) (val >> 24), - (byte) (val >> 32), - (byte) (val >> 40), - (byte) (val >> 48), - (byte) (val >> 56) - }; - - currentHash = FNV64Hash.continueHash(currentHash, b, FNV64Hash.Version.v1); - return currentHash; - } - } - -
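The DataSetHashBuilder removed above decomposed a long into little-endian bytes before continuing the FNV-1 64-bit hash; the new checkpoint code appears to delegate the same conversion to DataStreamsTags.longToBytes. A standalone sketch of that conversion, written to match the deleted addValue(long):

    // Little-endian long-to-bytes, byte-for-byte what DataSetHashBuilder.addValue(long) did.
    static byte[] longToBytes(long val) {
      byte[] b = new byte[8];
      for (int i = 0; i < 8; i++) {
        b[i] = (byte) (val >> (8 * i)); // least-significant byte first
      }
      return b;
    }

    // The aggregation hash of a checkpoint then mirrors the + lines above:
    // FNV64Hash.continueHash(tags.getAggregationHash(), longToBytes(newHash), FNV64Hash.Version.v1)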
private static class PathwayHashBuilder { - private long hash; - - public PathwayHashBuilder(long baseHash, String serviceNameOverride) { - hash = baseHash; - if (serviceNameOverride != null) { - addTag(serviceNameOverride); - } - } - - public void addTag(String tag) { - hash = FNV64Hash.continueHash(hash, tag, FNV64Hash.Version.v1); - } - - public long getHash() { - return hash; - } - } - public static long getBaseHash(WellKnownTags wellKnownTags) { StringBuilder builder = new StringBuilder(); builder.append(wellKnownTags.getService()); @@ -371,10 +284,6 @@ public static long getBaseHash(WellKnownTags wellKnownTags) { return FNV64Hash.generateHash(builder.toString(), FNV64Hash.Version.v1); } - private long generateNodeHash(PathwayHashBuilder pathwayHashBuilder) { - return pathwayHashBuilder.getHash(); - } - private long generatePathwayHash(long nodeHash, long parentHash) { outputBuffer.clear(); outputBuffer.writeLongLE(nodeHash); diff --git a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/MsgPackDatastreamsPayloadWriter.java b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/MsgPackDatastreamsPayloadWriter.java index 6dbf342b27b..0df8e7291d7 100644 --- a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/MsgPackDatastreamsPayloadWriter.java +++ b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/MsgPackDatastreamsPayloadWriter.java @@ -9,6 +9,7 @@ import datadog.trace.api.Config; import datadog.trace.api.ProcessTags; import datadog.trace.api.WellKnownTags; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.bootstrap.instrumentation.api.UTF8BytesString; import datadog.trace.common.metrics.Sink; import java.util.Collection; @@ -160,8 +161,7 @@ private void writeBucket(StatsBucket bucket, Writable packer) { Collection<StatsGroup> groups = bucket.getGroups(); packer.startArray(groups.size()); for (StatsGroup group : groups) { - boolean firstNode = group.getEdgeTags().isEmpty(); - + boolean firstNode = group.getTags().nonNullSize() == 0; packer.startMap(firstNode ?
5 : 6); /* 1 */ @@ -187,26 +187,34 @@ private void writeBucket(StatsBucket bucket, Writable packer) { if (!firstNode) { /* 6 */ packer.writeUTF8(EDGE_TAGS); - packer.startArray(group.getEdgeTags().size()); - for (String tag : group.getEdgeTags()) { - packer.writeString(tag, null); - } + writeDataStreamsTags(group.getTags(), packer); } } } - private void writeBacklogs(Collection<Map.Entry<List<String>, Long>> backlogs, Writable packer) { + private void writeBacklogs( + Collection<Map.Entry<DataStreamsTags, Long>> backlogs, Writable packer) { packer.writeUTF8(BACKLOGS); packer.startArray(backlogs.size()); - for (Map.Entry<List<String>, Long> entry : backlogs) { + for (Map.Entry<DataStreamsTags, Long> entry : backlogs) { packer.startMap(2); + packer.writeUTF8(BACKLOG_TAGS); - packer.startArray(entry.getKey().size()); - for (String tag : entry.getKey()) { - packer.writeString(tag, null); - } + writeDataStreamsTags(entry.getKey(), packer); + packer.writeUTF8(BACKLOG_VALUE); packer.writeLong(entry.getValue()); } } + + private void writeDataStreamsTags(DataStreamsTags tags, Writable packer) { + packer.startArray(tags.nonNullSize()); + + for (int i = 0; i < tags.size(); i++) { + String val = tags.tagByIndex(i); + if (val != null) { + packer.writeString(val, null); + } + } + } } diff --git a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsBucket.java b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsBucket.java index 26bdd9ba105..c61550d0e3e 100644 --- a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsBucket.java +++ b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsBucket.java @@ -1,17 +1,17 @@ package datadog.trace.core.datastreams; import datadog.trace.api.datastreams.Backlog; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.api.datastreams.StatsPoint; import java.util.Collection; import java.util.HashMap; -import java.util.List; import java.util.Map; public class StatsBucket { private final long startTimeNanos; private final long bucketDurationNanos; private final Map<Long, StatsGroup> hashToGroup = new HashMap<>(); - private final Map<List<String>, Long> backlogs = new HashMap<>(); + private final Map<DataStreamsTags, Long> backlogs = new HashMap<>(); public StatsBucket(long startTimeNanos, long bucketDurationNanos) { this.startTimeNanos = startTimeNanos; @@ -27,7 +27,7 @@ public void addPoint(StatsPoint statsPoint) { statsPoint.getAggregationHash(), hash -> new StatsGroup( - statsPoint.getEdgeTags(), statsPoint.getHash(), statsPoint.getParentHash())) + statsPoint.getTags(), statsPoint.getHash(), statsPoint.getParentHash())) .add( statsPoint.getPathwayLatencyNano(), statsPoint.getEdgeLatencyNano(), @@ -36,7 +36,7 @@ public void addBacklog(Backlog backlog) { backlogs.compute( - backlog.getSortedTags(), + backlog.getTags(), (k, v) -> (v == null) ?
backlog.getValue() : Math.max(v, backlog.getValue())); } @@ -52,7 +52,7 @@ public Collection<StatsGroup> getGroups() { return hashToGroup.values(); } - public Collection<Map.Entry<List<String>, Long>> getBacklogs() { + public Collection<Map.Entry<DataStreamsTags, Long>> getBacklogs() { return backlogs.entrySet(); } } diff --git a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsGroup.java b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsGroup.java index b5923c36e6d..7831b58e365 100644 --- a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsGroup.java +++ b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/StatsGroup.java @@ -1,21 +1,21 @@ package datadog.trace.core.datastreams; +import datadog.trace.api.datastreams.DataStreamsTags; import datadog.trace.core.histogram.Histogram; import datadog.trace.core.histogram.Histograms; -import java.util.List; public class StatsGroup { private static final double NANOSECONDS_TO_SECOND = 1_000_000_000d; - private final List<String> edgeTags; + private final DataStreamsTags tags; private final long hash; private final long parentHash; private final Histogram pathwayLatency; private final Histogram edgeLatency; private final Histogram payloadSize; - public StatsGroup(List<String> edgeTags, long hash, long parentHash) { - this.edgeTags = edgeTags; + public StatsGroup(DataStreamsTags tags, long hash, long parentHash) { + this.tags = tags; this.hash = hash; this.parentHash = parentHash; pathwayLatency = Histograms.newLogHistogram(); @@ -31,8 +31,8 @@ public void add(long pathwayLatencyNano, long edgeLatencyNano, long payloadSizeB if (payloadSizeBytes != 0) payloadSize.accept((double) payloadSizeBytes); } - public List<String> getEdgeTags() { - return edgeTags; + public DataStreamsTags getTags() { + return tags; } public long getHash() { @@ -58,8 +58,8 @@ public Histogram getPayloadSize() { @Override public String toString() { return "StatsGroup{" - + "edgeTags='" - + edgeTags + + "tags='" + + tags + '\'' + ", hash=" + hash diff --git a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/TagsProcessor.java b/dd-trace-core/src/main/java/datadog/trace/core/datastreams/TagsProcessor.java deleted file mode 100644 index 1838b47239b..00000000000 --- a/dd-trace-core/src/main/java/datadog/trace/core/datastreams/TagsProcessor.java +++ /dev/null @@ -1,142 +0,0 @@ -package datadog.trace.core.datastreams; - -import datadog.trace.api.cache.DDCache; -import datadog.trace.api.cache.DDCaches; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -public class TagsProcessor { - public static final class StringPrefix implements Function<String, String> { - private final String prefix; - - public StringPrefix(String prefix) { - this.prefix = prefix; - } - - @Override - public String apply(String key) { - return prefix + key; - } - } - - public static final String MANUAL_TAG = "manual_checkpoint"; - public static final String TYPE_TAG = "type"; - private static final DDCache<String, String> TYPE_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final Function<String, String> TYPE_TAG_PREFIX = new StringPrefix("type:"); - - public static final String DIRECTION_TAG = "direction"; - // service centric direction - data flowing into the service - public static final String DIRECTION_IN = "in"; - // service centric direction - data flowing out of the service - public static final String DIRECTION_OUT = "out"; - private static final DDCache<String, String> DIRECTION_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final Function<String, String> DIRECTION_TAG_PREFIX = - new StringPrefix("direction:"); - // SNS Topic - public static
final String TOPIC_TAG = "topic"; - private static final DDCache<String, String> TOPIC_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final Function<String, String> TOPIC_TAG_PREFIX = new StringPrefix("topic:"); - // EventBridge Bus - public static final String BUS_TAG = "bus"; - private static final DDCache<String, String> BUS_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final Function<String, String> BUS_TAG_PREFIX = new StringPrefix("bus:"); - - public static final String PARTITION_TAG = "partition"; - private static final DDCache<String, String> PARTITION_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final Function<String, String> PARTITION_TAG_PREFIX = - new StringPrefix("partition:"); - public static final String GROUP_TAG = "group"; - public static final String CONSUMER_GROUP_TAG = "consumer_group"; - private static final DDCache<String, String> GROUP_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final DDCache<String, String> CONSUMER_GROUP_TAG_CACHE = - DDCaches.newFixedSizeCache(32); - private static final Function<String, String> GROUP_TAG_PREFIX = new StringPrefix("group:"); - private static final Function<String, String> CONSUMER_GROUP_TAG_PREFIX = - new StringPrefix("consumer_group:"); - public static final String SUBSCRIPTION_TAG = "subscription"; - private static final DDCache<String, String> SUBSCRIPTION_TAG_CACHE = - DDCaches.newFixedSizeCache(32); - private static final Function<String, String> SUBSCRIPTION_TAG_PREFIX = - new StringPrefix("subscription:"); - public static final String EXCHANGE_TAG = "exchange"; - private static final DDCache<String, String> EXCHANGE_TAG_CACHE = DDCaches.newFixedSizeCache(32); - private static final Function<String, String> EXCHANGE_TAG_PREFIX = new StringPrefix("exchange:"); - - public static final String DATASET_NAME_TAG = "ds.name"; - private static final DDCache<String, String> DATASET_NAME_TAG_CACHE = - DDCaches.newFixedSizeCache(32); - private static final Function<String, String> DATASET_NAME_TAG_PREFIX = - new StringPrefix("ds.name:"); - - public static final String DATASET_NAMESPACE_TAG = "ds.namespace"; - private static final DDCache<String, String> DATASET_NAMESPACE_TAG_CACHE = - DDCaches.newFixedSizeCache(32); - private static final Function<String, String> DATASET_NAMESPACE_TAG_PREFIX = - new StringPrefix("ds.namespace:"); - - public static final String HAS_ROUTING_KEY_TAG = "has_routing_key"; - private static final DDCache<String, String> HAS_ROUTING_KEY_TAG_CACHE = - DDCaches.newFixedSizeCache(2); // true or false - private static final Function<String, String> HAS_ROUTING_KEY_TAG_PREFIX = - new StringPrefix("has_routing_key:"); - - public static final String KAFKA_CLUSTER_ID_TAG = "kafka_cluster_id"; - private static final DDCache<String, String> KAFKA_CLUSTER_ID_TAG_CACHE = - DDCaches.newFixedSizeCache(32); - private static final Function<String, String> KAFKA_CLUSTER_ID_TAG_PREFIX = - new StringPrefix("kafka_cluster_id:"); - - private static final Map<String, DDCache<String, String>> TAG_TO_CACHE = createTagToCacheMap(); - private static final Map<String, Function<String, String>> TAG_TO_PREFIX = createTagToPrefixMap(); - - private static Map<String, DDCache<String, String>> createTagToCacheMap() { - Map<String, DDCache<String, String>> result = new HashMap<>(); - result.put(TYPE_TAG, TYPE_TAG_CACHE); - result.put(DIRECTION_TAG, DIRECTION_TAG_CACHE); - result.put(TOPIC_TAG, TOPIC_TAG_CACHE); - result.put(BUS_TAG, BUS_TAG_CACHE); - result.put(PARTITION_TAG, PARTITION_TAG_CACHE); - result.put(GROUP_TAG, GROUP_TAG_CACHE); - result.put(CONSUMER_GROUP_TAG, CONSUMER_GROUP_TAG_CACHE); - result.put(SUBSCRIPTION_TAG, SUBSCRIPTION_TAG_CACHE); - result.put(EXCHANGE_TAG, EXCHANGE_TAG_CACHE); - result.put(HAS_ROUTING_KEY_TAG, HAS_ROUTING_KEY_TAG_CACHE); - result.put(KAFKA_CLUSTER_ID_TAG, KAFKA_CLUSTER_ID_TAG_CACHE); - result.put(DATASET_NAME_TAG, DATASET_NAME_TAG_CACHE); - result.put(DATASET_NAMESPACE_TAG, DATASET_NAMESPACE_TAG_CACHE); - return
result; - } - - private static Map<String, Function<String, String>> createTagToPrefixMap() { - Map<String, Function<String, String>> result = new HashMap<>(); - result.put(TYPE_TAG, TYPE_TAG_PREFIX); - result.put(DIRECTION_TAG, DIRECTION_TAG_PREFIX); - result.put(TOPIC_TAG, TOPIC_TAG_PREFIX); - result.put(BUS_TAG, BUS_TAG_PREFIX); - result.put(PARTITION_TAG, PARTITION_TAG_PREFIX); - result.put(GROUP_TAG, GROUP_TAG_PREFIX); - result.put(CONSUMER_GROUP_TAG, CONSUMER_GROUP_TAG_PREFIX); - result.put(SUBSCRIPTION_TAG, SUBSCRIPTION_TAG_PREFIX); - result.put(EXCHANGE_TAG, EXCHANGE_TAG_PREFIX); - result.put(HAS_ROUTING_KEY_TAG, HAS_ROUTING_KEY_TAG_PREFIX); - result.put(KAFKA_CLUSTER_ID_TAG, KAFKA_CLUSTER_ID_TAG_PREFIX); - result.put(DATASET_NAME_TAG, DATASET_NAME_TAG_PREFIX); - result.put(DATASET_NAMESPACE_TAG, DATASET_NAMESPACE_TAG_PREFIX); - return result; - } - - // Creates the tag string using the provided tagKey and tagValue. - // Returns null if either tagKey or tagValue is null. - public static String createTag(String tagKey, String tagValue) { - if (tagKey == null || tagValue == null) { - return null; - } - DDCache<String, String> cache = TAG_TO_CACHE.get(tagKey); - Function<String, String> prefix = TAG_TO_PREFIX.get(tagKey); - if (cache != null && prefix != null) { - return cache.computeIfAbsent(tagValue, prefix); - } - return tagKey + ":" + tagValue; - } -} diff --git a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataSetHashBuilderTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataSetHashBuilderTest.groovy deleted file mode 100644 index 8ae9cee4405..00000000000 --- a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataSetHashBuilderTest.groovy +++ /dev/null @@ -1,22 +0,0 @@ -package datadog.trace.core.datastreams - -import datadog.trace.core.test.DDCoreSpecification - -class DataSetHashBuilderTest extends DDCoreSpecification { - - def "Dataset hash generation"() { - given: - var tag = "ds.namespace=s3://my_bucket" - var builderOne = new DefaultPathwayContext.DataSetHashBuilder() - builderOne.addValue(tag) - - var builderTwo = new DefaultPathwayContext.DataSetHashBuilder() - builderTwo.addValue(tag) - - expect: - // hashing should be consistent - assert builderOne.addValue("0") == builderTwo.addValue("0") - // different parent hashes should produce different results - assert builderOne.addValue("1") != builderTwo.addValue("0") - } -} diff --git a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataStreamsWritingTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataStreamsWritingTest.groovy index e9bf3803e1e..c37033e23d9 100644 --- a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataStreamsWritingTest.groovy +++ b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DataStreamsWritingTest.groovy @@ -7,6 +7,7 @@ import datadog.trace.api.Config import datadog.trace.api.ProcessTags import datadog.trace.api.TraceConfig import datadog.trace.api.WellKnownTags +import datadog.trace.api.datastreams.DataStreamsTags import datadog.trace.api.time.ControllableTimeSource import datadog.trace.api.datastreams.StatsPoint import datadog.trace.core.DDTraceCoreInfo @@ -82,8 +83,8 @@ class DataStreamsWritingTest extends DDCoreSpecification { def dataStreams = new DefaultDataStreamsMonitoring(fakeConfig, sharedCommObjects, timeSource, { traceConfig }) dataStreams.start() dataStreams.setThreadServiceName(serviceNameOverride) - dataStreams.add(new StatsPoint([], 9, 0, 10, timeSource.currentTimeNanos, 0, 0, 0, serviceNameOverride)) - dataStreams.trackBacklog(new LinkedHashMap<>(["partition":
"1", "topic": "testTopic", "type": "kafka_produce"]), 130) + dataStreams.add(new StatsPoint(DataStreamsTags.create(null, null), 9, 0, 10, timeSource.currentTimeNanos, 0, 0, 0, serviceNameOverride)) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_produce", "testTopic", "1", null, null), 130) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) // force flush dataStreams.report() @@ -140,15 +141,15 @@ class DataStreamsWritingTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(fakeConfig, sharedCommObjects, timeSource, { traceConfig }) dataStreams.start() - dataStreams.add(new StatsPoint([], 9, 0, 10, timeSource.currentTimeNanos, 0, 0, 0, null)) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) - dataStreams.trackBacklog(new LinkedHashMap<>(["partition": "1", "topic": "testTopic", "type": "kafka_produce"]), 100) - dataStreams.trackBacklog(new LinkedHashMap<>(["partition": "1", "topic": "testTopic", "type": "kafka_produce"]), 130) + dataStreams.add(new StatsPoint(DataStreamsTags.create(null, null), 9, 0, 10, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(DataStreamsTags.create("testType", DataStreamsTags.Direction.Inbound, "testTopic", "testGroup", null), 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_produce", "testTopic", "1", null, null), 100) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_produce", "testTopic", "1", null, null), 130) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS - 100l) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 5, timeSource.currentTimeNanos, SECONDS.toNanos(10), SECONDS.toNanos(10), 10, null)) + dataStreams.add(new StatsPoint(DataStreamsTags.create("testType", DataStreamsTags.Direction.Inbound, "testTopic", "testGroup", null), 1, 2, 5, timeSource.currentTimeNanos, SECONDS.toNanos(10), SECONDS.toNanos(10), 10, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 5, timeSource.currentTimeNanos, SECONDS.toNanos(5), SECONDS.toNanos(5), 5, null)) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic2"], 3, 4, 6, timeSource.currentTimeNanos, SECONDS.toNanos(2), 0, 2, null)) + dataStreams.add(new StatsPoint(DataStreamsTags.create("testType", DataStreamsTags.Direction.Inbound, "testTopic", "testGroup", null), 1, 2, 5, timeSource.currentTimeNanos, SECONDS.toNanos(5), SECONDS.toNanos(5), 5, null)) + dataStreams.add(new StatsPoint(DataStreamsTags.create("testType", DataStreamsTags.Direction.Inbound, "testTopic2", "testGroup", null), 3, 4, 6, timeSource.currentTimeNanos, SECONDS.toNanos(2), 0, 2, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.close() @@ -225,10 +226,11 @@ class DataStreamsWritingTest extends DDCoreSpecification { assert unpacker.unpackString() == "ParentHash" assert unpacker.unpackLong() == 2 assert unpacker.unpackString() == "EdgeTags" - assert unpacker.unpackArrayHeader() == 3 + assert unpacker.unpackArrayHeader() == 4 + assert unpacker.unpackString() == "direction:in" + assert unpacker.unpackString() == "topic:testTopic" assert unpacker.unpackString() == "type:testType" assert unpacker.unpackString() == "group:testGroup" - assert unpacker.unpackString() == "topic:testTopic" } } 
@@ -238,9 +240,9 @@ class DataStreamsWritingTest extends DDCoreSpecification { assert unpacker.unpackMapHeader() == 2 assert unpacker.unpackString() == "Tags" assert unpacker.unpackArrayHeader() == 3 - assert unpacker.unpackString() == "partition:1" assert unpacker.unpackString() == "topic:testTopic" assert unpacker.unpackString() == "type:kafka_produce" + assert unpacker.unpackString() == "partition:1" assert unpacker.unpackString() == "Value" assert unpacker.unpackLong() == 130 @@ -268,10 +270,11 @@ class DataStreamsWritingTest extends DDCoreSpecification { assert unpacker.unpackString() == "ParentHash" assert unpacker.unpackLong() == (hash == 1 ? 2 : 4) assert unpacker.unpackString() == "EdgeTags" - assert unpacker.unpackArrayHeader() == 3 + assert unpacker.unpackArrayHeader() == 4 + assert unpacker.unpackString() == "direction:in" + assert unpacker.unpackString() == (hash == 1 ? "topic:testTopic" : "topic:testTopic2") assert unpacker.unpackString() == "type:testType" assert unpacker.unpackString() == "group:testGroup" - assert unpacker.unpackString() == (hash == 1 ? "topic:testTopic" : "topic:testTopic2") } assert unpacker.unpackString() == "ProductMask" diff --git a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultDataStreamsMonitoringTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultDataStreamsMonitoringTest.groovy index ffcd15cd0c3..bf7de275af9 100644 --- a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultDataStreamsMonitoringTest.groovy +++ b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultDataStreamsMonitoringTest.groovy @@ -4,6 +4,7 @@ import datadog.communication.ddagent.DDAgentFeaturesDiscovery import datadog.trace.api.Config import datadog.trace.api.TraceConfig import datadog.trace.api.WellKnownTags +import datadog.trace.api.datastreams.DataStreamsTags import datadog.trace.api.datastreams.StatsPoint import datadog.trace.api.experimental.DataStreamsContextCarrier import datadog.trace.api.time.ControllableTimeSource @@ -40,7 +41,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 0, 0, 0, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(DataStreamsTags.create("testType", null, "testTopic", "testGroup", null), 0, 0, 0, timeSource.currentTimeNanos, 0, 0, 0, null)) dataStreams.report() then: @@ -130,7 +131,8 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -145,8 +147,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() 
== 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -176,7 +180,8 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, bucketDuration) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(bucketDuration) then: @@ -190,8 +195,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -219,9 +226,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 3, 4, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 3, 4, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS - 100l) dataStreams.report() @@ -236,8 +244,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -265,9 +275,11 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + def tg2 = DataStreamsTags.create("testType", null, "testTopic2", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic2"], 3, 4, 6, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg2, 3, 4, 6, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS - 100l) 
dataStreams.close() @@ -282,8 +294,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -293,8 +307,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic2"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic2" + tags.nonNullSize() == 3 hash == 3 parentHash == 4 } @@ -321,11 +337,11 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.trackBacklog(new LinkedHashMap<>(["consumer_group": "testGroup", "partition": "2", "topic": "testTopic", "type": "kafka_commit"]), 23) - dataStreams.trackBacklog(new LinkedHashMap<>(["consumer_group": "testGroup", "partition": "2", "topic": "testTopic", "type": "kafka_commit"]), 24) - dataStreams.trackBacklog(new LinkedHashMap<>(["partition": "2", "topic": "testTopic", "type": "kafka_produce"]), 23) - dataStreams.trackBacklog(new LinkedHashMap<>(["partition": "2", "topic": "testTopic2", "type": "kafka_produce"]), 23) - dataStreams.trackBacklog(new LinkedHashMap<>(["partition": "2", "topic": "testTopic", "type": "kafka_produce"]), 45) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_commit", "testTopic", "2", null, "testGroup"), 23) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_commit", "testTopic", "2", null, "testGroup"), 24) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_produce", "testTopic", "2", null, null), 23) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_produce", "testTopic2", "2", null, null), 23) + dataStreams.trackBacklog(DataStreamsTags.createWithPartition("kafka_produce", "testTopic", "2", null, null), 45) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -338,18 +354,17 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { with(payloadWriter.buckets.get(0)) { backlogs.size() == 3 - List<Map.Entry<List<String>, Long>> sortedBacklogs = new ArrayList<>(backlogs) - sortedBacklogs.sort({ it.key.toString() }) - with(sortedBacklogs[0]) { - it.key == ["consumer_group:testGroup", "partition:2", "topic:testTopic", "type:kafka_commit"] + def list = backlogs.sort({ it.key.toString() }) + with(list[0]) { + it.key == DataStreamsTags.createWithPartition("kafka_commit", "testTopic", "2", null, "testGroup") it.value == 24 } - with(sortedBacklogs[1]) { - it.key == ["partition:2", "topic:testTopic", "type:kafka_produce"] + with(list[1]) { + it.key == DataStreamsTags.createWithPartition("kafka_produce", "testTopic", "2", null, null) it.value == 45 } - with(sortedBacklogs[2]) { - it.key == ["partition:2", "topic:testTopic2", "type:kafka_produce"] + with(list[2]) { + it.key == DataStreamsTags.createWithPartition("kafka_produce", "testTopic2", "2", null, null) it.value == 23 } } @@ -376,9 +391,11 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: def dataStreams = new
DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) - timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic2"], 3, 4, 6, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) + timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS*10) + def tg2 = DataStreamsTags.create("testType", null, "testTopic2", "testGroup", null) + dataStreams.add(new StatsPoint(tg2, 3, 4, 6, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -391,10 +408,12 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { with(payloadWriter.buckets.get(0)) { groups.size() == 1 - + groups with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.nonNullSize() == 3 + tags.getType() == "type:testType" + tags.getGroup() == "group:testGroup" + tags.getTopic() == "topic:testTopic" hash == 1 parentHash == 2 } @@ -404,8 +423,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic2"]) - edgeTags.size() == 3 + tags.getType() == "type:testType" + tags.getGroup() == "group:testGroup" + tags.getTopic() == "topic:testTopic2" + tags.nonNullSize() == 3 hash == 3 parentHash == 4 } @@ -431,14 +452,15 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { } when: + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 1, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 1, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS - 100l) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 1, timeSource.currentTimeNanos, SECONDS.toNanos(10), SECONDS.toNanos(10), 10, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 1, timeSource.currentTimeNanos, SECONDS.toNanos(10), SECONDS.toNanos(10), 10, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2,1, timeSource.currentTimeNanos, SECONDS.toNanos(5), SECONDS.toNanos(5), 5, null)) - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic2"], 3, 4, 5, timeSource.currentTimeNanos, SECONDS.toNanos(2), 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2,1, timeSource.currentTimeNanos, SECONDS.toNanos(5), SECONDS.toNanos(5), 5, null)) + dataStreams.add(new StatsPoint(tg, 3, 4, 5, timeSource.currentTimeNanos, SECONDS.toNanos(2), 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -453,8 +475,10 @@ class 
DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 Math.abs((pathwayLatency.getMaxValue()-10)/10) < 0.01 @@ -470,16 +494,20 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { with(sortedGroups[0]) { hash == 1 parentHash == 2 - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 Math.abs((pathwayLatency.getMaxValue()-5)/5) < 0.01 } with(sortedGroups[1]) { hash == 3 parentHash == 4 - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic2"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 Math.abs((pathwayLatency.getMaxValue()-2)/2) < 0.01 } } @@ -507,7 +535,8 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: "reporting points when data streams is not supported" def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -537,7 +566,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { timeSource.advance(FEATURE_CHECK_INTERVAL_NANOS) dataStreams.report() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -551,8 +580,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -583,7 +614,8 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { dataStreams.start() supportsDataStreaming = false dataStreams.onEvent(EventListener.EventType.DOWNGRADED, "") - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -600,7 +632,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { timeSource.advance(FEATURE_CHECK_INTERVAL_NANOS) dataStreams.report() - dataStreams.add(new 
StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -614,8 +646,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -643,9 +677,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { } when: "reporting points when data streams is not enabled" + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -661,7 +696,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { dsmEnabled = true dataStreams.report() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -675,8 +710,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -694,7 +731,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: "submitting points after being disabled" payloadWriter.buckets.clear() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -728,7 +765,8 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: "reporting points when data streams is not supported" def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -745,7 +783,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { 
timeSource.advance(FEATURE_CHECK_INTERVAL_NANOS) dataStreams.report() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -759,7 +797,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { dsmEnabled = true dataStreams.report() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -773,8 +811,10 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { groups.size() == 1 with(groups.iterator().next()) { - edgeTags.containsAll(["type:testType", "group:testGroup", "topic:testTopic"]) - edgeTags.size() == 3 + tags.type == "type:testType" + tags.group == "group:testGroup" + tags.topic == "topic:testTopic" + tags.nonNullSize() == 3 hash == 1 parentHash == 2 } @@ -804,7 +844,8 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { when: "reporting points when data streams is not supported" def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() @@ -820,7 +861,7 @@ class DefaultDataStreamsMonitoringTest extends DDCoreSpecification { dsmEnabled = true dataStreams.report() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) + dataStreams.add(new StatsPoint(tg, 1, 2, 3, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(DEFAULT_BUCKET_DURATION_NANOS) dataStreams.report() diff --git a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultPathwayContextTest.groovy b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultPathwayContextTest.groovy index 80249d77cda..802670b6fe5 100644 --- a/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultPathwayContextTest.groovy +++ b/dd-trace-core/src/test/groovy/datadog/trace/core/datastreams/DefaultPathwayContextTest.groovy @@ -7,6 +7,7 @@ import datadog.trace.api.ProcessTags import datadog.trace.api.TagMap import datadog.trace.api.TraceConfig import datadog.trace.api.WellKnownTags +import datadog.trace.api.datastreams.DataStreamsTags import datadog.trace.api.datastreams.StatsPoint import datadog.trace.api.time.ControllableTimeSource import datadog.trace.bootstrap.instrumentation.api.AgentPropagation @@ -55,7 +56,7 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(50) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", null)), pointConsumer) then: context.isStarted() @@ -70,18 +71,21 @@ class DefaultPathwayContextTest 
extends DDCoreSpecification { when: timeSource.advance(50) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) timeSource.advance(25) - context.setCheckpoint( - fromTags(new LinkedHashMap<>(["group": "group", "topic": "topic", "type": "kafka"])), pointConsumer) + def tags = DataStreamsTags.create("kafka", DataStreamsTags.Direction.Outbound, "topic", "group", null) + context.setCheckpoint(fromTags(tags), pointConsumer) then: context.isStarted() pointConsumer.points.size() == 2 verifyFirstPoint(pointConsumer.points[0]) with(pointConsumer.points[1]) { - edgeTags == ["group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 3 + tags.group == "group:group" + tags.topic == "topic:topic" + tags.type == "type:kafka" + tags.getDirection() == "direction:out" + tags.nonNullSize() == 4 parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == 25 @@ -97,15 +101,17 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(25) context.setCheckpoint( - create(new LinkedHashMap<>(["group": "group", "topic": "topic", "type": "kafka"]), 0, 72), + create(DataStreamsTags.create("kafka", null, "topic", "group", null), 0, 72), pointConsumer) then: context.isStarted() pointConsumer.points.size() == 1 with(pointConsumer.points[0]) { - edgeTags == ["group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 3 + tags.getGroup() == "group:group" + tags.getTopic() == "topic:topic" + tags.getType() == "type:kafka" + tags.nonNullSize() == 3 hash != 0 payloadSizeBytes == 72 } @@ -118,29 +124,34 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(50) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["direction": "out", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("kafka", DataStreamsTags.Direction.Outbound)), pointConsumer) timeSource.advance(25) - context.setCheckpoint( - fromTags(new LinkedHashMap<>(["direction": "in", "group": "group", "topic": "topic", "type": "kafka"])), pointConsumer) + def tg = DataStreamsTags.create("kafka", DataStreamsTags.Direction.Inbound, "topic", "group", null) + context.setCheckpoint(fromTags(tg), pointConsumer) timeSource.advance(30) - context.setCheckpoint( - fromTags(new LinkedHashMap<>(["direction": "in", "group": "group", "topic": "topic", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(tg), pointConsumer) then: context.isStarted() pointConsumer.points.size() == 3 verifyFirstPoint(pointConsumer.points[0]) with(pointConsumer.points[1]) { - edgeTags == ["direction:in", "group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 4 + tags.nonNullSize() == 4 + tags.direction == "direction:in" + tags.group == "group:group" + tags.topic == "topic:topic" + tags.type == "type:kafka" parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == 25 edgeLatencyNano == 25 } with(pointConsumer.points[2]) { - edgeTags == ["direction:in", "group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 4 + tags.nonNullSize() == 4 + tags.direction == "direction:in" + tags.group == "group:group" + tags.topic == "topic:topic" + tags.type == "type:kafka" // this point should have the first point as parent, // as the loop protection will reset the parent if two identical // points (same hash for tag values) are about to form a hierarchy @@ -170,19 +181,20 @@ class 
DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "s3", "ds.namespace": "my_bucket", "ds.name": "my_object.csv", "direction": "in"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.createWithDataset("s3", DataStreamsTags.Direction.Inbound, null, "my_object.csv", "my_bucket")), pointConsumer) def encoded = context.encode() timeSource.advance(MILLISECONDS.toNanos(2)) def decodedContext = DefaultPathwayContext.decode(timeSource, baseHash, null, encoded) timeSource.advance(MILLISECONDS.toNanos(25)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "s3", "ds.namespace": "my_bucket", "ds.name": "my_object.csv", "direction": "out"])), pointConsumer) + def tg = DataStreamsTags.createWithDataset("s3", DataStreamsTags.Direction.Outbound, null, "my_object.csv", "my_bucket") + context.setCheckpoint(fromTags(tg), pointConsumer) then: decodedContext.isStarted() pointConsumer.points.size() == 2 // all points should have datasetHash, which is not equal to hash or 0 - for (var i = 0; i < pointConsumer.points.size(); i++){ + for (def i = 0; i < pointConsumer.points.size(); i++){ pointConsumer.points[i].aggregationHash != pointConsumer.points[i].hash pointConsumer.points[i].aggregationHash != 0 } @@ -196,20 +208,22 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() timeSource.advance(MILLISECONDS.toNanos(2)) def decodedContext = DefaultPathwayContext.decode(timeSource, baseHash, null, encoded) timeSource.advance(MILLISECONDS.toNanos(25)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["group": "group", "topic": "topic", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("kafka", null, "topic", "group", null)), pointConsumer) then: decodedContext.isStarted() pointConsumer.points.size() == 2 with(pointConsumer.points[1]) { - edgeTags == ["group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 3 + tags.nonNullSize() == 3 + tags.getGroup() == "group:group" + tags.getType() == "type:kafka" + tags.getTopic() == "topic:topic" parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(27) @@ -223,13 +237,13 @@ class DefaultPathwayContextTest extends DDCoreSpecification { def context = new DefaultPathwayContext(timeSource, baseHash, null) def timeFromQueue = timeSource.getCurrentTimeMillis() - 200 when: - context.setCheckpoint(create(["type": "internal"], timeFromQueue, 0), pointConsumer) + context.setCheckpoint(create(DataStreamsTags.create("internal", null), timeFromQueue, 0), pointConsumer) then: context.isStarted() pointConsumer.points.size() == 1 with(pointConsumer.points[0]) { - edgeTags == ["type:internal"] - edgeTags.size() == 1 + tags.getType() == "type:internal" + tags.nonNullSize() == 1 parentHash == 0 hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(200) @@ -245,20 +259,23 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", 
DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() timeSource.advance(MILLISECONDS.toNanos(1)) def decodedContext = DefaultPathwayContext.decode(timeSource, baseHash, null, encoded) timeSource.advance(MILLISECONDS.toNanos(25)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["group": "group", "topic": "topic", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("kafka", DataStreamsTags.Direction.Outbound, "topic", "group", null)), pointConsumer) then: decodedContext.isStarted() pointConsumer.points.size() == 2 with(pointConsumer.points[1]) { - edgeTags == ["group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 3 + tags.group == "group:group" + tags.topic == "topic:topic" + tags.type == "type:kafka" + tags.direction == "direction:out" + tags.nonNullSize() == 4 parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(26) @@ -270,14 +287,17 @@ class DefaultPathwayContextTest extends DDCoreSpecification { timeSource.advance(MILLISECONDS.toNanos(2)) def secondDecode = DefaultPathwayContext.decode(timeSource, baseHash, null, secondEncode) timeSource.advance(MILLISECONDS.toNanos(30)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["group": "group", "topic": "topicB", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("kafka", DataStreamsTags.Direction.Inbound, "topicB", "group", null)), pointConsumer) then: secondDecode.isStarted() pointConsumer.points.size() == 3 with(pointConsumer.points[2]) { - edgeTags == ["group:group", "topic:topicB", "type:kafka"] - edgeTags.size() == 3 + tags.group == "group:group" + tags.topic == "topic:topicB" + tags.type == "type:kafka" + tags.direction == "direction:in" + tags.nonNullSize() == 4 parentHash == pointConsumer.points[1].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(58) @@ -294,21 +314,24 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() Map carrier = [(PROPAGATION_KEY_BASE64): encoded, "someotherkey": "someothervalue"] timeSource.advance(MILLISECONDS.toNanos(1)) def decodedContext = DefaultPathwayContext.extract(carrier, contextVisitor, timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(25)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["group": "group", "topic": "topic", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("kafka", DataStreamsTags.Direction.Outbound, "topic", "group", null)), pointConsumer) then: decodedContext.isStarted() pointConsumer.points.size() == 2 with(pointConsumer.points[1]) { - edgeTags == ["group:group", "topic:topic", "type:kafka"] - edgeTags.size() == 3 + tags.nonNullSize() == 4 + tags.group == "group:group" + tags.topic == "topic:topic" + tags.type == "type:kafka" + tags.direction == "direction:out" parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(26) @@ -321,14 +344,17 @@ class DefaultPathwayContextTest extends DDCoreSpecification { timeSource.advance(MILLISECONDS.toNanos(2)) def secondDecode = DefaultPathwayContext.extract(carrier, contextVisitor, timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(30)) - 
context.setCheckpoint(fromTags(new LinkedHashMap<>(["group": "group", "topic": "topicB", "type": "kafka"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("kafka", DataStreamsTags.Direction.Inbound, "topicB", "group", null)), pointConsumer) then: secondDecode.isStarted() pointConsumer.points.size() == 3 with(pointConsumer.points[2]) { - edgeTags == ["group:group", "topic:topicB", "type:kafka"] - edgeTags.size() == 3 + tags.nonNullSize() == 4 + tags.group == "group:group" + tags.topic == "topic:topicB" + tags.type == "type:kafka" + tags.direction == "direction:in" parentHash == pointConsumer.points[1].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(58) @@ -345,21 +371,23 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() Map carrier = [(PROPAGATION_KEY_BASE64): encoded, "someotherkey": "someothervalue"] timeSource.advance(MILLISECONDS.toNanos(1)) def decodedContext = DefaultPathwayContext.extract(carrier, contextVisitor, timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(25)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["topic": "topic", "type": "sqs"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("sqs", DataStreamsTags.Direction.Outbound, "topic", null, null)), pointConsumer) then: decodedContext.isStarted() pointConsumer.points.size() == 2 with(pointConsumer.points[1]) { - edgeTags == ["topic:topic", "type:sqs"] - edgeTags.size() == 2 + tags.direction == "direction:out" + tags.topic == "topic:topic" + tags.type == "type:sqs" + tags.nonNullSize() == 3 parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(26) @@ -372,14 +400,15 @@ class DefaultPathwayContextTest extends DDCoreSpecification { timeSource.advance(MILLISECONDS.toNanos(2)) def secondDecode = DefaultPathwayContext.extract(carrier, contextVisitor, timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(30)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["topic": "topicB", "type": "sqs"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("sqs", DataStreamsTags.Direction.Inbound, "topicB", null, null)), pointConsumer) then: secondDecode.isStarted() pointConsumer.points.size() == 3 with(pointConsumer.points[2]) { - edgeTags == ["topic:topicB", "type:sqs"] - edgeTags.size() == 2 + tags.type == "type:sqs" + tags.topic == "topic:topicB" + tags.nonNullSize() == 3 parentHash == pointConsumer.points[1].hash hash != 0 pathwayLatencyNano == MILLISECONDS.toNanos(58) @@ -394,26 +423,29 @@ class DefaultPathwayContextTest extends DDCoreSpecification { when: timeSource.advance(50) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) timeSource.advance(25) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["group": "group", "topic": "topic", "type": "type"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("type", DataStreamsTags.Direction.Outbound, "topic", "group", null)), pointConsumer) timeSource.advance(25) - context.setCheckpoint(fromTags(new LinkedHashMap<>()), pointConsumer) + 
context.setCheckpoint(fromTags(DataStreamsTags.create(null, null)), pointConsumer) then: context.isStarted() pointConsumer.points.size() == 3 verifyFirstPoint(pointConsumer.points[0]) with(pointConsumer.points[1]) { - edgeTags == ["group:group", "topic:topic", "type:type"] - edgeTags.size() == 3 + tags.type == "type:type" + tags.topic == "topic:topic" + tags.group == "group:group" + tags.direction == "direction:out" + tags.nonNullSize() == 4 parentHash == pointConsumer.points[0].hash hash != 0 pathwayLatencyNano == 25 edgeLatencyNano == 25 } with(pointConsumer.points[2]) { - edgeTags.size() == 0 + tags.nonNullSize() == 0 parentHash == pointConsumer.points[1].hash hash != 0 pathwayLatencyNano == 50 @@ -470,9 +502,10 @@ class DefaultPathwayContextTest extends DDCoreSpecification { def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { globalTraceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) + DataStreamsTags.setGlobalBaseHash(baseHash) def context = new DefaultPathwayContext(timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() Map carrier = [ (PROPAGATION_KEY_BASE64): encoded, @@ -486,6 +519,7 @@ class DefaultPathwayContextTest extends DDCoreSpecification { def extractedSpan = AgentSpan.fromContext(extractedContext) then: + encoded == "L+lDG/Pa9hRkZA==" !dynamicConfigEnabled || extractedSpan != null if (dynamicConfigEnabled) { def extracted = extractedSpan.context() @@ -514,11 +548,18 @@ class DefaultPathwayContextTest extends DDCoreSpecification { isDataStreamsEnabled() >> { return globalDsmEnabled } } + def tracerApi = Mock(AgentTracer.TracerAPI) { + captureTraceConfig() >> globalTraceConfig + } + AgentTracer.TracerAPI originalTracer = AgentTracer.get() + AgentTracer.forceRegister(tracerApi) + def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { globalTraceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) + DataStreamsTags.setGlobalBaseHash(baseHash) def context = new DefaultPathwayContext(timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() Map carrier = [(PROPAGATION_KEY_BASE64): encoded, "someotherkey": "someothervalue"] @@ -530,6 +571,7 @@ class DefaultPathwayContextTest extends DDCoreSpecification { def extractedSpan = AgentSpan.fromContext(extractedContext) then: + encoded == "L+lDG/Pa9hRkZA==" if (globalDsmEnabled) { extractedSpan != null def extracted = extractedSpan.context() @@ -540,6 +582,9 @@ class DefaultPathwayContextTest extends DDCoreSpecification { extractedSpan == null } + cleanup: + AgentTracer.forceRegister(originalTracer) + where: globalDsmEnabled << [true, false] } @@ -557,19 +602,27 @@ class DefaultPathwayContextTest extends DDCoreSpecification { isDataStreamsEnabled() >> { return globalDsmEnabled } } - def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { globalTraceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) + def tracerApi = Mock(AgentTracer.TracerAPI) { + captureTraceConfig() >> globalTraceConfig 
+ } + AgentTracer.TracerAPI originalTracer = AgentTracer.get() + AgentTracer.forceRegister(tracerApi) + def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { globalTraceConfig }, + wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) + + DataStreamsTags.setGlobalBaseHash(baseHash) def context = new DefaultPathwayContext(timeSource, baseHash, null) timeSource.advance(MILLISECONDS.toNanos(50)) - context.setCheckpoint(fromTags(new LinkedHashMap<>(["type": "internal"])), pointConsumer) + context.setCheckpoint(fromTags(DataStreamsTags.create("internal", DataStreamsTags.Direction.Inbound)), pointConsumer) def encoded = context.encode() Map carrier = [(PROPAGATION_KEY_BASE64): encoded, "someotherkey": "someothervalue"] def contextVisitor = new Base64MapContextVisitor() - def spanContext = new ExtractedContext(DDTraceId.ONE, 1, 0, null, 0, null, (TagMap)null, null, null, null, DATADOG) + def spanContext = new ExtractedContext(DDTraceId.ONE, 1, 0, null, 0, + null, (TagMap)null, null, null, globalTraceConfig, DATADOG) def baseContext = AgentSpan.fromSpanContext(spanContext).storeInto(root()) def propagator = dataStreams.propagator() - when: def extractedContext = propagator.extract(baseContext, carrier, contextVisitor) def extractedSpan = AgentSpan.fromContext(extractedContext) @@ -582,6 +635,7 @@ class DefaultPathwayContextTest extends DDCoreSpecification { then: extracted != null + encoded == "L+lDG/Pa9hRkZA==" if (globalDsmEnabled) { extracted.pathwayContext != null extracted.pathwayContext.isStarted() @@ -589,6 +643,9 @@ class DefaultPathwayContextTest extends DDCoreSpecification { extracted.pathwayContext == null } + cleanup: + AgentTracer.forceRegister(originalTracer) + where: globalDsmEnabled << [true, false] } @@ -606,7 +663,8 @@ class DefaultPathwayContextTest extends DDCoreSpecification { isDataStreamsEnabled() >> true } - def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) + def dataStreams = new DefaultDataStreamsMonitoring(sink, features, timeSource, { traceConfig }, + wellKnownTags, payloadWriter, DEFAULT_BUCKET_DURATION_NANOS) Map carrier = ["someotherkey": "someothervalue"] def contextVisitor = new Base64MapContextVisitor() @@ -628,4 +686,4 @@ class DefaultPathwayContextTest extends DDCoreSpecification { } } } -} \ No newline at end of file +} diff --git a/dd-trace-core/src/traceAgentTest/groovy/DataStreamsIntegrationTest.groovy b/dd-trace-core/src/traceAgentTest/groovy/DataStreamsIntegrationTest.groovy index 526fa6b785f..15f71c0e45c 100644 --- a/dd-trace-core/src/traceAgentTest/groovy/DataStreamsIntegrationTest.groovy +++ b/dd-trace-core/src/traceAgentTest/groovy/DataStreamsIntegrationTest.groovy @@ -3,6 +3,7 @@ import datadog.communication.ddagent.SharedCommunicationObjects import datadog.communication.http.OkHttpUtils import datadog.trace.api.Config import datadog.trace.api.TraceConfig +import datadog.trace.api.datastreams.DataStreamsTags import datadog.trace.api.time.ControllableTimeSource import datadog.trace.api.datastreams.StatsPoint import datadog.trace.common.metrics.EventListener @@ -46,7 +47,8 @@ class DataStreamsIntegrationTest extends AbstractTraceAgentTest { when: def dataStreams = new DefaultDataStreamsMonitoring(sink, sharedCommunicationObjects.featuresDiscovery(Config.get()), timeSource, { traceConfig }, Config.get()) dataStreams.start() - dataStreams.add(new StatsPoint(["type:testType", "group:testGroup", "topic:testTopic"], 1, 
2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) + def tg = DataStreamsTags.create("testType", null, "testTopic", "testGroup", null) + dataStreams.add(new StatsPoint(tg, 1, 2, 5, timeSource.currentTimeNanos, 0, 0, 0, null)) timeSource.advance(Config.get().getDataStreamsBucketDurationNanoseconds()) dataStreams.report() diff --git a/internal-api/src/main/java/datadog/trace/api/datastreams/AgentDataStreamsMonitoring.java b/internal-api/src/main/java/datadog/trace/api/datastreams/AgentDataStreamsMonitoring.java index e6ddac36bac..b7c51bd36ec 100644 --- a/internal-api/src/main/java/datadog/trace/api/datastreams/AgentDataStreamsMonitoring.java +++ b/internal-api/src/main/java/datadog/trace/api/datastreams/AgentDataStreamsMonitoring.java @@ -4,10 +4,9 @@ import datadog.trace.bootstrap.instrumentation.api.AgentSpan; import datadog.trace.bootstrap.instrumentation.api.Schema; import datadog.trace.bootstrap.instrumentation.api.SchemaIterator; -import java.util.LinkedHashMap; public interface AgentDataStreamsMonitoring extends DataStreamsCheckpointer { - void trackBacklog(LinkedHashMap sortedTags, long value); + void trackBacklog(DataStreamsTags tags, long value); /** * Sets data streams checkpoint, used for both produce and consume operations. diff --git a/internal-api/src/main/java/datadog/trace/api/datastreams/Backlog.java b/internal-api/src/main/java/datadog/trace/api/datastreams/Backlog.java index 378ddf4b95c..8920e844d07 100644 --- a/internal-api/src/main/java/datadog/trace/api/datastreams/Backlog.java +++ b/internal-api/src/main/java/datadog/trace/api/datastreams/Backlog.java @@ -1,13 +1,11 @@ package datadog.trace.api.datastreams; -import java.util.List; - // Backlog allows us to track the size of a queue in data streams. For example, by monitoring both // the consumer and the producer, // we can get the size in bytes of a Kafka queue. 
public class Backlog implements InboxItem { - public List getSortedTags() { - return sortedTags; + public DataStreamsTags getTags() { + return tags; } public long getValue() { @@ -22,14 +20,14 @@ public String getServiceNameOverride() { return serviceNameOverride; } - private final List sortedTags; + private final DataStreamsTags tags; private final long value; private final long timestampNanos; private final String serviceNameOverride; public Backlog( - List sortedTags, long value, long timestampNanos, String serviceNameOverride) { - this.sortedTags = sortedTags; + DataStreamsTags tags, long value, long timestampNanos, String serviceNameOverride) { + this.tags = tags; this.value = value; this.timestampNanos = timestampNanos; this.serviceNameOverride = serviceNameOverride; diff --git a/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsContext.java b/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsContext.java index 92d8c8a3b38..22cc02c74fe 100644 --- a/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsContext.java +++ b/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsContext.java @@ -3,28 +3,21 @@ import datadog.context.Context; import datadog.context.ContextKey; import datadog.context.ImplicitContextKeyed; -import java.util.LinkedHashMap; public class DataStreamsContext implements ImplicitContextKeyed { private static final ContextKey CONTEXT_KEY = ContextKey.named("dsm-context-key"); - private static final LinkedHashMap CLIENT_PATHWAY_EDGE_TAGS; - private static final LinkedHashMap SERVER_PATHWAY_EDGE_TAGS; + private static final DataStreamsTags CLIENT_PATHWAY_EDGE_TAGS; + private static final DataStreamsTags SERVER_PATHWAY_EDGE_TAGS; - final LinkedHashMap sortedTags; + final DataStreamsTags tags; final long defaultTimestamp; final long payloadSizeBytes; final boolean sendCheckpoint; static { - CLIENT_PATHWAY_EDGE_TAGS = new LinkedHashMap<>(2); - // TODO: Refactor TagsProcessor to move it into a package that we can link the constants for. - CLIENT_PATHWAY_EDGE_TAGS.put("direction", "out"); - CLIENT_PATHWAY_EDGE_TAGS.put("type", "http"); - SERVER_PATHWAY_EDGE_TAGS = new LinkedHashMap<>(2); - // TODO: Refactor TagsProcessor to move it into a package that we can link the constants for. - SERVER_PATHWAY_EDGE_TAGS.put("direction", "in"); - SERVER_PATHWAY_EDGE_TAGS.put("type", "http"); + CLIENT_PATHWAY_EDGE_TAGS = DataStreamsTags.create("http", DataStreamsTags.Direction.Outbound); + SERVER_PATHWAY_EDGE_TAGS = DataStreamsTags.create("http", DataStreamsTags.Direction.Inbound); } public static DataStreamsContext fromContext(Context context) { @@ -52,17 +45,17 @@ public static DataStreamsContext forHttpServer() { /** * Creates a DSM context. * - * @param sortedTags alphabetically sorted tags for the checkpoint (direction, queue type etc) + * @param tags DataStreamsTags object * @return the created context. */ - public static DataStreamsContext fromTags(LinkedHashMap sortedTags) { - return new DataStreamsContext(sortedTags, 0, 0, true); + public static DataStreamsContext fromTags(DataStreamsTags tags) { + return new DataStreamsContext(tags, 0, 0, true); } /** * Creates a DSM context. * - * @param sortedTags alphabetically sorted tags for the checkpoint (direction, queue type etc) + * @param tags object * @param defaultTimestamp unix timestamp to use as a start of the pathway if this is the first * checkpoint in the chain. 
Zero should be passed if we can't extract the timestamp from the * message / payload itself (for instance: produce operations; http produce / consume etc). @@ -72,29 +65,25 @@ public static DataStreamsContext fromTags(LinkedHashMap sortedTa * @return the created context. */ public static DataStreamsContext create( - LinkedHashMap sortedTags, long defaultTimestamp, long payloadSizeBytes) { - return new DataStreamsContext(sortedTags, defaultTimestamp, payloadSizeBytes, true); + DataStreamsTags tags, long defaultTimestamp, long payloadSizeBytes) { + return new DataStreamsContext(tags, defaultTimestamp, payloadSizeBytes, true); } - public static DataStreamsContext fromTagsWithoutCheckpoint( - LinkedHashMap sortedTags) { - return new DataStreamsContext(sortedTags, 0, 0, false); + public static DataStreamsContext fromTagsWithoutCheckpoint(DataStreamsTags tags) { + return new DataStreamsContext(tags, 0, 0, false); } // That's basically a record for now private DataStreamsContext( - LinkedHashMap sortedTags, - long defaultTimestamp, - long payloadSizeBytes, - boolean sendCheckpoint) { - this.sortedTags = sortedTags; + DataStreamsTags tags, long defaultTimestamp, long payloadSizeBytes, boolean sendCheckpoint) { + this.tags = tags; this.defaultTimestamp = defaultTimestamp; this.payloadSizeBytes = payloadSizeBytes; this.sendCheckpoint = sendCheckpoint; } - public LinkedHashMap sortedTags() { - return this.sortedTags; + public DataStreamsTags tags() { + return this.tags; } public long defaultTimestamp() { diff --git a/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsTags.java b/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsTags.java new file mode 100644 index 00000000000..938d8d953f2 --- /dev/null +++ b/internal-api/src/main/java/datadog/trace/api/datastreams/DataStreamsTags.java @@ -0,0 +1,538 @@ +package datadog.trace.api.datastreams; + +import datadog.trace.util.FNV64Hash; +import java.util.Objects; + +public class DataStreamsTags { + public enum Direction { + Unknown, + Inbound, + Outbound, + } + + public static DataStreamsTags EMPTY = DataStreamsTags.create(null, null); + + private long hash; + private long aggregationHash; + private long completeHash; + private int nonNullSize; + + // hash tags + protected final String bus; + protected final String direction; + protected final Direction directionValue; + protected final String exchange; + protected final String topic; + protected final String type; + protected final String subscription; + // additional grouping tags + protected final String datasetName; + protected final String datasetNamespace; + protected final String isManual; + // informational tags + protected final String group; + protected final String consumerGroup; + protected final String hasRoutingKey; + protected final String kafkaClusterId; + protected final String partition; + + public static final String MANUAL_TAG = "manual_checkpoint"; + public static final String TYPE_TAG = "type"; + public static final String DIRECTION_TAG = "direction"; + public static final String TOPIC_TAG = "topic"; + public static final String BUS_TAG = "bus"; + public static final String PARTITION_TAG = "partition"; + public static final String GROUP_TAG = "group"; + public static final String CONSUMER_GROUP_TAG = "consumer_group"; + public static final String SUBSCRIPTION_TAG = "subscription"; + public static final String EXCHANGE_TAG = "exchange"; + public static final String DATASET_NAME_TAG = "ds.name"; + public static final String DATASET_NAMESPACE_TAG = 
"ds.namespace"; + public static final String HAS_ROUTING_KEY_TAG = "has_routing_key"; + public static final String KAFKA_CLUSTER_ID_TAG = "kafka_cluster_id"; + + private static volatile ThreadLocal serviceNameOverride; + private static volatile long baseHash; + + public static byte[] longToBytes(long val) { + return new byte[] { + (byte) (val >> 56), + (byte) (val >> 48), + (byte) (val >> 40), + (byte) (val >> 32), + (byte) (val >> 24), + (byte) (val >> 16), + (byte) (val >> 8), + (byte) val + }; + } + + public static DataStreamsTags create(String type, Direction direction) { + return DataStreamsTags.create(type, direction, null); + } + + public static DataStreamsTags create(String type, Direction direction, String topic) { + return DataStreamsTags.createWithGroup(type, direction, topic, null); + } + + public static DataStreamsTags createWithSubscription( + String type, Direction direction, String subscription) { + return new DataStreamsTags( + null, + direction, + null, + null, + type, + subscription, + null, + null, + null, + null, + null, + null, + null, + null); + } + + public static DataStreamsTags create( + String type, Direction direction, String topic, String group, String kafkaClusterId) { + return new DataStreamsTags( + null, + direction, + null, + topic, + type, + null, + null, + null, + null, + group, + null, + null, + kafkaClusterId, + null); + } + + public static DataStreamsTags createManual(String type, Direction direction, String topic) { + return new DataStreamsTags( + null, direction, null, topic, type, null, null, null, true, null, null, null, null, null); + } + + public static DataStreamsTags createWithBus(Direction direction, String bus) { + return new DataStreamsTags( + bus, direction, null, null, "bus", null, null, null, null, null, null, null, null, null); + } + + public static DataStreamsTags createWithPartition( + String type, String topic, String partition, String kafkaClusterId, String consumerGroup) { + return new DataStreamsTags( + null, + null, + null, + topic, + type, + null, + null, + null, + null, + null, + consumerGroup, + null, + kafkaClusterId, + partition); + } + + /// For usage in tests *only* + public Boolean hasAllTags(String[] tags) { + for (String tag : tags) { + if (tag.indexOf(':') == -1) { + return false; + } + String key = tag.substring(0, tag.indexOf(':')); + String value = tag.substring(tag.indexOf(':') + 1); + switch (key) { + case BUS_TAG: + if (!Objects.equals(this.bus, tag)) { + return false; + } + break; + case DIRECTION_TAG: + if (!Objects.equals( + this.directionValue, + Objects.equals(value, "out") ? 
+  /// For usage in tests *only*
+  public boolean hasAllTags(String... tags) {
+    for (String tag : tags) {
+      if (tag.indexOf(':') == -1) {
+        return false;
+      }
+      String key = tag.substring(0, tag.indexOf(':'));
+      switch (key) {
+        case BUS_TAG:
+          if (!Objects.equals(this.bus, tag)) {
+            return false;
+          }
+          break;
+        case DIRECTION_TAG:
+          // compare the rendered tag, consistent with the other cases, so that
+          // unrecognized direction values never match
+          if (!Objects.equals(this.direction, tag)) {
+            return false;
+          }
+          break;
+        case EXCHANGE_TAG:
+          if (!Objects.equals(this.exchange, tag)) {
+            return false;
+          }
+          break;
+        case TOPIC_TAG:
+          if (!Objects.equals(this.topic, tag)) {
+            return false;
+          }
+          break;
+        case TYPE_TAG:
+          if (!Objects.equals(this.type, tag)) {
+            return false;
+          }
+          break;
+        case SUBSCRIPTION_TAG:
+          if (!Objects.equals(this.subscription, tag)) {
+            return false;
+          }
+          break;
+        case DATASET_NAME_TAG:
+          if (!Objects.equals(this.datasetName, tag)) {
+            return false;
+          }
+          break;
+        case DATASET_NAMESPACE_TAG:
+          if (!Objects.equals(this.datasetNamespace, tag)) {
+            return false;
+          }
+          break;
+        case MANUAL_TAG:
+          if (!Objects.equals(this.isManual, tag)) {
+            return false;
+          }
+          break;
+        case GROUP_TAG:
+          if (!Objects.equals(this.group, tag)) {
+            return false;
+          }
+          break;
+        case CONSUMER_GROUP_TAG:
+          if (!Objects.equals(this.consumerGroup, tag)) {
+            return false;
+          }
+          break;
+        case HAS_ROUTING_KEY_TAG:
+          if (!Objects.equals(this.hasRoutingKey, tag)) {
+            return false;
+          }
+          break;
+        case KAFKA_CLUSTER_ID_TAG:
+          if (!Objects.equals(this.kafkaClusterId, tag)) {
+            return false;
+          }
+          break;
+        case PARTITION_TAG:
+          if (!Objects.equals(this.partition, tag)) {
+            return false;
+          }
+          break;
+        default:
+          return false;
+      }
+    }
+
+    return true;
+  }
+
+  public static DataStreamsTags createWithGroup(
+      String type, Direction direction, String topic, String group) {
+    return new DataStreamsTags(
+        null, direction, null, topic, type, null, null, null, null, group, null, null, null, null);
+  }
+
+  public static DataStreamsTags createWithDataset(
+      String type, Direction direction, String topic, String datasetName, String datasetNamespace) {
+    return new DataStreamsTags(
+        null, direction, null, topic, type, null, datasetName, datasetNamespace, null, null, null,
+        null, null, null);
+  }
+
+  public static void setServiceNameOverride(ThreadLocal<String> serviceNameOverride) {
+    DataStreamsTags.serviceNameOverride = serviceNameOverride;
+  }
+
+  public static void setGlobalBaseHash(long hash) {
+    DataStreamsTags.baseHash = hash;
+  }
+
+  public static DataStreamsTags createWithClusterId(
+      String type, Direction direction, String topic, String clusterId) {
+    return new DataStreamsTags(
+        null, direction, null, topic, type, null, null, null, null, null, null, null, clusterId,
+        null);
+  }
+
+  public static DataStreamsTags createWithExchange(
+      String type, Direction direction, String exchange, Boolean hasRoutingKey) {
+    return new DataStreamsTags(
+        null, direction, exchange, null, type, null, null, null, false, null, null, hasRoutingKey,
+        null, null);
+  }
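Each factory pins down the tag shape of one transport, so call sites never touch the 14-argument constructor directly. For instance, an exchange-based producer path would plausibly be tagged like this (a sketch; the names are illustrative, not from the patch):

```java
import datadog.trace.api.datastreams.DataStreamsTags;

class ExchangeTagsSketch {
  public static void main(String[] args) {
    DataStreamsTags tags =
        DataStreamsTags.createWithExchange(
            "rabbitmq", DataStreamsTags.Direction.Outbound, "orders-exchange", true);
    System.out.println(tags.getExchange()); // exchange:orders-exchange
    System.out.println(tags.getHasRoutingKey()); // has_routing_key:true
    // Note: unlike the other factories, this one pins isManual to false.
    System.out.println(tags.getIsManual()); // manual_checkpoint:false
  }
}
```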
+  public DataStreamsTags(
+      String bus,
+      Direction direction,
+      String exchange,
+      String topic,
+      String type,
+      String subscription,
+      String datasetName,
+      String datasetNamespace,
+      Boolean isManual,
+      String group,
+      String consumerGroup,
+      Boolean hasRoutingKey,
+      String kafkaClusterId,
+      String partition) {
+    this.bus = bus != null ? BUS_TAG + ":" + bus : null;
+    this.directionValue = direction;
+    if (direction == Direction.Inbound) {
+      this.direction = DIRECTION_TAG + ":in";
+    } else if (direction == Direction.Outbound) {
+      this.direction = DIRECTION_TAG + ":out";
+    } else {
+      this.direction = null;
+    }
+    this.exchange = exchange != null ? EXCHANGE_TAG + ":" + exchange : null;
+    this.topic = topic != null ? TOPIC_TAG + ":" + topic : null;
+    this.type = type != null ? TYPE_TAG + ":" + type : null;
+    this.subscription = subscription != null ? SUBSCRIPTION_TAG + ":" + subscription : null;
+    this.datasetName = datasetName != null ? DATASET_NAME_TAG + ":" + datasetName : null;
+    this.datasetNamespace =
+        datasetNamespace != null ? DATASET_NAMESPACE_TAG + ":" + datasetNamespace : null;
+    this.isManual = isManual != null ? MANUAL_TAG + ":" + isManual : null;
+    this.group = group != null ? GROUP_TAG + ":" + group : null;
+    this.consumerGroup = consumerGroup != null ? CONSUMER_GROUP_TAG + ":" + consumerGroup : null;
+    this.hasRoutingKey = hasRoutingKey != null ? HAS_ROUTING_KEY_TAG + ":" + hasRoutingKey : null;
+    this.kafkaClusterId =
+        kafkaClusterId != null ? KAFKA_CLUSTER_ID_TAG + ":" + kafkaClusterId : null;
+    this.partition = partition != null ? PARTITION_TAG + ":" + partition : null;
+
+    if (DataStreamsTags.baseHash != 0) {
+      this.hash = DataStreamsTags.baseHash;
+    }
+
+    if (DataStreamsTags.serviceNameOverride != null) {
+      String val = DataStreamsTags.serviceNameOverride.get();
+      if (val != null) {
+        this.hash = FNV64Hash.continueHash(this.hash, val, FNV64Hash.Version.v1);
+      }
+    }
+
+    // hashable tags are indices 0-6 (bus through datasetName, see tagByIndex)
+    for (int i = 0; i < 7; i++) {
+      String tag = this.tagByIndex(i);
+      if (tag != null) {
+        this.nonNullSize++;
+        this.hash = FNV64Hash.continueHash(this.hash, tag, FNV64Hash.Version.v1);
+      }
+    }
+
+    // aggregation tags are indices 7-9 (datasetNamespace, isManual, group)
+    this.aggregationHash = this.hash;
+    for (int i = 7; i < 10; i++) {
+      String tag = this.tagByIndex(i);
+      if (tag != null) {
+        this.nonNullSize++;
+        this.aggregationHash =
+            FNV64Hash.continueHash(this.aggregationHash, tag, FNV64Hash.Version.v1);
+      }
+    }
+
+    // the remaining indices (10-13) are informational values
+    this.completeHash = aggregationHash;
+    for (int i = 10; i < this.size(); i++) {
+      String tag = this.tagByIndex(i);
+      if (tag != null) {
+        this.nonNullSize++;
+        this.completeHash = FNV64Hash.continueHash(this.completeHash, tag, FNV64Hash.Version.v1);
+      }
+    }
+  }
+
+  public int size() {
+    // make sure it's in sync with tagByIndex logic
+    return 14;
+  }
+
+  public String tagByIndex(int index) {
+    switch (index) {
+      case 0:
+        return this.bus;
+      case 1:
+        return this.direction;
+      case 2:
+        return this.exchange;
+      case 3:
+        return this.topic;
+      case 4:
+        return this.type;
+      case 5:
+        return this.subscription;
+      case 6:
+        return this.datasetName;
+      case 7:
+        return this.datasetNamespace;
+      case 8:
+        return this.isManual;
+      case 9:
+        return this.group;
+      case 10:
+        return this.consumerGroup;
+      case 11:
+        return this.hasRoutingKey;
+      case 12:
+        return this.kafkaClusterId;
+      case 13:
+        return this.partition;
+      default:
+        return null;
+    }
+  }
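The constructor chains one FNV-1 64-bit hash across three nested tiers: `hash` covers the pathway-defining tags (indices 0-6), `aggregationHash` additionally folds in the grouping tags (7-9), and the private `completeHash` folds in the remaining informational tags and backs `equals`/`hashCode`. A sketch of the resulting relationships, derivable from the loops above (values illustrative):

```java
import datadog.trace.api.datastreams.DataStreamsTags;
import datadog.trace.api.datastreams.DataStreamsTags.Direction;

class HashTiersSketch {
  public static void main(String[] args) {
    DataStreamsTags a = DataStreamsTags.create("kafka", Direction.Outbound, "orders");
    DataStreamsTags b =
        DataStreamsTags.createWithGroup("kafka", Direction.Outbound, "orders", "cg-1");

    // Same pathway-defining tags (direction, topic, type) => same base hash.
    System.out.println(a.getHash() == b.getHash()); // true
    // "group" lands in the aggregation tier, so that hash diverges...
    System.out.println(a.getAggregationHash() == b.getAggregationHash()); // false
    // ...and equals() compares the complete hash, so the objects differ too.
    System.out.println(a.equals(b)); // false
  }
}
```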
+  public String getDirection() {
+    return this.direction;
+  }
+
+  public String getTopic() {
+    return this.topic;
+  }
+
+  public String getType() {
+    return this.type;
+  }
+
+  public String getIsManual() {
+    return this.isManual;
+  }
+
+  public String getBus() {
+    return this.bus;
+  }
+
+  public String getExchange() {
+    return this.exchange;
+  }
+
+  public Direction getDirectionValue() {
+    return this.directionValue;
+  }
+
+  public String getSubscription() {
+    return this.subscription;
+  }
+
+  public String getDatasetName() {
+    return this.datasetName;
+  }
+
+  public String getDatasetNamespace() {
+    return this.datasetNamespace;
+  }
+
+  public String getGroup() {
+    return this.group;
+  }
+
+  public String getConsumerGroup() {
+    return this.consumerGroup;
+  }
+
+  public String getPartition() {
+    return this.partition;
+  }
+
+  public String getKafkaClusterId() {
+    return this.kafkaClusterId;
+  }
+
+  public String getHasRoutingKey() {
+    return this.hasRoutingKey;
+  }
+
+  public int nonNullSize() {
+    return this.nonNullSize;
+  }
+
+  public long getHash() {
+    return hash;
+  }
+
+  public long getAggregationHash() {
+    return aggregationHash;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    DataStreamsTags that = (DataStreamsTags) o;
+    return this.completeHash == that.completeHash;
+  }
+
+  @Override
+  public int hashCode() {
+    return Long.hashCode(this.completeHash);
+  }
+
+  @Override
+  public String toString() {
+    return "DataStreamsTags{"
+        + "bus='"
+        + this.bus
+        + "', direction='"
+        + this.direction
+        + "', exchange='"
+        + this.exchange
+        + "', topic='"
+        + this.topic
+        + "', type='"
+        + this.type
+        + "', subscription='"
+        + this.subscription
+        + "', datasetName='"
+        + this.datasetName
+        + "', datasetNamespace='"
+        + this.datasetNamespace
+        + "', isManual='"
+        + this.isManual
+        + "', group='"
+        + this.group
+        + "', consumerGroup='"
+        + this.consumerGroup
+        + "', hasRoutingKey='"
+        + this.hasRoutingKey
+        + "', kafkaClusterId='"
+        + this.kafkaClusterId
+        + "', partition='"
+        + this.partition
+        + "', hash='"
+        + hash
+        + "', aggregationHash='"
+        + aggregationHash
+        + "', size='"
+        + size()
+        + "'}";
+  }
+}
diff --git a/internal-api/src/main/java/datadog/trace/api/datastreams/NoopDataStreamsMonitoring.java b/internal-api/src/main/java/datadog/trace/api/datastreams/NoopDataStreamsMonitoring.java
index 020b492639d..f5cdcb0c82f 100644
--- a/internal-api/src/main/java/datadog/trace/api/datastreams/NoopDataStreamsMonitoring.java
+++ b/internal-api/src/main/java/datadog/trace/api/datastreams/NoopDataStreamsMonitoring.java
@@ -4,13 +4,12 @@
 import datadog.trace.bootstrap.instrumentation.api.AgentSpan;
 import datadog.trace.bootstrap.instrumentation.api.Schema;
 import datadog.trace.bootstrap.instrumentation.api.SchemaIterator;
-import java.util.LinkedHashMap;
 
 public class NoopDataStreamsMonitoring implements AgentDataStreamsMonitoring {
   public static final NoopDataStreamsMonitoring INSTANCE = new NoopDataStreamsMonitoring();
 
   @Override
-  public void trackBacklog(LinkedHashMap<String, String> sortedTags, long value) {}
+  public void trackBacklog(DataStreamsTags tags, long value) {}
 
   @Override
   public void setCheckpoint(AgentSpan span, DataStreamsContext context) {}
diff --git a/internal-api/src/main/java/datadog/trace/api/datastreams/StatsPoint.java b/internal-api/src/main/java/datadog/trace/api/datastreams/StatsPoint.java
index 9379267f46c..3e8f8aeab01 100644
--- a/internal-api/src/main/java/datadog/trace/api/datastreams/StatsPoint.java
+++ b/internal-api/src/main/java/datadog/trace/api/datastreams/StatsPoint.java
@@ -1,9 +1,7 @@
 package datadog.trace.api.datastreams;
 
-import java.util.List;
-
 public class StatsPoint implements InboxItem {
-  private final List<String> edgeTags;
+  private final DataStreamsTags tags;
   private final long hash;
   private final long parentHash;
   private final long aggregationHash;
@@ -14,7 +12,7 @@ public class StatsPoint implements InboxItem {
   private final String serviceNameOverride;
 
   public StatsPoint(
-      List<String> edgeTags,
+      DataStreamsTags tags,
       long hash,
       long parentHash,
       long aggregationHash,
@@ -23,7 +21,7 @@ public StatsPoint(
       long edgeLatencyNano,
       long payloadSizeBytes,
       String serviceNameOverride) {
-    this.edgeTags = edgeTags;
+    this.tags = tags;
     this.hash = hash;
     this.parentHash = parentHash;
     this.aggregationHash = aggregationHash;
@@ -34,8 +32,8 @@ public StatsPoint(
     this.serviceNameOverride = serviceNameOverride;
   }
 
-  public List<String> getEdgeTags() {
-    return edgeTags;
+  public DataStreamsTags getTags() {
+    return tags;
   }
 
   public long getHash() {
@@ -74,7 +72,7 @@ public String getServiceNameOverride() {
   public String toString() {
     return "StatsPoint{"
         + "tags='"
-        + edgeTags
+        + tags
         + '\''
         + ", hash="
         + hash
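Consumers that used to iterate `StatsPoint.getEdgeTags()` can walk the fixed tag slots instead, since each slot is either null or already rendered as `"key:value"`. A hedged sketch of the consumer-side pattern (`sink` is a hypothetical stand-in for whatever serializer is in play):

```java
import datadog.trace.api.datastreams.DataStreamsTags;
import java.util.function.Consumer;

class EdgeTagsMigrationSketch {
  // Before: for (String tag : point.getEdgeTags()) { sink.accept(tag); }
  static void writeTags(DataStreamsTags tags, Consumer<String> sink) {
    for (int i = 0; i < tags.size(); i++) {
      String tag = tags.tagByIndex(i); // null when the tag was not set
      if (tag != null) {
        sink.accept(tag);
      }
    }
  }
}
```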
diff --git a/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsContextTest.groovy b/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsContextTest.groovy
index a565e0ed5e0..babc061fa2a 100644
--- a/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsContextTest.groovy
+++ b/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsContextTest.groovy
@@ -6,13 +6,13 @@ import spock.lang.Specification
 class DataStreamsContextTest extends Specification {
   def 'test constructor'() {
     setup:
-    def tags = new LinkedHashMap()
+    def tags = DataStreamsTags.EMPTY
 
     when:
     def dsmContext = DataStreamsContext.fromTags(tags)
 
     then:
-    dsmContext.sortedTags() == tags
+    dsmContext.tags() == tags
     dsmContext.defaultTimestamp() == 0
     dsmContext.payloadSizeBytes() == 0
     dsmContext.sendCheckpoint()
@@ -21,7 +21,7 @@ class DataStreamsContextTest extends Specification {
     dsmContext = DataStreamsContext.fromTagsWithoutCheckpoint(tags)
 
     then:
-    dsmContext.sortedTags() == tags
+    dsmContext.tags() == tags
     dsmContext.defaultTimestamp() == 0
     dsmContext.payloadSizeBytes() == 0
     !dsmContext.sendCheckpoint()
@@ -32,7 +32,7 @@ class DataStreamsContextTest extends Specification {
     dsmContext = DataStreamsContext.create(tags, timestamp, payloadSize)
 
     then:
-    dsmContext.sortedTags() == tags
+    dsmContext.tags() == tags
     dsmContext.defaultTimestamp() == timestamp
     dsmContext.payloadSizeBytes() == payloadSize
     dsmContext.sendCheckpoint()
@@ -40,7 +40,7 @@ class DataStreamsContextTest extends Specification {
 
   def 'test context store'() {
     setup:
-    def tags = new LinkedHashMap()
+    def tags = DataStreamsTags.EMPTY
 
     when:
     def dsmContext = DataStreamsContext.fromTags(tags)
diff --git a/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsTagsTest.groovy b/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsTagsTest.groovy
new file mode 100644
index 00000000000..ef73cfaae4c
--- /dev/null
+++ b/internal-api/src/test/groovy/datadog/trace/api/datastreams/DataStreamsTagsTest.groovy
@@ -0,0 +1,122 @@
+package datadog.trace.api.datastreams
+
+import spock.lang.Specification
+import java.nio.ByteBuffer
+
+class DataStreamsTagsTest extends Specification {
+  def getTags(int idx) {
+    return new DataStreamsTags("bus" + idx, DataStreamsTags.Direction.Outbound, "exchange" + idx, "topic" + idx, "type" + idx, "subscription" + idx,
+      "dataset_name" + idx, "dataset_namespace" + idx, true, "group" + idx, "consumer_group" + idx, true,
+      "kafka_cluster_id" + idx, "partition" + idx)
+  }
+
+  def 'test tags are properly set'() {
+    setup:
+    def tg = getTags(0)
+
+    expect:
+    tg.getBus() == DataStreamsTags.BUS_TAG + ":bus0"
+    tg.getDirection() == DataStreamsTags.DIRECTION_TAG + ":out"
+    tg.getExchange() == DataStreamsTags.EXCHANGE_TAG + ":exchange0"
+    tg.getTopic() == DataStreamsTags.TOPIC_TAG + ":topic0"
+    tg.getType() == DataStreamsTags.TYPE_TAG + ":type0"
+    tg.getSubscription() == DataStreamsTags.SUBSCRIPTION_TAG + ":subscription0"
+    tg.getDatasetName() == DataStreamsTags.DATASET_NAME_TAG + ":dataset_name0"
+    tg.getDatasetNamespace() == DataStreamsTags.DATASET_NAMESPACE_TAG + ":dataset_namespace0"
+    tg.getIsManual() == DataStreamsTags.MANUAL_TAG + ":true"
+    tg.getGroup() == DataStreamsTags.GROUP_TAG + ":group0"
+    tg.getConsumerGroup() == DataStreamsTags.CONSUMER_GROUP_TAG + ":consumer_group0"
+    tg.getHasRoutingKey() == DataStreamsTags.HAS_ROUTING_KEY_TAG + ":true"
+    tg.getKafkaClusterId() == DataStreamsTags.KAFKA_CLUSTER_ID_TAG + ":kafka_cluster_id0"
+    tg.getPartition() == DataStreamsTags.PARTITION_TAG + ":partition0"
+    tg.getDirectionValue() == DataStreamsTags.Direction.Outbound
+    tg.toString() != null
+  }
+
+  def 'test has all tags'() {
+    setup:
+    def tags = new DataStreamsTags("bus", DataStreamsTags.Direction.Outbound,
+      "exchange", "topic", "type", "subscription", "dataset_name", "dataset_namespace", true,
+      "group", "consumer_group", true, "kafka_cluster_id", "partition")
+    expect:
+    tags.hasAllTags(
+      "bus:bus",
+      "direction:out",
+      "exchange:exchange",
+      "topic:topic",
+      "type:type",
+      "subscription:subscription",
+      "ds.name:dataset_name",
+      "ds.namespace:dataset_namespace",
+      "manual_checkpoint:true",
+      "group:group",
+      "consumer_group:consumer_group",
+      "has_routing_key:true",
+      "kafka_cluster_id:kafka_cluster_id",
+      "partition:partition"
+      )
+    !tags.hasAllTags("garbage")
+  }
+
+  def 'test long to bytes'() {
+    setup:
+    def value = 123444L
+    def bts = DataStreamsTags.longToBytes(value)
+    ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES)
+    buffer.putLong(value)
+    def ctrl = buffer.array()
+    expect:
+    bts == ctrl
+  }
+
+  def 'test service name override and global hash'() {
+    setup:
+    def one = getTags(0)
+
+    def serviceName = new ThreadLocal()
+    serviceName.set("test")
+    DataStreamsTags.setServiceNameOverride(serviceName)
+    def two = getTags(0)
+
+    DataStreamsTags.setGlobalBaseHash(12)
+    def three = getTags(0)
+
+    expect:
+    one.getHash() != two.getHash()
+    one.getAggregationHash() != two.getAggregationHash()
+    one.getHash() != three.getHash()
+    one.getAggregationHash() != three.getAggregationHash()
+    two.getHash() != three.getHash()
+    two.getAggregationHash() != three.getAggregationHash()
+  }
+
+  def 'test compare'() {
+    setup:
+    def one = getTags(0)
+    def two = getTags(0)
+    def three = getTags(1)
+    expect:
+    one == two
+    one != three
+    two != three
+  }
+
+  def 'test create'() {
+    setup:
+    def one = DataStreamsTags.create("type", DataStreamsTags.Direction.Outbound)
+    def two = DataStreamsTags.create("type", DataStreamsTags.Direction.Outbound, "topic")
+    def three = DataStreamsTags.create("type", DataStreamsTags.Direction.Outbound, "topic", "group", "cluster")
+    def four = DataStreamsTags.createWithPartition("type", "topic", "partition", "cluster", "group")
+    def five = DataStreamsTags.createWithDataset("type", DataStreamsTags.Direction.Outbound, "topic", "dataset", "namespace")
+    def six = DataStreamsTags.createWithSubscription("type", DataStreamsTags.Direction.Inbound, "subscription")
+    expect:
+    one.hasAllTags("type:type", "direction:out")
+    two.hasAllTags("type:type", "direction:out", "topic:topic")
+    three.hasAllTags("type:type", "direction:out", "topic:topic", "group:group", "kafka_cluster_id:cluster")
+    four.hasAllTags("type:type", "topic:topic", "partition:partition", "kafka_cluster_id:cluster", "consumer_group:group")
+    five.hasAllTags("type:type", "direction:out", "topic:topic", "ds.name:dataset", "ds.namespace:namespace")
+    six.hasAllTags("type:type", "direction:in", "subscription:subscription")
+  }
+}
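One caveat for anyone extending this suite: `setServiceNameOverride` and `setGlobalBaseHash` mutate JVM-wide static state, and the 'test service name override and global hash' spec never restores it, so any spec that runs later in the same JVM inherits the override and base hash. A defensive reset sketch, assuming null/0 restore the defaults (they are the fields' initial values, and the constructor treats 0 as "no base hash"):

```java
import datadog.trace.api.datastreams.DataStreamsTags;

class DataStreamsTagsTestHygiene {
  // Call from an @AfterClass / cleanupSpec hook in suites that touch these globals.
  static void resetDataStreamsTagsGlobals() {
    DataStreamsTags.setServiceNameOverride(null); // null disables the override lookup
    DataStreamsTags.setGlobalBaseHash(0); // 0 skips the base-hash seeding in the constructor
  }
}
```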