From c226dd1f64a03e0fc9d815783f0883f9876b7b6c Mon Sep 17 00:00:00 2001 From: David Sanchez <838104+sanchda@users.noreply.github.com> Date: Tue, 10 Dec 2024 09:39:34 -0600 Subject: [PATCH 01/78] fix(setup): suppress int-ptr conversion errors for stack profiler v1 (#11651) The root issue is that * Alpine 3.21.0 was released on Dec 5 * Alpine 3.21.0 includes an update to gcc (gcc 14) * gcc 14 is more strict (yay!) about pointer<->integer conversions. However, cython does not generate code with the proper incantation to avoid compiler errors (I blame pthreads having opaque pointer types) * Subsequently, any and every python-alpine container image (even and especially patch versions) cut after Dec 5 will have the updated Alpine image, which will have the updated gcc, which will start to break during builds. This PR effectively undoes the gcc 14 behavior by making int<->ptr conversions less strict again (only for the stack.c cython-generated file which is currently throwing the error). ## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks 
- Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../fix-profiler-int-ptr-conversion-4377fbd8724eeaec.yaml | 6 ++++++ setup.py | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/fix-profiler-int-ptr-conversion-4377fbd8724eeaec.yaml diff --git a/releasenotes/notes/fix-profiler-int-ptr-conversion-4377fbd8724eeaec.yaml b/releasenotes/notes/fix-profiler-int-ptr-conversion-4377fbd8724eeaec.yaml new file mode 100644 index 00000000000..cadb50628fa --- /dev/null +++ b/releasenotes/notes/fix-profiler-int-ptr-conversion-4377fbd8724eeaec.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Updates setup.py to ignore int-ptr conversion warnings for the profiler stack.pyx file. + This is important because gcc 14 makes these conversions an error, alpine 3.21.0 ships with gcc 14, + and any patch version of a Python alpine image cut after December 5th, 2024, will have this issue. diff --git a/setup.py b/setup.py index 6b097d46f6b..06200930c56 100644 --- a/setup.py +++ b/setup.py @@ -612,7 +612,11 @@ def get_exts_for(name): "ddtrace.profiling.collector.stack", sources=["ddtrace/profiling/collector/stack.pyx"], language="c", - extra_compile_args=extra_compile_args, + # cython generated code errors on build in toolchains that are strict about int->ptr conversion + # OTOH, the MSVC toolchain is different. 
In a perfect world we'd deduce the underlying toolchain and + # emit the right flags, but as a compromise we assume Windows implies MSVC and everything else is on a + # GNU-like toolchain + extra_compile_args=extra_compile_args + (["-Wno-int-conversion"] if CURRENT_OS != "Windows" else []), ), Cython.Distutils.Extension( "ddtrace.profiling.collector._traceback", From e04c8acf72186e63998cb4c97df0d3708aaeb15d Mon Sep 17 00:00:00 2001 From: ncybul <124532568+ncybul@users.noreply.github.com> Date: Tue, 10 Dec 2024 14:19:31 -0500 Subject: [PATCH 02/78] feat(langchain): [MLOB-1972] update langchain to handle vertex and gemini llm calls (#11642) This PR updates the Langchain integration to handle LLM calls instrumented in the Vertex AI and Gemini integrations by checking for their respective provider names (`vertexai` and `google_palm`) and instrumenting the wrapper Langchain calls as workflow spans if these provider names are detected. Importantly, because of the way Langchain invokes chat generations for Vertex AI and Gemini, our integrations will not capture these inner LLM calls (in both cases, the prediction client is called directly with the input instead of using the `chat.send_message` method which we have instrumented in both cases). Therefore, we will only capture the Langchain LLM call and hence leave it as an LLM span for chat generations. ## Testing As we work on a more stable way to test our langchain integration, I opted to manually verify these changes by submitting traces to staging. Below I have included the code that I ran and the resulting trace that appeared in the product. It is expected that LLM calls have a langchain workflow span with a Gemini/Vertex AI child LLM span whereas Chat calls have only the Langchain LLM span instrumented (see above for details on why this is the case). 
### Gemini LLM call ``` from langchain_google_genai import GoogleGenerativeAI llm = GoogleGenerativeAI(model="gemini-pro") print( llm.invoke( "What are some tips for improving sleep quality?" ) ) ``` ![image](https://github.com/user-attachments/assets/5d68ad43-36c1-413a-822a-eac37e650c91) ### Gemini Chat call ``` from langchain_google_genai import ChatGoogleGenerativeAI llm = ChatGoogleGenerativeAI(model="gemini-pro") resp = llm.invoke("Tell me a joke.") print(resp) ``` ![image](https://github.com/user-attachments/assets/57b9dadb-2c2b-40d5-905e-3fe261132262) ### Vertex AI LLM call ``` from langchain_google_vertexai import VertexAI model = VertexAI(model_name="gemini-pro") message = "What is the optimal temperature for sleeping?" model.invoke(message) ``` ![image](https://github.com/user-attachments/assets/74d84f58-ff71-4f60-948f-4a8494b60a17) ### Vertex AI Chat call ``` from langchain_google_vertexai import ChatVertexAI llm = ChatVertexAI( model="gemini-1.5-flash-001", temperature=0, max_tokens=None, max_retries=6, stop=None, ) messages = [ ( "system", "You are a helpful assistant that translates English to French. 
Translate the user sentence.", ), ("human", "I love programming."), ] ai_msg = llm.invoke(messages) print(ai_msg) ``` ![image](https://github.com/user-attachments/assets/9bb398a3-4803-4189-af11-8afe7a6add1e) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/llmobs/_integrations/langchain.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ddtrace/llmobs/_integrations/langchain.py b/ddtrace/llmobs/_integrations/langchain.py index c2304289c2c..2128458253d 100644 --- a/ddtrace/llmobs/_integrations/langchain.py +++ b/ddtrace/llmobs/_integrations/langchain.py @@ -44,6 +44,8 @@ ANTHROPIC_PROVIDER_NAME = "anthropic" 
BEDROCK_PROVIDER_NAME = "amazon_bedrock" OPENAI_PROVIDER_NAME = "openai" +VERTEXAI_PROVIDER_NAME = "vertexai" +GEMINI_PROVIDER_NAME = "google_palm" ROLE_MAPPING = { "human": "user", @@ -81,6 +83,12 @@ def _llmobs_set_tags( if model_provider: if model_provider.startswith(BEDROCK_PROVIDER_NAME): llmobs_integration = "bedrock" + # only the llm interface for Vertex AI will get instrumented + elif model_provider.startswith(VERTEXAI_PROVIDER_NAME) and operation == "llm": + llmobs_integration = "vertexai" + # only the llm interface for Gemini will get instrumented + elif model_provider.startswith(GEMINI_PROVIDER_NAME) and operation == "llm": + llmobs_integration = "google_generativeai" elif model_provider.startswith(OPENAI_PROVIDER_NAME): llmobs_integration = "openai" elif operation == "chat" and model_provider.startswith(ANTHROPIC_PROVIDER_NAME): From 8354fdffc66339db9d458996b0333c31b310d0aa Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Wed, 11 Dec 2024 09:54:16 +0000 Subject: [PATCH 03/78] refactor(debugger): use single dispatch on probes (#11596) We refactor the debugger code to use a single dispatch function solution instead of a battery of instance check branches. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_debugger.py | 74 +++--------------- ddtrace/debugging/_encoding.py | 2 +- ddtrace/debugging/_expressions.py | 1 + ddtrace/debugging/_probe/model.py | 9 ++- ddtrace/debugging/_signal/__init__.py | 5 ++ ddtrace/debugging/_signal/collector.py | 2 +- ddtrace/debugging/_signal/log.py | 67 +++++++++++++++++ ddtrace/debugging/_signal/metric_sample.py | 15 +++- ddtrace/debugging/_signal/model.py | 87 +++++++--------------- ddtrace/debugging/_signal/snapshot.py | 15 +++- ddtrace/debugging/_signal/tracing.py | 20 ++++- tests/debugging/signal/test_collector.py | 2 +- 12 files 
changed, 166 insertions(+), 133 deletions(-) create mode 100644 ddtrace/debugging/_signal/log.py diff --git a/ddtrace/debugging/_debugger.py b/ddtrace/debugging/_debugger.py index 6d0edf3a224..65b9ecfec5e 100644 --- a/ddtrace/debugging/_debugger.py +++ b/ddtrace/debugging/_debugger.py @@ -31,26 +31,15 @@ from ddtrace.debugging._probe.model import FunctionProbe from ddtrace.debugging._probe.model import LineLocationMixin from ddtrace.debugging._probe.model import LineProbe -from ddtrace.debugging._probe.model import LogFunctionProbe -from ddtrace.debugging._probe.model import LogLineProbe -from ddtrace.debugging._probe.model import MetricFunctionProbe -from ddtrace.debugging._probe.model import MetricLineProbe from ddtrace.debugging._probe.model import Probe -from ddtrace.debugging._probe.model import SpanDecorationFunctionProbe -from ddtrace.debugging._probe.model import SpanDecorationLineProbe -from ddtrace.debugging._probe.model import SpanFunctionProbe from ddtrace.debugging._probe.registry import ProbeRegistry from ddtrace.debugging._probe.remoteconfig import ProbePollerEvent from ddtrace.debugging._probe.remoteconfig import ProbePollerEventType from ddtrace.debugging._probe.remoteconfig import ProbeRCAdapter from ddtrace.debugging._probe.status import ProbeStatusLogger from ddtrace.debugging._signal.collector import SignalCollector -from ddtrace.debugging._signal.metric_sample import MetricSample from ddtrace.debugging._signal.model import Signal from ddtrace.debugging._signal.model import SignalState -from ddtrace.debugging._signal.snapshot import Snapshot -from ddtrace.debugging._signal.tracing import DynamicSpan -from ddtrace.debugging._signal.tracing import SpanDecoration from ddtrace.debugging._uploader import LogsIntakeUploaderV1 from ddtrace.debugging._uploader import UploaderProduct from ddtrace.internal import compat @@ -62,7 +51,6 @@ from ddtrace.internal.module import register_post_run_module_hook from ddtrace.internal.module import 
unregister_post_run_module_hook from ddtrace.internal.rate_limiter import BudgetRateLimiterWithJitter as RateLimiter -from ddtrace.internal.rate_limiter import RateLimitExceeded from ddtrace.internal.remoteconfig.worker import remoteconfig_poller from ddtrace.internal.service import Service from ddtrace.internal.wrapping.context import WrappingContext @@ -190,35 +178,15 @@ def _open_signals(self) -> None: # for each probe. trace_context = self._tracer.current_trace_context() - if isinstance(probe, MetricFunctionProbe): - signal = MetricSample( - probe=probe, + try: + signal = Signal.from_probe( + probe, frame=frame, thread=thread, trace_context=trace_context, meter=self._probe_meter, ) - elif isinstance(probe, LogFunctionProbe): - signal = Snapshot( - probe=probe, - frame=frame, - thread=thread, - trace_context=trace_context, - ) - elif isinstance(probe, SpanFunctionProbe): - signal = DynamicSpan( - probe=probe, - frame=frame, - thread=thread, - trace_context=trace_context, - ) - elif isinstance(probe, SpanDecorationFunctionProbe): - signal = SpanDecoration( - probe=probe, - frame=frame, - thread=thread, - ) - else: + except TypeError: log.error("Unsupported probe type: %s", type(probe)) continue @@ -385,39 +353,19 @@ def _dd_debugger_hook(self, probe: Probe) -> None: instrumented code is running. 
""" try: - actual_frame = sys._getframe(1) - signal: Optional[Signal] = None - if isinstance(probe, MetricLineProbe): - signal = MetricSample( - probe=probe, - frame=actual_frame, + try: + signal = Signal.from_probe( + probe, + frame=sys._getframe(1), thread=threading.current_thread(), trace_context=self._tracer.current_trace_context(), meter=self._probe_meter, ) - elif isinstance(probe, LogLineProbe): - if probe.take_snapshot: - # TODO: Global limit evaluated before probe conditions - if self._global_rate_limiter.limit() is RateLimitExceeded: - return - - signal = Snapshot( - probe=probe, - frame=actual_frame, - thread=threading.current_thread(), - trace_context=self._tracer.current_trace_context(), - ) - elif isinstance(probe, SpanDecorationLineProbe): - signal = SpanDecoration( - probe=probe, - frame=actual_frame, - thread=threading.current_thread(), - ) - else: - log.error("Unsupported probe type: %r", type(probe)) + except TypeError: + log.error("Unsupported probe type: %r", type(probe), exc_info=True) return - signal.do_line() + signal.do_line(self._global_rate_limiter if probe.is_global_rate_limited() else None) if signal.state is SignalState.DONE: self._probe_registry.set_emitting(probe) diff --git a/ddtrace/debugging/_encoding.py b/ddtrace/debugging/_encoding.py index aa54add676a..b5f6458f4e2 100644 --- a/ddtrace/debugging/_encoding.py +++ b/ddtrace/debugging/_encoding.py @@ -15,7 +15,7 @@ from typing import Union from ddtrace.debugging._config import di_config -from ddtrace.debugging._signal.model import LogSignal +from ddtrace.debugging._signal.log import LogSignal from ddtrace.debugging._signal.snapshot import Snapshot from ddtrace.internal import forksafe from ddtrace.internal._encoding import BufferFull diff --git a/ddtrace/debugging/_expressions.py b/ddtrace/debugging/_expressions.py index 50028b9c6d2..ccab7549d8f 100644 --- a/ddtrace/debugging/_expressions.py +++ b/ddtrace/debugging/_expressions.py @@ -23,6 +23,7 @@ arg_operation => {"": []} 
arg_op_type => filter | substring | getmember | index """ # noqa + from dataclasses import dataclass from itertools import chain import re diff --git a/ddtrace/debugging/_probe/model.py b/ddtrace/debugging/_probe/model.py index 6f989d627f4..f832484ac6d 100644 --- a/ddtrace/debugging/_probe/model.py +++ b/ddtrace/debugging/_probe/model.py @@ -85,6 +85,9 @@ def update(self, other: "Probe") -> None: for attrib in (f.name for f in fields(self) if f.compare): setattr(self, attrib, getattr(other, attrib)) + def is_global_rate_limited(self) -> bool: + return False + def __hash__(self): return hash(self.probe_id) @@ -245,12 +248,14 @@ class LogProbeMixin(AbstractProbeMixIn): @dataclass class LogLineProbe(Probe, LineLocationMixin, LogProbeMixin, ProbeConditionMixin, RateLimitMixin): - pass + def is_global_rate_limited(self) -> bool: + return self.take_snapshot @dataclass class LogFunctionProbe(Probe, FunctionLocationMixin, TimingMixin, LogProbeMixin, ProbeConditionMixin, RateLimitMixin): - pass + def is_global_rate_limited(self) -> bool: + return self.take_snapshot @dataclass diff --git a/ddtrace/debugging/_signal/__init__.py b/ddtrace/debugging/_signal/__init__.py index e69de29bb2d..6dea9afe965 100644 --- a/ddtrace/debugging/_signal/__init__.py +++ b/ddtrace/debugging/_signal/__init__.py @@ -0,0 +1,5 @@ +# DEV: Import these modules to allow registering the single dispatch functions +from ddtrace.debugging._signal.metric_sample import MetricSample # noqa +from ddtrace.debugging._signal.snapshot import Snapshot # noqa +from ddtrace.debugging._signal.tracing import DynamicSpan # noqa +from ddtrace.debugging._signal.tracing import SpanDecoration # noqa diff --git a/ddtrace/debugging/_signal/collector.py b/ddtrace/debugging/_signal/collector.py index 461e8ff1af6..57868b30485 100644 --- a/ddtrace/debugging/_signal/collector.py +++ b/ddtrace/debugging/_signal/collector.py @@ -6,7 +6,7 @@ from ddtrace.debugging._encoding import BufferedEncoder from ddtrace.debugging._metrics 
import metrics -from ddtrace.debugging._signal.model import LogSignal +from ddtrace.debugging._signal.log import LogSignal from ddtrace.debugging._signal.model import Signal from ddtrace.debugging._signal.model import SignalState from ddtrace.internal._encoding import BufferFull diff --git a/ddtrace/debugging/_signal/log.py b/ddtrace/debugging/_signal/log.py new file mode 100644 index 00000000000..23cde73f642 --- /dev/null +++ b/ddtrace/debugging/_signal/log.py @@ -0,0 +1,67 @@ +import abc +from dataclasses import dataclass +import typing as t + +from ddtrace.debugging._probe.model import FunctionLocationMixin +from ddtrace.debugging._probe.model import LineLocationMixin +from ddtrace.debugging._signal.model import Signal + + +@dataclass +class LogSignal(Signal): + """A signal that also emits a log message. + + Some signals might require sending a log message along with the base signal + data. For example, all the collected errors from expression evaluations + (e.g. conditions) might need to be reported. 
+ """ + + @property + @abc.abstractmethod + def message(self) -> t.Optional[str]: + """The log message to emit.""" + pass + + @abc.abstractmethod + def has_message(self) -> bool: + """Whether the signal has a log message to emit.""" + pass + + @property + def data(self) -> t.Dict[str, t.Any]: + """Extra data to include in the snapshot portion of the log message.""" + return {} + + def _probe_details(self) -> t.Dict[str, t.Any]: + probe = self.probe + if isinstance(probe, LineLocationMixin): + location = { + "file": str(probe.resolved_source_file), + "lines": [str(probe.line)], + } + elif isinstance(probe, FunctionLocationMixin): + location = { + "type": probe.module, + "method": probe.func_qname, + } + else: + return {} + + return { + "id": probe.probe_id, + "version": probe.version, + "location": location, + } + + @property + def snapshot(self) -> t.Dict[str, t.Any]: + full_data = { + "id": self.uuid, + "timestamp": int(self.timestamp * 1e3), # milliseconds + "evaluationErrors": [{"expr": e.expr, "message": e.message} for e in self.errors], + "probe": self._probe_details(), + "language": "python", + } + full_data.update(self.data) + + return full_data diff --git a/ddtrace/debugging/_signal/metric_sample.py b/ddtrace/debugging/_signal/metric_sample.py index f8bebc17d83..d92cdcec173 100644 --- a/ddtrace/debugging/_signal/metric_sample.py +++ b/ddtrace/debugging/_signal/metric_sample.py @@ -4,9 +4,12 @@ from typing import cast from ddtrace.debugging._metrics import probe_metrics +from ddtrace.debugging._probe.model import MetricFunctionProbe +from ddtrace.debugging._probe.model import MetricLineProbe from ddtrace.debugging._probe.model import MetricProbeKind from ddtrace.debugging._probe.model import MetricProbeMixin -from ddtrace.debugging._signal.model import LogSignal +from ddtrace.debugging._signal.log import LogSignal +from ddtrace.debugging._signal.model import probe_to_signal from ddtrace.internal.metrics import Metrics @@ -50,3 +53,13 @@ def message(self) -> 
Optional[str]: def has_message(self) -> bool: return bool(self.errors) + + +@probe_to_signal.register +def _(probe: MetricFunctionProbe, frame, thread, trace_context, meter): + return MetricSample(probe=probe, frame=frame, thread=thread, trace_context=trace_context, meter=meter) + + +@probe_to_signal.register +def _(probe: MetricLineProbe, frame, thread, trace_context, meter): + return MetricSample(probe=probe, frame=frame, thread=thread, trace_context=trace_context, meter=meter) diff --git a/ddtrace/debugging/_signal/model.py b/ddtrace/debugging/_signal/model.py index a03b157adde..9c9448677c0 100644 --- a/ddtrace/debugging/_signal/model.py +++ b/ddtrace/debugging/_signal/model.py @@ -3,6 +3,8 @@ from dataclasses import dataclass from dataclasses import field from enum import Enum +from functools import singledispatch +import threading from threading import Thread import time from types import FrameType @@ -19,8 +21,6 @@ from ddtrace._trace.context import Context from ddtrace._trace.span import Span from ddtrace.debugging._expressions import DDExpressionEvaluationError -from ddtrace.debugging._probe.model import FunctionLocationMixin -from ddtrace.debugging._probe.model import LineLocationMixin from ddtrace.debugging._probe.model import Probe from ddtrace.debugging._probe.model import ProbeConditionMixin from ddtrace.debugging._probe.model import ProbeEvalTiming @@ -28,6 +28,8 @@ from ddtrace.debugging._probe.model import TimingMixin from ddtrace.debugging._safety import get_args from ddtrace.internal.compat import ExcInfoType +from ddtrace.internal.metrics import Metrics +from ddtrace.internal.rate_limiter import BudgetRateLimiterWithJitter as RateLimiter from ddtrace.internal.rate_limiter import RateLimitExceeded @@ -183,13 +185,17 @@ def do_exit(self, retval: Any, exc_info: ExcInfoType, duration: int) -> None: self.state = SignalState.DONE - def do_line(self) -> None: + def do_line(self, global_limiter: Optional[RateLimiter] = None) -> None: frame = self.frame 
scope = ChainMap(frame.f_locals, frame.f_globals) if not self._eval_condition(scope): return + if global_limiter is not None and global_limiter.limit() is RateLimitExceeded: + self.state = SignalState.SKIP_RATE + return + if self._rate_limit_exceeded(): return @@ -197,62 +203,19 @@ def do_line(self) -> None: self.state = SignalState.DONE - -@dataclass -class LogSignal(Signal): - """A signal that also emits a log message. - - Some signals might require sending a log message along with the base signal - data. For example, all the collected errors from expression evaluations - (e.g. conditions) might need to be reported. - """ - - @property - @abc.abstractmethod - def message(self) -> Optional[str]: - """The log message to emit.""" - pass - - @abc.abstractmethod - def has_message(self) -> bool: - """Whether the signal has a log message to emit.""" - pass - - @property - def data(self) -> Dict[str, Any]: - """Extra data to include in the snapshot portion of the log message.""" - return {} - - def _probe_details(self) -> Dict[str, Any]: - probe = self.probe - if isinstance(probe, LineLocationMixin): - location = { - "file": str(probe.resolved_source_file), - "lines": [str(probe.line)], - } - elif isinstance(probe, FunctionLocationMixin): - location = { - "type": probe.module, - "method": probe.func_qname, - } - else: - return {} - - return { - "id": probe.probe_id, - "version": probe.version, - "location": location, - } - - @property - def snapshot(self) -> Dict[str, Any]: - full_data = { - "id": self.uuid, - "timestamp": int(self.timestamp * 1e3), # milliseconds - "evaluationErrors": [{"expr": e.expr, "message": e.message} for e in self.errors], - "probe": self._probe_details(), - "language": "python", - } - full_data.update(self.data) - - return full_data + @staticmethod + def from_probe( + probe: Probe, frame: FrameType, thread: Thread, trace_context: Optional[Any], meter: Metrics.Meter + ) -> "Signal": + return probe_to_signal(probe, frame, thread, trace_context, 
meter) + + +@singledispatch +def probe_to_signal( + probe: Probe, + frame: FrameType, + thread: threading.Thread, + trace_context: Optional[Any], + meter: Metrics.Meter, +) -> Signal: + raise TypeError(f"Unsupported probe type: {type(probe)}") diff --git a/ddtrace/debugging/_signal/snapshot.py b/ddtrace/debugging/_signal/snapshot.py index 9f42921a7a3..5bb02f16659 100644 --- a/ddtrace/debugging/_signal/snapshot.py +++ b/ddtrace/debugging/_signal/snapshot.py @@ -17,6 +17,8 @@ from ddtrace.debugging._probe.model import FunctionLocationMixin from ddtrace.debugging._probe.model import LineLocationMixin from ddtrace.debugging._probe.model import LiteralTemplateSegment +from ddtrace.debugging._probe.model import LogFunctionProbe +from ddtrace.debugging._probe.model import LogLineProbe from ddtrace.debugging._probe.model import LogProbeMixin from ddtrace.debugging._probe.model import TemplateSegment from ddtrace.debugging._redaction import REDACTED_PLACEHOLDER @@ -25,8 +27,9 @@ from ddtrace.debugging._safety import get_globals from ddtrace.debugging._safety import get_locals from ddtrace.debugging._signal import utils +from ddtrace.debugging._signal.log import LogSignal from ddtrace.debugging._signal.model import EvaluationError -from ddtrace.debugging._signal.model import LogSignal +from ddtrace.debugging._signal.model import probe_to_signal from ddtrace.debugging._signal.utils import serialize from ddtrace.internal.compat import ExcInfoType from ddtrace.internal.utils.time import HourGlass @@ -177,3 +180,13 @@ def data(self): "captures": captures, "duration": self.duration, } + + +@probe_to_signal.register +def _(probe: LogFunctionProbe, frame, thread, trace_context, meter): + return Snapshot(probe=probe, frame=frame, thread=thread, trace_context=trace_context) + + +@probe_to_signal.register +def _(probe: LogLineProbe, frame, thread, trace_context, meter): + return Snapshot(probe=probe, frame=frame, thread=thread, trace_context=trace_context) diff --git 
a/ddtrace/debugging/_signal/tracing.py b/ddtrace/debugging/_signal/tracing.py index 9d3712a963b..3c9eb3f447e 100644 --- a/ddtrace/debugging/_signal/tracing.py +++ b/ddtrace/debugging/_signal/tracing.py @@ -7,12 +7,15 @@ from ddtrace.constants import ORIGIN_KEY from ddtrace.debugging._expressions import DDExpressionEvaluationError from ddtrace.debugging._probe.model import Probe +from ddtrace.debugging._probe.model import SpanDecorationFunctionProbe +from ddtrace.debugging._probe.model import SpanDecorationLineProbe from ddtrace.debugging._probe.model import SpanDecorationMixin from ddtrace.debugging._probe.model import SpanDecorationTargetSpan from ddtrace.debugging._probe.model import SpanFunctionProbe +from ddtrace.debugging._signal.log import LogSignal from ddtrace.debugging._signal.model import EvaluationError -from ddtrace.debugging._signal.model import LogSignal from ddtrace.debugging._signal.model import Signal +from ddtrace.debugging._signal.model import probe_to_signal from ddtrace.debugging._signal.utils import serialize from ddtrace.internal.compat import ExcInfoType from ddtrace.internal.logger import get_logger @@ -112,3 +115,18 @@ def message(self): def has_message(self) -> bool: return bool(self.errors) + + +@probe_to_signal.register +def _(probe: SpanFunctionProbe, frame, thread, trace_context, meter): + return DynamicSpan(probe=probe, frame=frame, thread=thread, trace_context=trace_context) + + +@probe_to_signal.register +def _(probe: SpanDecorationFunctionProbe, frame, thread, trace_context, meter): + return SpanDecoration(probe=probe, frame=frame, thread=thread) + + +@probe_to_signal.register +def _(probe: SpanDecorationLineProbe, frame, thread, trace_context, meter): + return SpanDecoration(probe=probe, frame=frame, thread=thread) diff --git a/tests/debugging/signal/test_collector.py b/tests/debugging/signal/test_collector.py index 2e2a77ec098..49e4f1aef2c 100644 --- a/tests/debugging/signal/test_collector.py +++ 
b/tests/debugging/signal/test_collector.py @@ -6,7 +6,7 @@ import mock from ddtrace.debugging._signal.collector import SignalCollector -from ddtrace.debugging._signal.model import LogSignal +from ddtrace.debugging._signal.log import LogSignal from ddtrace.debugging._signal.model import SignalState from ddtrace.debugging._signal.snapshot import Snapshot from tests.debugging.utils import create_snapshot_line_probe From 3a56826f72611155d278b0abd0bd0f9072f4cd65 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Wed, 11 Dec 2024 10:16:35 +0000 Subject: [PATCH 04/78] chore(profile): more accurate code-provenance (#11577) We make the code provenance feature for the Python profiler more accurate by considering "library" anything that is not user code, in the sense of file paths that do not correspond to well-known third-party dependencies. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and 
discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/profiling/exporter/pprof.pyx | 1 + 1 file changed, 1 insertion(+) diff --git a/ddtrace/profiling/exporter/pprof.pyx b/ddtrace/profiling/exporter/pprof.pyx index 121909727f1..9ed4aed5f0f 100644 --- a/ddtrace/profiling/exporter/pprof.pyx +++ b/ddtrace/profiling/exporter/pprof.pyx @@ -455,6 +455,7 @@ class _PprofConverter(object): for _ in ( (packages.filename_to_package(filename), filename) for filename, lineno, funcname in self._locations + if not packages.is_user_code(filename) ) if _[0] is not None }, _ITEMGETTER_ZERO From 264671fd51c50811faec4b82fdc56d83ed06620a Mon Sep 17 00:00:00 2001 From: Christophe Papazian <114495376+christophe-papazian@users.noreply.github.com> Date: Wed, 11 Dec 2024 11:29:43 +0100 Subject: [PATCH 05/78] chore(asm): libddwaf 1.22.0 (#11667) Upgrade libddwaf to 1.22.0 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking 
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- releasenotes/notes/waf_1.22.0-05b1dfbaa0d47059.yaml | 4 ++++ setup.py | 2 +- ...est_processor.test_appsec_body_no_collection_snapshot.json | 2 +- ..._processor.test_appsec_cookies_no_collection_snapshot.json | 2 +- ....appsec.test_processor.test_appsec_span_tags_snapshot.json | 2 +- ..._processor.test_appsec_span_tags_snapshot_with_errors.json | 2 +- ...ango.test_django_appsec_snapshots.test_appsec_enabled.json | 2 +- ...st_django_appsec_snapshots.test_appsec_enabled_attack.json | 2 +- ...jango_appsec_snapshots.test_request_ipblock_match_403.json | 2 +- ..._appsec_snapshots.test_request_ipblock_match_403_json.json | 2 +- ...ngo_appsec_snapshots.test_request_ipblock_nomatch_200.json | 2 +- ..._flask_ipblock_match_403[flask_appsec_good_rules_env].json | 2 +- ...sk_ipblock_match_403[flask_appsec_good_rules_env]_220.json | 2 +- ...k_ipblock_match_403_json[flask_appsec_good_rules_env].json | 2 +- ...block_match_403_json[flask_appsec_good_rules_env]_220.json | 2 +- ...lask_processexec_osspawn[flask_appsec_good_rules_env].json | 2 +- ..._processexec_osspawn[flask_appsec_good_rules_env]_220.json | 2 +- ...ask_processexec_ossystem[flask_appsec_good_rules_env].json | 2 +- ...processexec_ossystem[flask_appsec_good_rules_env]_220.json | 2 +- ...rocesscommunicatenoshell[flask_appsec_good_rules_env].json | 2 +- ...sscommunicatenoshell[flask_appsec_good_rules_env]_220.json | 2 +- ...bprocesscommunicateshell[flask_appsec_good_rules_env].json | 2 +- 
...cesscommunicateshell[flask_appsec_good_rules_env]_220.json | 2 +- ...userblock_match_200_json[flask_appsec_good_rules_env].json | 2 +- ...block_match_200_json[flask_appsec_good_rules_env]_220.json | 2 +- ...userblock_match_403_json[flask_appsec_good_rules_env].json | 2 +- ...block_match_403_json[flask_appsec_good_rules_env]_220.json | 2 +- 27 files changed, 30 insertions(+), 26 deletions(-) create mode 100644 releasenotes/notes/waf_1.22.0-05b1dfbaa0d47059.yaml diff --git a/releasenotes/notes/waf_1.22.0-05b1dfbaa0d47059.yaml b/releasenotes/notes/waf_1.22.0-05b1dfbaa0d47059.yaml new file mode 100644 index 00000000000..def80385e74 --- /dev/null +++ b/releasenotes/notes/waf_1.22.0-05b1dfbaa0d47059.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + ASM: This upgrades libddwaf to 1.22.0 diff --git a/setup.py b/setup.py index 06200930c56..13b0cb4a4f0 100644 --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ CURRENT_OS = platform.system() -LIBDDWAF_VERSION = "1.21.0" +LIBDDWAF_VERSION = "1.22.0" # DEV: update this accordingly when src/core upgrades libdatadog dependency. # libdatadog v14.1.0 requires rust 1.76. 
diff --git a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_body_no_collection_snapshot.json b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_body_no_collection_snapshot.json index 4f093b82c80..847daf52c50 100644 --- a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_body_no_collection_snapshot.json +++ b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_body_no_collection_snapshot.json @@ -10,7 +10,7 @@ "meta": { "_dd.appsec.event_rules.version": "1.13.3", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"nfd-000-006\",\n \"name\": \"Detect failed attempt to fetch sensitive files\",\n \"tags\": {\n \"capec\": \"1000/118/169\",\n \"category\": \"attack_attempt\",\n \"confidence\": \"1\",\n \"cwe\": \"200\",\n \"type\": \"security_scanner\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"^404$\",\n \"parameters\": [\n {\n \"address\": \"server.response.status\",\n \"highlight\": [\n \"404\"\n ],\n \"key_path\": [],\n \"value\": \"404\"\n }\n ]\n },\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"\\\\.(cgi|bat|dll|exe|key|cert|crt|pem|der|pkcs|pkcs|pkcs[0-9]*|nsf|jsa|war|java|class|vb|vba|so|git|svn|hg|cvs)([^a-zA-Z0-9_]|$)\",\n \"parameters\": [\n {\n \"address\": \"server.request.uri.raw\",\n \"highlight\": [\n \".git\"\n ],\n \"key_path\": [],\n \"value\": \"/.git\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.origin": "appsec", "_dd.p.appsec": "1", "_dd.p.dm": "-5", diff --git a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_cookies_no_collection_snapshot.json b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_cookies_no_collection_snapshot.json index cbdf4ac389d..b3f0d82c699 100644 --- a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_cookies_no_collection_snapshot.json +++ 
b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_cookies_no_collection_snapshot.json @@ -10,7 +10,7 @@ "meta": { "_dd.appsec.event_rules.version": "1.13.3", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"nfd-000-006\",\n \"name\": \"Detect failed attempt to fetch sensitive files\",\n \"tags\": {\n \"capec\": \"1000/118/169\",\n \"category\": \"attack_attempt\",\n \"confidence\": \"1\",\n \"cwe\": \"200\",\n \"type\": \"security_scanner\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"^404$\",\n \"parameters\": [\n {\n \"address\": \"server.response.status\",\n \"highlight\": [\n \"404\"\n ],\n \"key_path\": [],\n \"value\": \"404\"\n }\n ]\n },\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"\\\\.(cgi|bat|dll|exe|key|cert|crt|pem|der|pkcs|pkcs|pkcs[0-9]*|nsf|jsa|war|java|class|vb|vba|so|git|svn|hg|cvs)([^a-zA-Z0-9_]|$)\",\n \"parameters\": [\n {\n \"address\": \"server.request.uri.raw\",\n \"highlight\": [\n \".git\"\n ],\n \"key_path\": [],\n \"value\": \"/.git\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.origin": "appsec", "_dd.p.appsec": "1", "_dd.p.dm": "-5", diff --git a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot.json b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot.json index 4908df4eed0..1cab0e7c25e 100644 --- a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot.json +++ b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot.json @@ -10,7 +10,7 @@ "meta": { "_dd.appsec.event_rules.version": "1.13.3", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"nfd-000-006\",\n \"name\": \"Detect failed attempt to fetch sensitive files\",\n \"tags\": {\n \"capec\": \"1000/118/169\",\n \"category\": \"attack_attempt\",\n \"confidence\": \"1\",\n \"cwe\": \"200\",\n 
\"type\": \"security_scanner\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"^404$\",\n \"parameters\": [\n {\n \"address\": \"server.response.status\",\n \"highlight\": [\n \"404\"\n ],\n \"key_path\": [],\n \"value\": \"404\"\n }\n ]\n },\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"\\\\.(cgi|bat|dll|exe|key|cert|crt|pem|der|pkcs|pkcs|pkcs[0-9]*|nsf|jsa|war|java|class|vb|vba|so|git|svn|hg|cvs)([^a-zA-Z0-9_]|$)\",\n \"parameters\": [\n {\n \"address\": \"server.request.uri.raw\",\n \"highlight\": [\n \".git\"\n ],\n \"key_path\": [],\n \"value\": \"/.git\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "tests.appsec.appsec", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot_with_errors.json b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot_with_errors.json index 60c918bdc2f..28e1dd48dd3 100644 --- a/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot_with_errors.json +++ b/tests/snapshots/tests.appsec.appsec.test_processor.test_appsec_span_tags_snapshot_with_errors.json @@ -10,7 +10,7 @@ "meta": { "_dd.appsec.event_rules.errors": "{\"missing key 'conditions'\": [\"crs-913-110\"], \"missing key 'tags'\": [\"crs-942-100\"]}", "_dd.appsec.event_rules.version": "5.5.5", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "tests.appsec.appsec", "_dd.p.dm": "-0", "_dd.runtime_family": "python", diff --git a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled.json b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled.json index 344f63429a7..cd37846b283 100644 --- a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled.json +++ 
b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "1.13.3", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled_attack.json b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled_attack.json index 02956faa875..70652b2a242 100644 --- a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled_attack.json +++ b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_appsec_enabled_attack.json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "1.13.3", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"nfd-000-006\",\n \"name\": \"Detect failed attempt to fetch sensitive files\",\n \"tags\": {\n \"capec\": \"1000/118/169\",\n \"category\": \"attack_attempt\",\n \"confidence\": \"1\",\n \"cwe\": \"200\",\n \"type\": \"security_scanner\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"^404$\",\n \"parameters\": [\n {\n \"address\": \"server.response.status\",\n \"highlight\": [\n \"404\"\n ],\n \"key_path\": [],\n \"value\": \"404\"\n }\n ]\n },\n {\n \"operator\": \"match_regex\",\n \"operator_value\": \"\\\\.(cgi|bat|dll|exe|key|cert|crt|pem|der|pkcs|pkcs|pkcs[0-9]*|nsf|jsa|war|java|class|vb|vba|so|git|svn|hg|cvs)([^a-zA-Z0-9_]|$)\",\n \"parameters\": [\n {\n \"address\": \"server.request.uri.raw\",\n \"highlight\": [\n \".git\"\n ],\n \"key_path\": [],\n \"value\": \"/.git\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git 
a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403.json b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403.json index c5ce0d7abf9..a87fcfe4cac 100644 --- a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403.json +++ b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403.json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[{\"rule\":{\"id\":\"blk-001-001\",\"name\":\"Block IP addresses\",\"on_match\":[\"block\"],\"tags\":{\"category\":\"blocking\",\"type\":\"ip_addresses\"}},\"rule_matches\":[{\"operator\":\"ip_match\",\"operator_value\":\"\",\"parameters\":[{\"address\":\"http.client_ip\",\"key_path\":[],\"value\":\"8.8.4.4\",\"highlight\":[\"8.8.4.4\"]}]}],\"span_id\":10192376353237234254}]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403_json.json b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403_json.json index 467d576ad85..e1a05bff80e 100644 --- a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403_json.json +++ b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_match_403_json.json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[{\"rule\":{\"id\":\"blk-001-001\",\"name\":\"Block IP 
addresses\",\"on_match\":[\"block\"],\"tags\":{\"category\":\"blocking\",\"type\":\"ip_addresses\"}},\"rule_matches\":[{\"operator\":\"ip_match\",\"operator_value\":\"\",\"parameters\":[{\"address\":\"http.client_ip\",\"key_path\":[],\"value\":\"8.8.4.4\",\"highlight\":[\"8.8.4.4\"]}]}],\"span_id\":865087550764298227}]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_nomatch_200.json b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_nomatch_200.json index 52a99d13b63..15d1b9c3565 100644 --- a/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_nomatch_200.json +++ b/tests/snapshots/tests.contrib.django.test_django_appsec_snapshots.test_request_ipblock_nomatch_200.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env].json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env].json index 40295cc8b37..625cad59f0a 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env].json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"blk-001-001\",\n \"name\": \"Block IP addresses\",\n \"on_match\": [\n \"block\"\n ],\n \"tags\": 
{\n \"category\": \"blocking\",\n \"type\": \"ip_addresses\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"ip_match\",\n \"operator_value\": \"\",\n \"parameters\": [\n {\n \"address\": \"http.client_ip\",\n \"highlight\": [\n \"8.8.4.4\"\n ],\n \"key_path\": [],\n \"value\": \"8.8.4.4\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env]_220.json index 32d8e8e4dde..c8e20851d2d 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403[flask_appsec_good_rules_env]_220.json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"blk-001-001\",\n \"name\": \"Block IP addresses\",\n \"on_match\": [\n \"block\"\n ],\n \"tags\": {\n \"category\": \"blocking\",\n \"type\": \"ip_addresses\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"ip_match\",\n \"operator_value\": \"\",\n \"parameters\": [\n {\n \"address\": \"http.client_ip\",\n \"highlight\": [\n \"8.8.4.4\"\n ],\n \"key_path\": [],\n \"value\": \"8.8.4.4\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env].json 
b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env].json index a8d620726eb..aec0ebaad30 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env].json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"blk-001-001\",\n \"name\": \"Block IP addresses\",\n \"on_match\": [\n \"block\"\n ],\n \"tags\": {\n \"category\": \"blocking\",\n \"type\": \"ip_addresses\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"ip_match\",\n \"operator_value\": \"\",\n \"parameters\": [\n {\n \"address\": \"http.client_ip\",\n \"highlight\": [\n \"8.8.4.4\"\n ],\n \"key_path\": [],\n \"value\": \"8.8.4.4\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env]_220.json index 9e98ab1fa2d..ce549766664 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_ipblock_match_403_json[flask_appsec_good_rules_env]_220.json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"blk-001-001\",\n \"name\": \"Block IP addresses\",\n \"on_match\": [\n \"block\"\n ],\n \"tags\": 
{\n \"category\": \"blocking\",\n \"type\": \"ip_addresses\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"ip_match\",\n \"operator_value\": \"\",\n \"parameters\": [\n {\n \"address\": \"http.client_ip\",\n \"highlight\": [\n \"8.8.4.4\"\n ],\n \"key_path\": [],\n \"value\": \"8.8.4.4\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env].json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env].json index d9bf74ab521..dbd705248e8 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env].json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env]_220.json index 4e3c507fb24..7503a153364 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_osspawn[flask_appsec_good_rules_env]_220.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": 
"1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env].json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env].json index a53a26279ee..168dd987711 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env].json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env]_220.json index 89a294b6e8d..73c4525efbf 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_ossystem[flask_appsec_good_rules_env]_220.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env].json 
b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env].json index 5265382cf59..83b9e0bae3e 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env].json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env]_220.json index ae473681205..4fd585c203c 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicatenoshell[flask_appsec_good_rules_env]_220.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env].json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env].json index 2eb36d25a38..20b135b759b 
100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env].json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env]_220.json index 800d73259aa..5fec712f209 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_processexec_subprocesscommunicateshell[flask_appsec_good_rules_env]_220.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env].json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env].json index d29fdcda126..96245135611 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env].json +++ 
b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env].json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env]_220.json index e099df45417..3166f83bff1 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_200_json[flask_appsec_good_rules_env]_220.json @@ -10,7 +10,7 @@ "error": 0, "meta": { "_dd.appsec.event_rules.version": "rules_good", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env].json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env].json index 5a0c8e301a5..3e25ed6da8e 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env].json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env].json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"blk-001-002\",\n \"name\": \"Block 
User Addresses\",\n \"on_match\": [\n \"block\"\n ],\n \"tags\": {\n \"category\": \"security_response\",\n \"type\": \"block_user\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"exact_match\",\n \"operator_value\": \"\",\n \"parameters\": [\n {\n \"address\": \"usr.id\",\n \"highlight\": [\n \"123456\"\n ],\n \"key_path\": [],\n \"value\": \"123456\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", diff --git a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env]_220.json b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env]_220.json index 806e0de6295..41661a28f5a 100644 --- a/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env]_220.json +++ b/tests/snapshots/tests.contrib.flask.test_appsec_flask_snapshot.test_flask_userblock_match_403_json[flask_appsec_good_rules_env]_220.json @@ -11,7 +11,7 @@ "meta": { "_dd.appsec.event_rules.version": "rules_good", "_dd.appsec.json": "{\"triggers\":[\n {\n \"rule\": {\n \"id\": \"blk-001-002\",\n \"name\": \"Block User Addresses\",\n \"on_match\": [\n \"block\"\n ],\n \"tags\": {\n \"category\": \"security_response\",\n \"type\": \"block_user\"\n }\n },\n \"rule_matches\": [\n {\n \"operator\": \"exact_match\",\n \"operator_value\": \"\",\n \"parameters\": [\n {\n \"address\": \"usr.id\",\n \"highlight\": [\n \"123456\"\n ],\n \"key_path\": [],\n \"value\": \"123456\"\n }\n ]\n }\n ]\n }\n]}", - "_dd.appsec.waf.version": "1.21.0", + "_dd.appsec.waf.version": "1.22.0", "_dd.base_service": "", "_dd.origin": "appsec", "_dd.p.appsec": "1", From 2ff8083cabcde3961188d2e3dd406287c6eb0cf2 Mon Sep 17 00:00:00 2001 From: Christophe Papazian 
<114495376+christophe-papazian@users.noreply.github.com> Date: Wed, 11 Dec 2024 11:35:00 +0100 Subject: [PATCH 06/78] refactor(asm): update package resolution (#11650) - package resolution was not working optimally if only a sub module of a package was loaded. This PR improves that by trying to explore the parent module if the package is not found. - remove dead code using import hook as it was never loaded. Simplify code base. APPSEC-56104 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/internal/packages.py | 40 ++++++++---- ddtrace/internal/telemetry/data.py | 14 ++-- ddtrace/internal/telemetry/modules.py | 93 
++------------------------- ddtrace/internal/telemetry/writer.py | 4 +- 4 files changed, 41 insertions(+), 110 deletions(-) diff --git a/ddtrace/internal/packages.py b/ddtrace/internal/packages.py index 8b369b9709c..ab4023d93dd 100644 --- a/ddtrace/internal/packages.py +++ b/ddtrace/internal/packages.py @@ -59,26 +59,38 @@ def get_package_distributions() -> t.Mapping[str, t.List[str]]: return _packages_distributions() -@cached(maxsize=256) -def get_module_distribution_versions(module_name: str) -> t.Dict[str, str]: +@cached(maxsize=1024) +def get_module_distribution_versions(module_name: str) -> t.Optional[t.Tuple[str, str]]: + if not module_name: + return None try: import importlib.metadata as importlib_metadata except ImportError: import importlib_metadata # type: ignore[no-redef] - try: - return { - module_name: importlib_metadata.distribution(module_name).version, - } - except importlib_metadata.PackageNotFoundError: - pass - + names: t.List[str] = [] pkgs = get_package_distributions() - names = pkgs.get(module_name) - if not names: - return {} - - return {name: get_version_for_package(name) for name in names} + while names == []: + try: + return ( + module_name, + importlib_metadata.distribution(module_name).version, + ) + except Exception: # nosec + pass + names = pkgs.get(module_name, []) + if not names: + # try to resolve the parent package + p = module_name.rfind(".") + if p > 0: + module_name = module_name[:p] + else: + break + if len(names) != 1: + # either it was not resolved due to multiple packages with the same name + # or it's a multipurpose package (like '__pycache__') + return None + return (names[0], get_version_for_package(names[0])) @cached(maxsize=256) diff --git a/ddtrace/internal/telemetry/data.py b/ddtrace/internal/telemetry/data.py index 3b73ac8b97d..a11e7f4db36 100644 --- a/ddtrace/internal/telemetry/data.py +++ b/ddtrace/internal/telemetry/data.py @@ -77,15 +77,15 @@ def update_imported_dependencies(already_imported: Dict[str, str], 
new_modules: if not dists: continue - for name, version in dists.items(): - if name == "ddtrace": - continue + name, version = dists + if name == "ddtrace": + continue - if name in already_imported: - continue + if name in already_imported: + continue - already_imported[name] = version - deps.append({"name": name, "version": version}) + already_imported[name] = version + deps.append({"name": name, "version": version}) return deps diff --git a/ddtrace/internal/telemetry/modules.py b/ddtrace/internal/telemetry/modules.py index 3b916fb1282..555e0b70d7e 100644 --- a/ddtrace/internal/telemetry/modules.py +++ b/ddtrace/internal/telemetry/modules.py @@ -1,92 +1,13 @@ import sys -from types import ModuleType -from typing import Any from typing import Set -from typing import Tuple -from ..compat import PYTHON_VERSION_INFO -from ..module import BaseModuleWatchdog +ALL_MODULES: Set[str] = set() # All modules that have been already imported -NEW_MODULES: Set[str] = set() # New modules that have been imported since the last check -ALL_MODULES: Set[str] = set() # All modules that have been imported -MODULE_HOOK_INSTALLED = False -# For Python >= 3.8 we can use the sys.audit event import(module, filename, sys.path, sys.meta_path, sys.path_hooks) -if PYTHON_VERSION_INFO >= (3, 8): - - def audit_hook(event: str, args: Tuple[Any, ...]): - if event != "import": - return - - global NEW_MODULES, ALL_MODULES - NEW_MODULES.add(args[0]) - ALL_MODULES.add(args[0]) - - def get_newly_imported_modules() -> Set[str]: - global MODULE_HOOK_INSTALLED, NEW_MODULES, ALL_MODULES - - # Our hook is not installed, so we are not getting notified of new imports, - # we need to track the changes manually - if not NEW_MODULES and not MODULE_HOOK_INSTALLED: - latest_modules = set(sys.modules.keys()) - NEW_MODULES = latest_modules - ALL_MODULES - ALL_MODULES = latest_modules - - new_modules = NEW_MODULES - NEW_MODULES = set() - return new_modules - - def install_import_hook(): - global MODULE_HOOK_INSTALLED, 
NEW_MODULES, ALL_MODULES - - # If we have not called get_newly_imported_modules yet, we can initialize to all imported modules - if not NEW_MODULES: - NEW_MODULES = set(sys.modules.keys()) - ALL_MODULES = NEW_MODULES.copy() - sys.addaudithook(audit_hook) - MODULE_HOOK_INSTALLED = True - - def uninstall_import_hook(): - # We cannot uninstall a sys audit hook - pass - -else: - - class TelemetryWriterModuleWatchdog(BaseModuleWatchdog): - _initial = True - _new_imported: Set[str] = set() - - def after_import(self, module: ModuleType) -> None: - self._new_imported.add(module.__name__) - - @classmethod - def get_new_imports(cls): - if cls._initial: - try: - # On the first call, use sys.modules to cover all imports before we started. This is not - # done on __init__ because we want to do this slow operation on the writer's periodic call - # and not on instantiation. - new_imports = list(sys.modules.keys()) - except RuntimeError: - new_imports = [] - finally: - # If there is any problem with the above we don't want to repeat this slow process, instead we just - # switch to report new dependencies on further calls - cls._initial = False - else: - new_imports = list(cls._new_imported) - - cls._new_imported.clear() - return new_imports - - def get_newly_imported_modules() -> Set[str]: - return set(TelemetryWriterModuleWatchdog.get_new_imports()) - - def install_import_hook(): - if not TelemetryWriterModuleWatchdog.is_installed(): - TelemetryWriterModuleWatchdog.install() - - def uninstall_import_hook(): - if TelemetryWriterModuleWatchdog.is_installed(): - TelemetryWriterModuleWatchdog.uninstall() +def get_newly_imported_modules() -> Set[str]: + global ALL_MODULES + latest_modules = set(sys.modules.keys()) + new_modules = latest_modules - ALL_MODULES + ALL_MODULES = latest_modules + return new_modules diff --git a/ddtrace/internal/telemetry/writer.py b/ddtrace/internal/telemetry/writer.py index 899c77c1108..d10d0aac7f4 100644 --- a/ddtrace/internal/telemetry/writer.py +++ 
b/ddtrace/internal/telemetry/writer.py @@ -236,9 +236,8 @@ def enable(self): self.start() return True + # currently self._is_periodic is always true self.status = ServiceStatus.RUNNING - if _TelemetryConfig.DEPENDENCY_COLLECTION: - modules.install_import_hook() return True def disable(self): @@ -248,7 +247,6 @@ def disable(self): Once disabled, telemetry collection can not be re-enabled. """ self._enabled = False - modules.uninstall_import_hook() self.reset_queues() if self._is_running(): self.stop() From 3b076e624c12b76f304e6658e57c5c8b10a2082b Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Wed, 11 Dec 2024 11:58:41 +0000 Subject: [PATCH 07/78] chore: clarify wrapping context behaviour (#11576) We clarify the comments around the behaviour of wrapping context. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the 
benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/internal/wrapping/context.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ddtrace/internal/wrapping/context.py b/ddtrace/internal/wrapping/context.py index 24fd91d483e..138f542720e 100644 --- a/ddtrace/internal/wrapping/context.py +++ b/ddtrace/internal/wrapping/context.py @@ -1,3 +1,4 @@ +from abc import ABC from contextvars import ContextVar from inspect import iscoroutinefunction import sys @@ -30,8 +31,9 @@ # # Because we also want to capture the return value, our context manager extends # the Python one by implementing a __return__ method that will be called with -# the return value of the function. The __exit__ method is only called if the -# function raises an exception. +# the return value of the function. Contrary to ordinary context managers, +# though, the __exit__ method is only called if the function raises an +# exception. # # Because CPython 3.11 introduced zero-cost exceptions, we cannot nest try # blocks in the function's bytecode. 
In this case, we call the context manager @@ -256,7 +258,7 @@ # This is abstract and should not be used directly -class BaseWrappingContext(t.ContextManager): +class BaseWrappingContext(ABC): __priority__: int = 0 def __init__(self, f: FunctionType): From c801f288449069383b768f0ea07bb5ab8eb5c052 Mon Sep 17 00:00:00 2001 From: Taegyun Kim Date: Wed, 11 Dec 2024 09:22:06 -0500 Subject: [PATCH 08/78] chore(profiling): revert simplify usage of cancellation token (#11654) Reverts DataDog/dd-trace-py#11644 and fix tsan errors ## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: Charles de Beauchesne --- 
.github/workflows/system-tests.yml | 8 +++++++ .../profiling/dd_wrapper/include/uploader.hpp | 4 +--- .../profiling/dd_wrapper/src/uploader.cpp | 22 +++++++++---------- .../profiling_v2/collector/test_threading.py | 2 +- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml index 697b0f77c48..06604dc811c 100644 --- a/.github/workflows/system-tests.yml +++ b/.github/workflows/system-tests.yml @@ -153,6 +153,14 @@ jobs: if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other' run: ./run.sh CROSSED_TRACING_LIBRARIES + - name: Run PROFILING + if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'other' + run: | + cat /proc/sys/kernel/perf_event_paranoid + sudo sysctl kernel.perf_event_paranoid=1 + sudo sysctl -p + ./run.sh PROFILING + - name: Run REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'remote-config' run: ./run.sh REMOTE_CONFIG_MOCKED_BACKEND_ASM_FEATURES diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader.hpp b/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader.hpp index 8a5394b0cb2..ed19f316fc3 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader.hpp @@ -24,9 +24,7 @@ class Uploader private: static inline std::mutex upload_lock{}; std::string errmsg; - static inline std::unique_ptr cancel{ - ddog_CancellationToken_new() - }; + static inline std::unique_ptr cancel; static inline std::atomic upload_seq{ 0 }; std::string output_filename; std::unique_ptr ddog_exporter; diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp index 1e04a45fb41..375c2e09e9e 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp +++ 
b/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp @@ -109,20 +109,20 @@ Datadog::Uploader::upload(ddog_prof_Profile& profile) return false; } - // If we're here, we're about to create a new upload, so cancel any inflight ones - cancel_inflight(); - - // Create a new cancellation token. Maybe we can get away without doing this, but - // since we're recreating the uploader fresh every time anyway, we recreate one more thing. - // NB wrapping this in a unique_ptr to easily add RAII semantics; maybe should just wrap it in a - // class instead. - std::unique_ptr cancel_for_request( - ddog_CancellationToken_clone(cancel.get())); - // The upload operation sets up some global state in libdatadog (the tokio runtime), so // we ensure exclusivity here. { + // If we're here, we're about to create a new upload, so cancel any inflight ones const std::lock_guard lock_guard(upload_lock); + cancel_inflight(); + + // Create a new cancellation token. Maybe we can get away without doing this, but + // since we're recreating the uploader fresh every time anyway, we recreate one more thing. 
+ // NB wrapping this in a unique_ptr to easily add RAII semantics; maybe should just wrap it in a + // class instead + cancel.reset(ddog_CancellationToken_new()); + std::unique_ptr cancel_for_request; + cancel_for_request.reset(ddog_CancellationToken_clone(cancel.get())); // Build and check the response object ddog_prof_Exporter_Request* req = build_res.ok; // NOLINT (cppcoreguidelines-pro-type-union-access) @@ -156,7 +156,7 @@ Datadog::Uploader::unlock() void Datadog::Uploader::cancel_inflight() { - ddog_CancellationToken_cancel(cancel.get()); + cancel.reset(); } void diff --git a/tests/profiling_v2/collector/test_threading.py b/tests/profiling_v2/collector/test_threading.py index 5ca09dd8da5..abed7d0cfda 100644 --- a/tests/profiling_v2/collector/test_threading.py +++ b/tests/profiling_v2/collector/test_threading.py @@ -83,7 +83,7 @@ def test_patch(): @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="only works on linux") -@pytest.mark.subprocess(err=None) +@pytest.mark.subprocess() def test_user_threads_have_native_id(): from os import getpid from threading import Thread From 07db667aa1fe1b1c1bdffcb432dad24e5bb20b70 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 09:25:32 -0500 Subject: [PATCH 09/78] chore: update langchain latest version to 0.3.10 (#11519) Update langchain lockfiles and dependency package lockfiles. This performs the following updates: 1) Some langchain lockfiles use langchain `latest`. This will update langchain and dependencies. 2) Some langchain lockfiles use a pinned (non-latest) version of langchain, but require the `latest` version of another package. This will update all such packages. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) Co-authored-by: quinna-h <175135214+quinna-h@users.noreply.github.com> Co-authored-by: Quinna Halim --- .riot/requirements/11063bf.txt | 60 +++++++++--------- .riot/requirements/16c3b9f.txt | 61 +++++++++--------- .riot/requirements/1761cfc.txt | 72 +++++++++++----------- .riot/requirements/18bc2ac.txt | 107 ++++++++++++++++---------------- .riot/requirements/19f2225.txt | 62 ++++++++++--------- .riot/requirements/1ec1dbf.txt | 109 +++++++++++++++++---------------- .riot/requirements/457db9b.txt | 70 +++++++++++---------- .riot/requirements/55a4977.txt | 70 +++++++++++---------- 
.riot/requirements/585e779.txt | 59 +++++++++--------- .riot/requirements/a311bc2.txt | 109 +++++++++++++++++---------------- .riot/requirements/aa1fe5c.txt | 74 +++++++++++----------- .riot/requirements/cbbb0eb.txt | 107 ++++++++++++++++---------------- .riot/requirements/cf9bdda.txt | 64 +++++++++---------- .riot/requirements/d39d3de.txt | 59 +++++++++--------- 14 files changed, 556 insertions(+), 527 deletions(-) diff --git a/.riot/requirements/11063bf.txt b/.riot/requirements/11063bf.txt index 672870755a7..5f06cd4e04a 100644 --- a/.riot/requirements/11063bf.txt +++ b/.riot/requirements/11063bf.txt @@ -4,38 +4,38 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/11063bf.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 cohere==5.4.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 faiss-cpu==1.8.0 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 jmespath==1.0.1 jsonpatch==1.33 @@ -49,8 +49,8 @@ langchain-core==0.1.52 langchain-openai==0.1.6 langchain-pinecone==0.1.0 langchain-text-splitters==0.0.2 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 @@ -58,36 +58,38 @@ numexpr==2.8.5 numpy==1.26.4 openai==1.30.3 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 pinecone-client==3.2.2 pluggy==1.5.0 
-psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 +tiktoken==0.8.0 tokenizers==0.19.1 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==2.0.7 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/16c3b9f.txt b/.riot/requirements/16c3b9f.txt index 1919ccd9e72..ac3ccd42fa3 100644 --- a/.riot/requirements/16c3b9f.txt +++ b/.riot/requirements/16c3b9f.txt @@ -4,33 +4,33 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/16c3b9f.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 -anyio==4.4.0 +anyio==4.7.0 async-timeout==4.0.3 attrs==24.2.0 backoff==2.2.1 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 cohere==4.57 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 dataclasses-json==0.5.14 -dnspython==2.6.1 +dnspython==2.7.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 importlib-metadata==6.11.0 iniconfig==2.0.0 jsonpatch==1.33 @@ -40,8 +40,8 @@ langchain-community==0.0.14 
langchain-core==0.1.23 langchainplus-sdk==0.0.4 langsmith==0.0.87 -loguru==0.7.2 -marshmallow==3.22.0 +loguru==0.7.3 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 @@ -53,31 +53,32 @@ opentracing==2.4.0 packaging==23.2 pinecone-client==2.2.4 pluggy==1.5.0 -psutil==6.0.0 -pydantic==1.10.18 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==1.10.19 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tomli==2.0.1 -tqdm==4.66.5 +tiktoken==0.8.0 +tokenizers==0.21.0 +tomli==2.2.1 +tqdm==4.67.1 typing-extensions==4.12.2 typing-inspect==0.9.0 -urllib3==2.2.2 +urllib3==2.2.3 vcrpy==6.0.1 -wrapt==1.16.0 -yarl==1.11.1 -zipp==3.20.1 +wrapt==1.17.0 +yarl==1.18.3 +zipp==3.21.0 diff --git a/.riot/requirements/1761cfc.txt b/.riot/requirements/1761cfc.txt index 4ccba3f60cb..6eb2c9fe558 100644 --- a/.riot/requirements/1761cfc.txt +++ b/.riot/requirements/1761cfc.txt @@ -4,40 +4,40 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/1761cfc.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 async-timeout==4.0.3 attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 
h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 @@ -49,49 +49,51 @@ langchain-core==0.2.0 langchain-openai==0.1.7 langchain-pinecone==0.1.3 langchain-text-splitters==0.2.1 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tomli==2.0.1 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tiktoken==0.8.0 +tokenizers==0.21.0 +tomli==2.2.1 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==2.0.7 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/18bc2ac.txt b/.riot/requirements/18bc2ac.txt index e3f60b3aad2..aaf19bf9fe2 100644 --- a/.riot/requirements/18bc2ac.txt +++ b/.riot/requirements/18bc2ac.txt @@ -4,98 +4,101 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/18bc2ac.in # 
-ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohttp==3.9.5 aiosignal==1.3.1 annotated-types==0.7.0 -anthropic==0.34.2 -anyio==4.4.0 +anthropic==0.40.0 +anyio==4.7.0 attrs==24.2.0 -boto3==1.34.162 -botocore==1.34.162 +boto3==1.35.76 +botocore==1.35.76 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 -langchain==0.2.16 -langchain-anthropic==0.1.23 -langchain-aws==0.1.18 -langchain-cohere==0.2.4 -langchain-community==0.2.16 -langchain-core==0.2.39 -langchain-experimental==0.0.65 -langchain-openai==0.1.23 -langchain-pinecone==0.1.3 -langchain-text-splitters==0.2.4 -langsmith==0.1.117 -marshmallow==3.22.0 +langchain==0.3.10 +langchain-anthropic==0.3.0 +langchain-aws==0.2.9 +langchain-cohere==0.3.3 +langchain-community==0.3.10 +langchain-core==0.3.22 +langchain-experimental==0.3.3 +langchain-openai==0.2.11 +langchain-pinecone==0.2.0 +langchain-text-splitters==0.3.2 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 -packaging==24.1 -pandas==2.2.2 +orjson==3.10.12 +packaging==24.2 +pandas==2.2.3 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 
-pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pydantic-settings==2.6.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 -pytz==2024.1 +python-dotenv==1.0.1 +pytz==2024.2 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tabulate==0.9.0 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tiktoken==0.8.0 +tokenizers==0.21.0 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 -tzdata==2024.1 -urllib3==2.2.2 +tzdata==2024.2 +urllib3==2.2.3 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/19f2225.txt b/.riot/requirements/19f2225.txt index dc86f7981bf..63df4f55d90 100644 --- a/.riot/requirements/19f2225.txt +++ b/.riot/requirements/19f2225.txt @@ -4,39 +4,39 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/19f2225.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 async-timeout==4.0.3 attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 cohere==5.4.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 faiss-cpu==1.8.0 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 
-huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 jmespath==1.0.1 jsonpatch==1.33 @@ -50,8 +50,8 @@ langchain-core==0.1.52 langchain-openai==0.1.6 langchain-pinecone==0.1.0 langchain-text-splitters==0.0.2 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 @@ -59,37 +59,39 @@ numexpr==2.8.5 numpy==1.26.4 openai==1.30.3 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 pinecone-client==3.2.2 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 +tiktoken==0.8.0 tokenizers==0.19.1 -tomli==2.0.1 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tomli==2.2.1 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==2.0.7 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/1ec1dbf.txt b/.riot/requirements/1ec1dbf.txt index 3f7fffea275..0a093e6e676 100644 --- a/.riot/requirements/1ec1dbf.txt +++ b/.riot/requirements/1ec1dbf.txt @@ -4,100 +4,103 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/1ec1dbf.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohttp==3.9.5 aiosignal==1.3.1 annotated-types==0.7.0 -anthropic==0.34.2 -anyio==4.4.0 +anthropic==0.40.0 +anyio==4.7.0 async-timeout==4.0.3 attrs==24.2.0 -boto3==1.34.162 
-botocore==1.34.162 +boto3==1.35.76 +botocore==1.35.76 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 -langchain==0.2.16 -langchain-anthropic==0.1.23 -langchain-aws==0.1.18 -langchain-cohere==0.2.4 -langchain-community==0.2.16 -langchain-core==0.2.39 -langchain-experimental==0.0.65 -langchain-openai==0.1.23 -langchain-pinecone==0.1.3 -langchain-text-splitters==0.2.4 -langsmith==0.1.117 -marshmallow==3.22.0 +langchain==0.3.10 +langchain-anthropic==0.3.0 +langchain-aws==0.2.9 +langchain-cohere==0.3.3 +langchain-community==0.3.10 +langchain-core==0.3.22 +langchain-experimental==0.3.3 +langchain-openai==0.2.11 +langchain-pinecone==0.2.0 +langchain-text-splitters==0.3.2 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 -packaging==24.1 -pandas==2.2.2 +orjson==3.10.12 +packaging==24.2 +pandas==2.2.3 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pydantic-settings==2.6.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 
python-dateutil==2.9.0.post0 -pytz==2024.1 +python-dotenv==1.0.1 +pytz==2024.2 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tabulate==0.9.0 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tomli==2.0.1 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tiktoken==0.8.0 +tokenizers==0.21.0 +tomli==2.2.1 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 -tzdata==2024.1 -urllib3==2.2.2 +tzdata==2024.2 +urllib3==2.2.3 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/457db9b.txt b/.riot/requirements/457db9b.txt index 28def9f2321..12da4c338c5 100644 --- a/.riot/requirements/457db9b.txt +++ b/.riot/requirements/457db9b.txt @@ -4,39 +4,39 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/457db9b.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 @@ -48,48 +48,50 @@ langchain-core==0.2.0 
langchain-openai==0.1.7 langchain-pinecone==0.1.3 langchain-text-splitters==0.2.1 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tiktoken==0.8.0 +tokenizers==0.21.0 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==2.0.7 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/55a4977.txt b/.riot/requirements/55a4977.txt index d2fdde55602..9c65a62f94a 100644 --- a/.riot/requirements/55a4977.txt +++ b/.riot/requirements/55a4977.txt @@ -4,39 +4,39 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/55a4977.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 
-coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 @@ -48,48 +48,50 @@ langchain-core==0.2.0 langchain-openai==0.1.7 langchain-pinecone==0.1.3 langchain-text-splitters==0.2.1 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tiktoken==0.8.0 +tokenizers==0.21.0 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==2.0.7 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff 
--git a/.riot/requirements/585e779.txt b/.riot/requirements/585e779.txt index 2429e3e442d..3e328720bd3 100644 --- a/.riot/requirements/585e779.txt +++ b/.riot/requirements/585e779.txt @@ -4,32 +4,32 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/585e779.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 -anyio==4.4.0 +anyio==4.7.0 attrs==24.2.0 backoff==2.2.1 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 cohere==4.57 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 dataclasses-json==0.5.14 -dnspython==2.6.1 +dnspython==2.7.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 importlib-metadata==6.11.0 iniconfig==2.0.0 jsonpatch==1.33 @@ -39,8 +39,8 @@ langchain-community==0.0.14 langchain-core==0.1.23 langchainplus-sdk==0.0.4 langsmith==0.0.87 -loguru==0.7.2 -marshmallow==3.22.0 +loguru==0.7.3 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 @@ -52,30 +52,31 @@ opentracing==2.4.0 packaging==23.2 pinecone-client==2.2.4 pluggy==1.5.0 -psutil==6.0.0 -pydantic==1.10.18 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==1.10.19 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tqdm==4.66.5 +tiktoken==0.8.0 +tokenizers==0.21.0 +tqdm==4.67.1 typing-extensions==4.12.2 typing-inspect==0.9.0 
-urllib3==2.2.2 +urllib3==2.2.3 vcrpy==6.0.1 -wrapt==1.16.0 -yarl==1.11.1 -zipp==3.20.1 +wrapt==1.17.0 +yarl==1.18.3 +zipp==3.21.0 diff --git a/.riot/requirements/a311bc2.txt b/.riot/requirements/a311bc2.txt index 48ecb036fea..42f18e00ad7 100644 --- a/.riot/requirements/a311bc2.txt +++ b/.riot/requirements/a311bc2.txt @@ -4,103 +4,106 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/a311bc2.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohttp==3.9.5 aiosignal==1.3.1 annotated-types==0.7.0 -anthropic==0.34.2 -anyio==4.4.0 +anthropic==0.40.0 +anyio==4.7.0 async-timeout==4.0.3 attrs==24.2.0 -boto3==1.34.162 -botocore==1.34.162 +boto3==1.35.76 +botocore==1.35.76 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 -importlib-metadata==8.4.0 +idna==3.10 +importlib-metadata==8.5.0 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 -langchain==0.2.16 -langchain-anthropic==0.1.23 -langchain-aws==0.1.18 -langchain-cohere==0.2.4 -langchain-community==0.2.16 -langchain-core==0.2.39 -langchain-experimental==0.0.65 -langchain-openai==0.1.23 -langchain-pinecone==0.1.3 -langchain-text-splitters==0.2.4 -langsmith==0.1.117 -marshmallow==3.22.0 +langchain==0.3.10 +langchain-anthropic==0.3.0 +langchain-aws==0.2.9 +langchain-cohere==0.3.3 +langchain-community==0.3.10 +langchain-core==0.3.22 +langchain-experimental==0.3.3 +langchain-openai==0.2.11 +langchain-pinecone==0.2.0 +langchain-text-splitters==0.3.2 
+langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 -packaging==24.1 -pandas==2.2.2 +orjson==3.10.12 +packaging==24.2 +pandas==2.2.3 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pydantic-settings==2.6.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 -pytz==2024.1 +python-dotenv==1.0.1 +pytz==2024.2 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tabulate==0.9.0 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tomli==2.0.1 -tqdm==4.66.5 +tiktoken==0.8.0 +tokenizers==0.21.0 +tomli==2.2.1 +tqdm==4.67.1 types-requests==2.31.0.6 types-urllib3==1.26.25.14 typing-extensions==4.12.2 typing-inspect==0.9.0 -tzdata==2024.1 +tzdata==2024.2 urllib3==1.26.20 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 -zipp==3.20.1 +wrapt==1.17.0 +yarl==1.18.3 +zipp==3.21.0 diff --git a/.riot/requirements/aa1fe5c.txt b/.riot/requirements/aa1fe5c.txt index 7912c6eec65..bf4c4ba301f 100644 --- a/.riot/requirements/aa1fe5c.txt +++ b/.riot/requirements/aa1fe5c.txt @@ -4,41 +4,41 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/aa1fe5c.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 async-timeout==4.0.3 
attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 -importlib-metadata==8.4.0 +idna==3.10 +importlib-metadata==8.5.0 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 @@ -50,51 +50,53 @@ langchain-core==0.2.0 langchain-openai==0.1.7 langchain-pinecone==0.1.3 langchain-text-splitters==0.2.1 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tomli==2.0.1 -tqdm==4.66.5 +tiktoken==0.8.0 +tokenizers==0.21.0 +tomli==2.2.1 +tqdm==4.67.1 
types-requests==2.31.0.6 types-urllib3==1.26.25.14 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==1.26.20 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 -zipp==3.20.1 +wrapt==1.17.0 +yarl==1.18.3 +zipp==3.21.0 diff --git a/.riot/requirements/cbbb0eb.txt b/.riot/requirements/cbbb0eb.txt index 6f976af7055..91e45e45546 100644 --- a/.riot/requirements/cbbb0eb.txt +++ b/.riot/requirements/cbbb0eb.txt @@ -4,98 +4,101 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/cbbb0eb.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohttp==3.9.5 aiosignal==1.3.1 annotated-types==0.7.0 -anthropic==0.34.2 -anyio==4.4.0 +anthropic==0.40.0 +anyio==4.7.0 attrs==24.2.0 -boto3==1.34.162 -botocore==1.34.162 +boto3==1.35.76 +botocore==1.35.76 certifi==2024.8.30 -charset-normalizer==3.3.2 -cohere==5.9.1 -coverage[toml]==7.6.1 +charset-normalizer==3.4.0 +cohere==5.13.3 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 iniconfig==2.0.0 -jiter==0.5.0 +jiter==0.8.0 jmespath==1.0.1 jsonpatch==1.33 jsonpointer==3.0.0 -langchain==0.2.16 -langchain-anthropic==0.1.23 -langchain-aws==0.1.18 -langchain-cohere==0.2.4 -langchain-community==0.2.16 -langchain-core==0.2.39 -langchain-experimental==0.0.65 -langchain-openai==0.1.23 -langchain-pinecone==0.1.3 -langchain-text-splitters==0.2.4 -langsmith==0.1.117 -marshmallow==3.22.0 +langchain==0.3.10 +langchain-anthropic==0.3.0 +langchain-aws==0.2.9 +langchain-cohere==0.3.3 +langchain-community==0.3.10 +langchain-core==0.3.22 +langchain-experimental==0.3.3 +langchain-openai==0.2.11 +langchain-pinecone==0.2.0 
+langchain-text-splitters==0.3.2 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 numexpr==2.8.5 numpy==1.26.4 -openai==1.44.1 +openai==1.57.0 opentracing==2.4.0 -orjson==3.10.7 -packaging==24.1 -pandas==2.2.2 +orjson==3.10.12 +packaging==24.2 +pandas==2.2.3 parameterized==0.9.0 pinecone-client==5.0.1 -pinecone-plugin-inference==1.0.3 +pinecone-plugin-inference==1.1.0 pinecone-plugin-interface==0.0.7 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pydantic-settings==2.6.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 -pytz==2024.1 +python-dotenv==1.0.1 +pytz==2024.2 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tabulate==0.9.0 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tqdm==4.66.5 -types-requests==2.32.0.20240907 +tiktoken==0.8.0 +tokenizers==0.21.0 +tqdm==4.67.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 -tzdata==2024.1 -urllib3==2.2.2 +tzdata==2024.2 +urllib3==2.2.3 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 +wrapt==1.17.0 +yarl==1.18.3 diff --git a/.riot/requirements/cf9bdda.txt b/.riot/requirements/cf9bdda.txt index 943438066ca..d08448d036c 100644 --- a/.riot/requirements/cf9bdda.txt +++ b/.riot/requirements/cf9bdda.txt @@ -4,40 +4,40 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/cf9bdda.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 annotated-types==0.7.0 anthropic==0.26.0 -anyio==4.4.0 +anyio==4.7.0 async-timeout==4.0.3 
attrs==24.2.0 boto3==1.34.51 botocore==1.34.51 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 cohere==5.4.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 dataclasses-json==0.6.7 defusedxml==0.7.1 distro==1.9.0 exceptiongroup==1.2.2 faiss-cpu==1.8.0 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 httpx-sse==0.4.0 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 -importlib-metadata==8.4.0 +idna==3.10 +importlib-metadata==8.5.0 iniconfig==2.0.0 jmespath==1.0.1 jsonpatch==1.33 @@ -51,8 +51,8 @@ langchain-core==0.1.52 langchain-openai==0.1.6 langchain-pinecone==0.1.0 langchain-text-splitters==0.0.2 -langsmith==0.1.117 -marshmallow==3.22.0 +langsmith==0.1.147 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 @@ -60,39 +60,41 @@ numexpr==2.8.5 numpy==1.26.4 openai==1.30.3 opentracing==2.4.0 -orjson==3.10.7 +orjson==3.10.12 packaging==23.2 pinecone-client==3.2.2 pluggy==1.5.0 -psutil==6.0.0 -pydantic==2.9.1 -pydantic-core==2.23.3 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 -s3transfer==0.10.2 +requests-toolbelt==1.0.0 +s3transfer==0.10.4 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 +tiktoken==0.8.0 tokenizers==0.19.1 -tomli==2.0.1 -tqdm==4.66.5 +tomli==2.2.1 +tqdm==4.67.1 types-requests==2.31.0.6 types-urllib3==1.26.25.14 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==1.26.20 vcrpy==5.1.0 -wrapt==1.16.0 -yarl==1.11.1 -zipp==3.20.1 +wrapt==1.17.0 +yarl==1.18.3 
+zipp==3.21.0 diff --git a/.riot/requirements/d39d3de.txt b/.riot/requirements/d39d3de.txt index a1e88b6d11b..53ccfd3b257 100644 --- a/.riot/requirements/d39d3de.txt +++ b/.riot/requirements/d39d3de.txt @@ -4,33 +4,33 @@ # # pip-compile --allow-unsafe --no-annotate .riot/requirements/d39d3de.in # -ai21==2.14.1 +ai21==3.0.1 ai21-tokenizer==0.12.0 -aiohappyeyeballs==2.4.0 -aiohttp==3.10.5 +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 aiosignal==1.3.1 -anyio==4.4.0 +anyio==4.7.0 async-timeout==4.0.3 attrs==24.2.0 backoff==2.2.1 certifi==2024.8.30 -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 cohere==4.57 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 dataclasses-json==0.5.14 -dnspython==2.6.1 +dnspython==2.7.0 exceptiongroup==1.2.2 fastavro==1.9.7 -filelock==3.16.0 -frozenlist==1.4.1 -fsspec==2024.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec==2024.10.0 greenlet==3.0.3 h11==0.14.0 -httpcore==1.0.5 +httpcore==1.0.7 httpx==0.27.2 -huggingface-hub==0.24.6 +huggingface-hub==0.26.5 hypothesis==6.45.0 -idna==3.8 +idna==3.10 importlib-metadata==6.11.0 iniconfig==2.0.0 jsonpatch==1.33 @@ -40,8 +40,8 @@ langchain-community==0.0.14 langchain-core==0.1.23 langchainplus-sdk==0.0.4 langsmith==0.0.87 -loguru==0.7.2 -marshmallow==3.22.0 +loguru==0.7.3 +marshmallow==3.23.1 mock==5.1.0 multidict==6.1.0 mypy-extensions==1.0.0 @@ -53,31 +53,32 @@ opentracing==2.4.0 packaging==23.2 pinecone-client==2.2.4 pluggy==1.5.0 -psutil==6.0.0 -pydantic==1.10.18 -pytest==8.3.3 +propcache==0.2.1 +psutil==6.1.0 +pydantic==1.10.19 +pytest==8.3.4 pytest-asyncio==0.23.7 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.10.1 python-dateutil==2.9.0.post0 pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 sentencepiece==0.2.0 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -sqlalchemy==2.0.34 +sqlalchemy==2.0.36 tenacity==8.5.0 -tiktoken==0.7.0 -tokenizers==0.20.0 -tomli==2.0.1 -tqdm==4.66.5 +tiktoken==0.8.0 +tokenizers==0.21.0 
+tomli==2.2.1 +tqdm==4.67.1 typing-extensions==4.12.2 typing-inspect==0.9.0 urllib3==1.26.20 vcrpy==6.0.1 -wrapt==1.16.0 -yarl==1.11.1 -zipp==3.20.1 +wrapt==1.17.0 +yarl==1.18.3 +zipp==3.21.0 From fd37c0bf53b96c5e77c7c70e597f0846517c5a36 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Wed, 11 Dec 2024 14:47:14 +0000 Subject: [PATCH 10/78] refactor: encapsulate container info (#11649) We encapsulate the logic required to add container information headers to HTTP requests in the connection constructor. This elimiates the need to add the same logic across all products/services that make requests. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance 
policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_uploader.py | 4 -- ddtrace/internal/http.py | 22 +++++++-- ddtrace/internal/processor/stats.py | 3 -- ddtrace/internal/remoteconfig/client.py | 4 -- ddtrace/internal/runtime/container.py | 66 ++++++++++++------------- ddtrace/internal/telemetry/writer.py | 3 -- ddtrace/internal/uds.py | 4 +- ddtrace/internal/utils/__init__.py | 3 +- ddtrace/internal/writer/writer.py | 4 -- ddtrace/profiling/exporter/http.py | 5 -- tests/tracer/runtime/test_container.py | 2 + 11 files changed, 56 insertions(+), 64 deletions(-) diff --git a/ddtrace/debugging/_uploader.py b/ddtrace/debugging/_uploader.py index c6ff84fc190..f8f1a22a9d2 100644 --- a/ddtrace/debugging/_uploader.py +++ b/ddtrace/debugging/_uploader.py @@ -12,7 +12,6 @@ from ddtrace.internal import compat from ddtrace.internal.logger import get_logger from ddtrace.internal.periodic import ForksafeAwakeablePeriodicService -from ddtrace.internal.runtime import container from ddtrace.internal.utils.http import connector from ddtrace.internal.utils.retry import fibonacci_backoff_with_jitter @@ -55,9 +54,6 @@ def __init__(self, interval: Optional[float] = None) -> None: "Accept": "text/plain", } - container.update_headers_with_container_info(self._headers, container.get_container_info()) - container.update_header_with_external_info(self._headers) - if di_config._tags_in_qs and di_config.tags: self.ENDPOINT += f"?ddtags={quote(di_config.tags)}" self._connect = connector(di_config._intake_url, timeout=di_config.upload_timeout) diff --git a/ddtrace/internal/http.py b/ddtrace/internal/http.py index c8e6c772d6b..34cee7e0793 100644 --- a/ddtrace/internal/http.py +++ b/ddtrace/internal/http.py @@ -1,10 +1,15 @@ from ddtrace.internal.compat import httplib from ddtrace.internal.compat import parse +from ddtrace.internal.runtime import container -class BasePathMixin(httplib.HTTPConnection, object): +class HTTPConnectionMixin: """ - 
Mixin for HTTPConnection to insert a base path to requested URLs + Mixin for HTTP(S) connections for performing internal adjustments. + + Currently this mixin performs the following adjustments: + - insert a base path to requested URLs + - update headers with container info """ _base_path = "/" # type: str @@ -12,7 +17,7 @@ class BasePathMixin(httplib.HTTPConnection, object): def putrequest(self, method, url, skip_host=False, skip_accept_encoding=False): # type: (str, str, bool, bool) -> None url = parse.urljoin(self._base_path, url) - return super(BasePathMixin, self).putrequest( + return super().putrequest( # type: ignore[misc] method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding ) @@ -23,14 +28,21 @@ def with_base_path(cls, *args, **kwargs): obj._base_path = base_path return obj + def request(self, method, url, body=None, headers={}, *, encode_chunked=False): + _headers = headers.copy() + + container.update_headers(_headers) + + return super().request(method, url, body=body, headers=_headers, encode_chunked=encode_chunked) + -class HTTPConnection(BasePathMixin, httplib.HTTPConnection): +class HTTPConnection(HTTPConnectionMixin, httplib.HTTPConnection): """ httplib.HTTPConnection wrapper to add a base path to requested URLs """ -class HTTPSConnection(BasePathMixin, httplib.HTTPSConnection): +class HTTPSConnection(HTTPConnectionMixin, httplib.HTTPSConnection): """ httplib.HTTPSConnection wrapper to add a base path to requested URLs """ diff --git a/ddtrace/internal/processor/stats.py b/ddtrace/internal/processor/stats.py index 1ba8e008105..f79f460582e 100644 --- a/ddtrace/internal/processor/stats.py +++ b/ddtrace/internal/processor/stats.py @@ -19,7 +19,6 @@ from ..hostname import get_hostname from ..logger import get_logger from ..periodic import PeriodicService -from ..runtime import container from ..writer import _human_size @@ -108,8 +107,6 @@ def __init__(self, agent_url, interval=None, timeout=1.0, retry_attempts=3): 
"Datadog-Meta-Tracer-Version": ddtrace.__version__, "Content-Type": "application/msgpack", } # type: Dict[str, str] - container.update_headers_with_container_info(self._headers, container.get_container_info()) - container.update_header_with_external_info(self._headers) self._hostname = "" if config._report_hostname: self._hostname = get_hostname() diff --git a/ddtrace/internal/remoteconfig/client.py b/ddtrace/internal/remoteconfig/client.py index 3f9315d0be0..6d77f220d81 100644 --- a/ddtrace/internal/remoteconfig/client.py +++ b/ddtrace/internal/remoteconfig/client.py @@ -29,7 +29,6 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.packages import is_distribution_available from ddtrace.internal.remoteconfig.constants import REMOTE_CONFIG_AGENT_ENDPOINT -from ddtrace.internal.runtime import container from ddtrace.internal.service import ServiceStatus from ddtrace.internal.utils.time import parse_isoformat @@ -240,9 +239,6 @@ def __init__(self) -> None: if additional_header_str is not None: self._headers.update(parse_tags_str(additional_header_str)) - container.update_headers_with_container_info(self._headers, container.get_container_info()) - container.update_header_with_external_info(self._headers) - tags = ddtrace.config.tags.copy() # Add git metadata tags, if available diff --git a/ddtrace/internal/runtime/container.py b/ddtrace/internal/runtime/container.py index c12a091fe21..6025981a726 100644 --- a/ddtrace/internal/runtime/container.py +++ b/ddtrace/internal/runtime/container.py @@ -1,9 +1,18 @@ import errno +from functools import lru_cache import os import re +import sys from typing import Any from typing import Dict from typing import Optional +from typing import Union + + +if sys.version_info >= (3, 8): + from typing import Literal # noqa:F401 +else: + from typing_extensions import Literal from ..constants import CONTAINER_ID_HEADER_NAME from ..constants import ENTITY_ID_HEADER_NAME @@ -130,25 +139,17 @@ def from_line(cls, line): ) 
-def get_container_info(pid="self"): - # type: (str) -> Optional[CGroupInfo] +@lru_cache(64) +def get_container_info(pid: Union[Literal["self"], int] = "self") -> Optional[CGroupInfo]: """ - Helper to fetch the current container id, if we are running in a container + Helper to fetch the current container id, if we are running in a container. We will parse `/proc/{pid}/cgroup` to determine our container id. - The results of calling this function are cached - - :param pid: The pid of the cgroup file to parse (default: 'self') - :type pid: str | int - :returns: The cgroup file info if found, or else None - :rtype: :class:`CGroupInfo` | None + The results of calling this function are cached. """ - - cgroup_file = "/proc/{0}/cgroup".format(pid) - try: - with open(cgroup_file, mode="r") as fp: + with open(f"/proc/{pid}/cgroup", mode="r") as fp: for line in fp: info = CGroupInfo.from_line(line) if info and (info.container_id or info.node_inode): @@ -161,27 +162,26 @@ def get_container_info(pid="self"): return None -def update_headers_with_container_info(headers: Dict, container_info: Optional[CGroupInfo]) -> None: +def update_headers(headers: Dict) -> None: """Get the container info (either the container ID or the cgroup inode) and add it to the headers.""" - if container_info is None: - return - if container_info.container_id: - headers.update( - { - CONTAINER_ID_HEADER_NAME: container_info.container_id, - ENTITY_ID_HEADER_NAME: f"ci-{container_info.container_id}", - } - ) - elif container_info.node_inode: - headers.update( - { - ENTITY_ID_HEADER_NAME: f"in-{container_info.node_inode}", - } - ) - - -def update_header_with_external_info(headers: Dict) -> None: - """Get the external environment info from the environment variable and add it to the headers.""" + container_info = get_container_info() + if container_info is not None: + if container_info.container_id: + headers.update( + { + CONTAINER_ID_HEADER_NAME: container_info.container_id, + ENTITY_ID_HEADER_NAME: 
f"ci-{container_info.container_id}", + } + ) + elif container_info.node_inode: + headers.update( + { + ENTITY_ID_HEADER_NAME: f"in-{container_info.node_inode}", + } + ) + + # Get the external environment info from the environment variable and add it + # to the headers external_info = os.environ.get(EXTERNAL_ENV_ENVIRONMENT_VARIABLE) if external_info: headers.update( diff --git a/ddtrace/internal/telemetry/writer.py b/ddtrace/internal/telemetry/writer.py index d10d0aac7f4..63e672e24dd 100644 --- a/ddtrace/internal/telemetry/writer.py +++ b/ddtrace/internal/telemetry/writer.py @@ -23,7 +23,6 @@ from ..compat import get_connection_response from ..encoding import JSONEncoderV2 from ..periodic import PeriodicService -from ..runtime import container from ..runtime import get_runtime_id from ..service import ServiceStatus from ..utils.formats import asbool @@ -136,8 +135,6 @@ def get_headers(self, request): headers["DD-Telemetry-Debug-Enabled"] = request["debug"] headers["DD-Telemetry-Request-Type"] = request["request_type"] headers["DD-Telemetry-API-Version"] = request["api_version"] - container.update_headers_with_container_info(headers, container.get_container_info()) - container.update_header_with_external_info(headers) return headers def get_endpoint(self, agentless: bool) -> str: diff --git a/ddtrace/internal/uds.py b/ddtrace/internal/uds.py index 5c8dbf02882..fcf4e52e916 100644 --- a/ddtrace/internal/uds.py +++ b/ddtrace/internal/uds.py @@ -2,10 +2,10 @@ from typing import Any # noqa:F401 from .compat import httplib -from .http import BasePathMixin +from .http import HTTPConnectionMixin -class UDSHTTPConnection(BasePathMixin, httplib.HTTPConnection): +class UDSHTTPConnection(HTTPConnectionMixin, httplib.HTTPConnection): """An HTTP connection established over a Unix Domain Socket.""" # It's "important" to keep the hostname and port arguments here; while there are not used by the connection diff --git a/ddtrace/internal/utils/__init__.py 
b/ddtrace/internal/utils/__init__.py index 294e99d1263..1d7ee493953 100644 --- a/ddtrace/internal/utils/__init__.py +++ b/ddtrace/internal/utils/__init__.py @@ -3,6 +3,7 @@ from typing import List # noqa:F401 from typing import Optional # noqa:F401 from typing import Tuple # noqa:F401 +from typing import Union # noqa:F401 class ArgumentError(Exception): @@ -13,7 +14,7 @@ class ArgumentError(Exception): def get_argument_value( - args: List[Any], + args: Union[Tuple[Any], List[Any]], kwargs: Dict[str, Any], pos: int, kw: str, diff --git a/ddtrace/internal/writer/writer.py b/ddtrace/internal/writer/writer.py index 59eb56bd821..01b05515984 100644 --- a/ddtrace/internal/writer/writer.py +++ b/ddtrace/internal/writer/writer.py @@ -31,7 +31,6 @@ from ..constants import _HTTPLIB_NO_TRACE_REQUEST from ..encoding import JSONEncoderV2 from ..logger import get_logger -from ..runtime import container from ..serverless import in_azure_function from ..serverless import in_gcp_function from ..sma import SimpleMovingAverage @@ -493,9 +492,6 @@ def __init__( } if headers: _headers.update(headers) - self._container_info = container.get_container_info() - container.update_headers_with_container_info(_headers, self._container_info) - container.update_header_with_external_info(_headers) _headers.update({"Content-Type": client.encoder.content_type}) # type: ignore[attr-defined] additional_header_str = os.environ.get("_DD_TRACE_WRITER_ADDITIONAL_HEADERS") diff --git a/ddtrace/profiling/exporter/http.py b/ddtrace/profiling/exporter/http.py index f23e12acb5a..6700e584ade 100644 --- a/ddtrace/profiling/exporter/http.py +++ b/ddtrace/profiling/exporter/http.py @@ -15,7 +15,6 @@ from ddtrace.internal import agent from ddtrace.internal import runtime from ddtrace.internal.processor.endpoint_call_counter import EndpointCallCounterProcessor -from ddtrace.internal.runtime import container from ddtrace.internal.utils.retry import fibonacci_backoff_with_jitter from ddtrace.profiling import exporter 
from ddtrace.profiling import recorder # noqa:F401 @@ -67,7 +66,6 @@ def __init__( self.version: typing.Optional[str] = version self.tags: typing.Dict[str, str] = tags if tags is not None else {} self.max_retry_delay: typing.Optional[float] = max_retry_delay - self._container_info: typing.Optional[container.CGroupInfo] = container.get_container_info() self.endpoint_call_counter_span_processor: typing.Optional[ EndpointCallCounterProcessor ] = endpoint_call_counter_span_processor @@ -183,9 +181,6 @@ def export( else: headers = {} - container.update_headers_with_container_info(headers, self._container_info) - container.update_header_with_external_info(headers) - profile, libs = super(PprofHTTPExporter, self).export(events, start_time_ns, end_time_ns) pprof = io.BytesIO() with gzip.GzipFile(fileobj=pprof, mode="wb") as gz: diff --git a/tests/tracer/runtime/test_container.py b/tests/tracer/runtime/test_container.py index f9becf93a41..f3c28dc90e6 100644 --- a/tests/tracer/runtime/test_container.py +++ b/tests/tracer/runtime/test_container.py @@ -307,6 +307,7 @@ def test_get_container_info(file_contents, container_id, node_inode): if file_contents is None: mock_open.side_effect = FileNotFoundError + get_container_info.cache_clear() info = get_container_info() if info is not None: @@ -344,6 +345,7 @@ def test_get_container_info_exception(mock_log, mock_from_line): # DEV: We need at least 1 line for the loop to call `CGroupInfo.from_line` with get_mock_open(read_data="\r\n") as mock_open: # Assert calling `get_container_info()` does not bubble up the exception + get_container_info.cache_clear() assert get_container_info() is None # Assert we called everything we expected From 45941708b5214e1b018fdeee7b6443257d79e2c9 Mon Sep 17 00:00:00 2001 From: David Sanchez <838104+sanchda@users.noreply.github.com> Date: Wed, 11 Dec 2024 08:56:01 -0600 Subject: [PATCH 11/78] chore: update formatters for native code (#11643) It's slightly annoying to make local changes to native code--I 
tried to consolidate these a bit and provide common overrides and logic. ## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../profiling/cmake/AnalysisFunc.cmake | 67 +++++----- .../profiling/crashtracker/CMakeLists.txt | 12 +- .../profiling/dd_wrapper/test/CMakeLists.txt | 3 +- .../datadog/profiling/ddup/CMakeLists.txt | 10 +- .../datadog/profiling/stack_v2/CMakeLists.txt | 5 +- .../profiling/stack_v2/test/CMakeLists.txt | 3 +- scripts/cformat.sh | 118 +++++++++++++----- scripts/cmake-format.sh | 64 +++++++--- 8 files changed, 169 insertions(+), 113 deletions(-) diff --git a/ddtrace/internal/datadog/profiling/cmake/AnalysisFunc.cmake 
b/ddtrace/internal/datadog/profiling/cmake/AnalysisFunc.cmake index b95a1205a62..2495a84ed29 100644 --- a/ddtrace/internal/datadog/profiling/cmake/AnalysisFunc.cmake +++ b/ddtrace/internal/datadog/profiling/cmake/AnalysisFunc.cmake @@ -1,36 +1,29 @@ include(CheckIPOSupported) function(add_ddup_config target) - # Profiling native extensions are built with C++17, even though underlying - # repo adheres to the manylinux 2014 standard. This isn't currently a - # problem, but if it becomes one, we may have to structure the library - # differently. + # Profiling native extensions are built with C++17, even though underlying repo adheres to the manylinux 2014 + # standard. This isn't currently a problem, but if it becomes one, we may have to structure the library differently. target_compile_features(${target} PUBLIC cxx_std_17) # Common compile options - target_compile_options(${target} PRIVATE "$<$:-Os>" -ffunction-sections - -Wall - -Werror - -Wextra - -Wshadow - -Wnon-virtual-dtor - -Wold-style-cast) + target_compile_options( + ${target} + PRIVATE "$<$:-Os>" + -ffunction-sections + -Wall + -Werror + -Wextra + -Wshadow + -Wnon-virtual-dtor + -Wold-style-cast) if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") # macOS-specific options - target_compile_options( - ${target} - PRIVATE "$<$:-Og;-g>" - "$<$:-Os;-g>" - ) + target_compile_options(${target} PRIVATE "$<$:-Og;-g>" "$<$:-Os;-g>") else() # Non-macOS (e.g., Linux) options - target_compile_options( - ${target} - PRIVATE "$<$:-Og;-ggdb3>" - "$<$:-Os;-ggdb3>" - -fno-semantic-interposition - ) + target_compile_options(${target} PRIVATE "$<$:-Og;-ggdb3>" + "$<$:-Os;-ggdb3>" -fno-semantic-interposition) endif() # Common link options @@ -66,22 +59,21 @@ function(add_ddup_config target) target_compile_options(${target} PRIVATE -fsanitize=${SANITIZE_OPTIONS} -fno-omit-frame-pointer) target_link_options(${target} PRIVATE -fsanitize=${SANITIZE_OPTIONS} -shared-libsan) - # Locate all directories containing relevant `.so` files - 
execute_process( - COMMAND bash -c "find $(${CMAKE_CXX_COMPILER} -print-file-name=) -name '*.so' -exec dirname {} \; | uniq" - OUTPUT_VARIABLE LIBSAN_LIB_PATHS - OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY) + # Locate all directories containing relevant `.so` files + execute_process( + COMMAND bash -c "find $(${CMAKE_CXX_COMPILER} -print-file-name=) -name '*.so' -exec dirname {} \; | uniq" + OUTPUT_VARIABLE LIBSAN_LIB_PATHS + OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY) - # Print for debugging - message(STATUS "LIBSAN_LIB_PATHS: ${LIBSAN_LIB_PATHS}") + # Print for debugging + message(STATUS "LIBSAN_LIB_PATHS: ${LIBSAN_LIB_PATHS}") - # Split the paths into a semicolon-separated list for CMake - string(REPLACE "\n" ";" LIBSAN_LIB_PATHS_LIST "${LIBSAN_LIB_PATHS}") + # Split the paths into a semicolon-separated list for CMake + string(REPLACE "\n" ";" LIBSAN_LIB_PATHS_LIST "${LIBSAN_LIB_PATHS}") - # Set RPATH to include all identified paths - set_target_properties(${target} PROPERTIES - BUILD_RPATH "${LIBSAN_LIB_PATHS_LIST}" - INSTALL_RPATH "${LIBSAN_LIB_PATHS_LIST}") + # Set RPATH to include all identified paths + set_target_properties(${target} PROPERTIES BUILD_RPATH "${LIBSAN_LIB_PATHS_LIST}" INSTALL_RPATH + "${LIBSAN_LIB_PATHS_LIST}") endif() # If DO_FANALYZER is specified and we're using gcc, then we can use -fanalyzer @@ -89,8 +81,7 @@ function(add_ddup_config target) target_compile_options(${target} PRIVATE -fanalyzer) endif() - # The main targets, ddup, crashtracker, stack_v2, and dd_wrapper are built - # as dynamic libraries, so PIC is required. And setting this is also fine - # for tests as they're loading those dynamic libraries. + # The main targets, ddup, crashtracker, stack_v2, and dd_wrapper are built as dynamic libraries, so PIC is required. + # And setting this is also fine for tests as they're loading those dynamic libraries. 
set_target_properties(${target} PROPERTIES POSITION_INDEPENDENT_CODE ON) endfunction() diff --git a/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt b/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt index a38b70fc224..2ae02df66f2 100644 --- a/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt @@ -45,12 +45,8 @@ add_custom_command( add_library(${EXTENSION_NAME} SHARED ${CRASHTRACKER_CPP_SRC}) add_ddup_config(${EXTENSION_NAME}) -# Cython generates code that produces errors for the following, so relax compile -# options -target_compile_options( - ${EXTENSION_NAME} - PRIVATE -Wno-old-style-cast -Wno-shadow -Wno-address -) +# Cython generates code that produces errors for the following, so relax compile options +target_compile_options(${EXTENSION_NAME} PRIVATE -Wno-old-style-cast -Wno-shadow -Wno-address) # cmake may mutate the name of the library (e.g., lib- and -.so for dynamic libraries). This suppresses that behavior, # which is required to ensure all paths can be inferred correctly by setup.py. @@ -61,7 +57,7 @@ set_target_properties(${EXTENSION_NAME} PROPERTIES SUFFIX "") # typical. set_target_properties(${EXTENSION_NAME} PROPERTIES INSTALL_RPATH "$ORIGIN/..") target_include_directories(${EXTENSION_NAME} PRIVATE ../dd_wrapper/include ${Datadog_INCLUDE_DIRS} - ${Python3_INCLUDE_DIRS}) + ${Python3_INCLUDE_DIRS}) if(Python3_LIBRARIES) target_link_libraries(${EXTENSION_NAME} PRIVATE dd_wrapper ${Python3_LIBRARIES}) @@ -89,7 +85,7 @@ if(NOT CRASHTRACKER_EXE_TARGET_NAME) endif() set_target_properties(crashtracker_exe PROPERTIES INSTALL_RPATH "$ORIGIN/.." 
OUTPUT_NAME - ${CRASHTRACKER_EXE_TARGET_NAME}) + ${CRASHTRACKER_EXE_TARGET_NAME}) # To let crashtracker find Python library at runtime set_target_properties(crashtracker_exe PROPERTIES INSTALL_RPATH_USE_LINK_PATH TRUE) diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt index 0be6098cd2a..e9c8f57dd60 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt @@ -31,8 +31,7 @@ function(dd_wrapper_add_test name) set_target_properties(${name} PROPERTIES INSTALL_RPATH "$ORIGIN/..") if(LIB_INSTALL_DIR) - install(TARGETS ${name} - RUNTIME DESTINATION ${LIB_INSTALL_DIR}/../test) + install(TARGETS ${name} RUNTIME DESTINATION ${LIB_INSTALL_DIR}/../test) endif() endfunction() diff --git a/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt b/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt index 8aeb0f1c23a..fe92fac3952 100644 --- a/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt @@ -50,12 +50,8 @@ add_custom_command( add_library(${EXTENSION_NAME} SHARED ${DDUP_CPP_SRC}) add_ddup_config(${EXTENSION_NAME}) -# Cython generates code that produces errors for the following, so relax compile -# options -target_compile_options( - ${EXTENSION_NAME} - PRIVATE -Wno-old-style-cast -Wno-shadow -Wno-address -) +# Cython generates code that produces errors for the following, so relax compile options +target_compile_options(${EXTENSION_NAME} PRIVATE -Wno-old-style-cast -Wno-shadow -Wno-address) # cmake may mutate the name of the library (e.g., lib- and -.so for dynamic libraries). This suppresses that behavior, # which is required to ensure all paths can be inferred correctly by setup.py. @@ -66,7 +62,7 @@ set_target_properties(${EXTENSION_NAME} PROPERTIES SUFFIX "") # typical. 
set_target_properties(${EXTENSION_NAME} PROPERTIES INSTALL_RPATH "$ORIGIN/..") target_include_directories(${EXTENSION_NAME} PRIVATE ../dd_wrapper/include ${Datadog_INCLUDE_DIRS} - ${Python3_INCLUDE_DIRS}) + ${Python3_INCLUDE_DIRS}) if(Python3_LIBRARIES) target_link_libraries(${EXTENSION_NAME} PRIVATE dd_wrapper ${Python3_LIBRARIES}) diff --git a/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt b/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt index 50c8d056c79..69788494920 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt @@ -74,9 +74,9 @@ target_compile_definitions(${EXTENSION_NAME} PRIVATE UNWIND_NATIVE_DISABLE) # warning(push, 0 then pop for the same effect. target_include_directories( ${EXTENSION_NAME} PRIVATE .. # include dd_wrapper from the root in order to make its paths transparent in the code - include) + include) target_include_directories(${EXTENSION_NAME} SYSTEM PRIVATE ${echion_SOURCE_DIR} ${Python3_INCLUDE_DIRS} - include/vendored include/util) + include/vendored include/util) # Echion sources need to be given the current platform if(WIN32) @@ -115,4 +115,3 @@ if(BUILD_TESTING) enable_testing() add_subdirectory(test) endif() - diff --git a/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt index 926f9b28af7..05176f2c803 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt @@ -29,8 +29,7 @@ function(dd_wrapper_add_test name) endif() if(LIB_INSTALL_DIR) - install(TARGETS ${name} - RUNTIME DESTINATION ${LIB_INSTALL_DIR}/../test) + install(TARGETS ${name} RUNTIME DESTINATION ${LIB_INSTALL_DIR}/../test) endif() endfunction() diff --git a/scripts/cformat.sh b/scripts/cformat.sh index b7d4fe46a2f..b12119e639a 100755 --- a/scripts/cformat.sh +++ b/scripts/cformat.sh @@ -1,43 +1,95 @@ 
-#!/bin/bash +#!/usr/bin/env bash set -e -# For more modern versions: -# clang-format --dry-run -Werror file.c -# would be enough… - -clean () -{ - rm -f "$CFORMAT_TMP" +clean() { + rm -f "$CFORMAT_TMP" 2>/dev/null || true } trap clean EXIT -if [[ "$1" == "update" ]] -then - THIS_PATH="$(realpath "$0")" - THIS_DIR="$(dirname $(dirname "$THIS_PATH"))" - # Find .c, , .h, .cpp, and .hpp files, excluding specified directories - find "$THIS_DIR" -type f \( -name '*.c' -o -name '*.h' -o -name '*.cpp' -o -name '*.hpp' \) \ - | grep -v '.eggs/' \ - | grep -v 'dd-trace-py/build/' \ - | grep -v '_taint_tracking/CMakeFiles' \ - | grep -v '_taint_tracking/_deps/' \ - | grep -v '.riot/' \ - | grep -v 'ddtrace/vendor/' \ - | grep -v '_taint_tracking/_vendor/' \ - | grep -v 'ddtrace/appsec/_iast/_taint_tracking/cmake-build-debug/' \ - | grep -v '^ddtrace/appsec/_iast/_taint_tracking/_vendor/' \ - | while IFS= read -r file; do - clang-format -i $file - echo "Formatting $file" +# Exclude patterns applied to file list +exclude_patterns() { + local patterns=( + '^ddtrace/vendor/' + '^ddtrace/appsec/_iast/_taint_tracking/_vendor/' + '.eggs/' + 'dd-trace-py/build/' + '_taint_tracking/CMakeFiles' + '_taint_tracking/_deps/' + '.riot/' + '_taint_tracking/_vendor/' + 'ddtrace/appsec/_iast/_taint_tracking/cmake-build-debug/' + ) + + # Join all patterns with '|' + local joined="$(IFS='|'; echo "${patterns[*]}")" + + grep -vE "${joined}" +} + +# Function to enumerate files depending on mode +enumerate_files() { + local extensions=( + '*.c' + '*.h' + '*.cpp' + '*.hpp' + ) + + if [[ "$ENUM_ALL" == "true" ]]; then + local find_conditions=() + for ext in "${extensions[@]}"; do + find_conditions+=("-o" "-name" "$ext") + done + unset 'find_conditions[-1]' + find "$BASE_DIR" -type f \( "${find_conditions[@]}" \) + else + git ls-files "${extensions[@]}" + fi +} + +# Script defaults +UPDATE_MODE=false +ENUM_ALL=false +BASE_DIR=$(dirname "$(realpath "$0")") +CLANG_FORMAT=clang-format + +# NB: consumes 
the arguments +while (( "$#" )); do + case "$1" in + --fix|-fix|fix) + UPDATE_MODE="true" + ;; + --all|-all|all) + ENUM_ALL="true" + ;; + *) + ;; + esac done + +# Environment variable overrides +[[ -n "${CFORMAT_FIX:-}" ]] && UPDATE_MODE=true +[[ -n "${CFORMAT_ALL:-}" ]] && ENUM_ALL=true +[[ -n "${CFORMAT_BIN:-}" ]] && CLANG_FORMAT="$CLANG_FORMAT_BIN" + +if [[ "$UPDATE_MODE" == "true" ]]; then + # Update mode: Format files in-place + enumerate_files \ + | exclude_patterns \ + | while IFS= read -r file; do + ${CLANG_FORMAT} -i "$file" + echo "Formatting $file" + done else - git ls-files '*.c' '*.h' '*.cpp' '*.hpp' | grep -v '^ddtrace/vendor/' | grep -v '^ddtrace/appsec/_iast/_taint_tracking/_vendor/' | while read filename - do - CFORMAT_TMP=`mktemp` - clang-format "$filename" > "$CFORMAT_TMP" - diff -u "$filename" "$CFORMAT_TMP" - rm -f "$CFORMAT_TMP" - done + # Check mode: Compare formatted output to existing files + enumerate_files \ + | exclude_patterns \ + | while IFS= read -r filename; do + CFORMAT_TMP=$(mktemp) + ${CLANG_FORMAT} "$filename" > "$CFORMAT_TMP" + diff -u "$filename" "$CFORMAT_TMP" || true + rm -f "$CFORMAT_TMP" + done fi diff --git a/scripts/cmake-format.sh b/scripts/cmake-format.sh index 401f4d07fd5..0c272e4eb32 100755 --- a/scripts/cmake-format.sh +++ b/scripts/cmake-format.sh @@ -1,26 +1,50 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -# Navigate to the root of the repository, which is one level up from the directory containing this script. -SCRIPT_ROOT="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)" -cd "$SCRIPT_ROOT/.." 
- -# Set some options for cmake-format -# If --update is passed as first arg, or if CMAKE_FORMAT_FIX_ALL is set, update the files in place +# Script defaults +UPDATE_MODE=false +ENUM_ALL=false CMD_OPT="--check" -if [[ "${1:-}" == "--update" || -n "${CMAKE_FORMAT_FIX_ALL:-}" ]]; then - CMD_OPT="--in-place" -fi +BASE_DIR=$(dirname "$(realpath "$0")") +CMAKE_FORMAT="cmake-format" + +# NB: consumes the arguments +while (( "$#" )); do + case "$1" in + --fix|-fix|fix) + UPDATE_MODE=true + ;; + --all|-all|all) + ENUM_ALL=true + ;; + *) + ;; + esac + shift +done + +# Environment variable overrides +[[ -n "${CMAKE_FORMAT_FIX:-}" || "$UPDATE_MODE" == "true" ]] && CMD_OPT="--in-place" +[[ -n "${CMAKE_FORMAT_ALL:-}" ]] && ENUM_ALL=true +[[ -n "${CMAKE_FORMAT_BIN:-}" ]] && CMAKE_FORMAT="$CMAKE_FORMAT_BIN" -# If the CMAKE_FORMAT_CHECK_ALL environnment variable is truthy, check all files -# else, just check the files that have been modified -if [[ -n "${CMAKE_FORMAT_CHECK_ALL:-}" ]]; then - FILES=$(find . -name '*.cmake' -o -name 'CMakeLists.txt' | grep -vE '^./build/' | grep -vE '_vendor/') -else - FILES=$(git diff --name-only HEAD | grep -E '\.cmake$|CMakeLists.txt' | grep -vE '^build/' | grep -vE '_vendor/' || true) -fi +# Enumerate files function +enumerate_files() { + if [[ "$ENUM_ALL" == true ]]; then + find $BASE_DIR \( -name '*.cmake' -o -name 'CMakeLists.txt' \) + else + git ls-files \ + | grep -E '\.cmake$|CMakeLists.txt' || true + fi +} + +# Enumerate and filter files +FILES=$(enumerate_files | grep -vE '^(\./)?build/' | grep -vE '_vendor/') # Run cmake-format on all files -for file in $FILES; do - cmake-format -c "scripts/.cmake-format" $CMD_OPT "$file" -done +# Use a process substitution to allow iterating safely +while IFS= read -r file; do + [[ -n "$file" ]] || continue + ${CMAKE_FORMAT} -c "scripts/.cmake-format" $CMD_OPT "$file" +done <<< "$FILES" + From aec08be632843f03339692e94b30380c9c6d73b5 Mon Sep 17 00:00:00 2001 From: "Gabriele N. 
Tornetta" Date: Wed, 11 Dec 2024 15:13:56 +0000 Subject: [PATCH 12/78] chore(symdb): upload compressed symbol payloads (#11404) We add support for compressed symbol database payloads. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/internal/symbol_db/symbols.py | 14 ++++++++++---- ddtrace/internal/utils/http.py | 9 +++++---- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/ddtrace/internal/symbol_db/symbols.py b/ddtrace/internal/symbol_db/symbols.py index e252e22c0bf..228ed53f53d 100644 --- a/ddtrace/internal/symbol_db/symbols.py +++ b/ddtrace/internal/symbol_db/symbols.py @@ -3,6 +3,7 @@ from dataclasses 
import field import dis from enum import Enum +import gzip from http.client import HTTPResponse from inspect import CO_VARARGS from inspect import CO_VARKEYWORDS @@ -484,13 +485,18 @@ def upload(self) -> HTTPResponse: ), FormData( name="file", - filename="symdb_export.json", - data=json.dumps(self.to_json()), - content_type="json", + filename=f"symbols_{os.getpid()}.json.gz", + data="[symbols_placeholder]", + content_type="gzip", ), ] ) + # DEV: The as_bytes method ends up writing the data line by line, which + # breaks the final payload. We add a placeholder instead and manually + # replace it with the compressed JSON. + body = body.replace(b"[symbols_placeholder]", gzip.compress(json.dumps(self.to_json()).encode("utf-8"))) + with connector(get_trace_url(), timeout=5.0)() as conn: log.debug("[PID %d] SymDB: Uploading symbols payload", os.getpid()) conn.request("POST", "/symdb/v1/input", body, headers) @@ -527,7 +533,7 @@ def is_module_included(module: ModuleType) -> bool: class SymbolDatabaseUploader(BaseModuleWatchdog): - __scope_limit__ = 100 + __scope_limit__ = 400 def __init__(self) -> None: super().__init__() diff --git a/ddtrace/internal/utils/http.py b/ddtrace/internal/utils/http.py index cba605a1527..7e85ce01356 100644 --- a/ddtrace/internal/utils/http.py +++ b/ddtrace/internal/utils/http.py @@ -1,5 +1,6 @@ from contextlib import contextmanager from dataclasses import dataclass +from email.encoders import encode_noop from json import loads import logging import os @@ -418,7 +419,7 @@ def parse_message(msg): class FormData: name: str filename: str - data: str + data: Union[str, bytes] content_type: str @@ -431,12 +432,12 @@ def multipart(parts: List[FormData]) -> Tuple[bytes, dict]: del msg["MIME-Version"] for part in parts: - app = MIMEApplication(part.data, part.content_type, lambda _: _) + app = MIMEApplication(part.data, part.content_type, encode_noop) app.add_header("Content-Disposition", "form-data", name=part.name, filename=part.filename) del 
app["MIME-Version"] msg.attach(app) # Split headers and body - headers, _, body = msg.as_string(policy=HTTP).partition("\r\n\r\n") + headers, _, body = msg.as_bytes(policy=HTTP).partition(b"\r\n\r\n") - return body.encode("utf-8"), dict(_.split(": ") for _ in headers.splitlines()) + return body, dict(_.split(": ") for _ in headers.decode().splitlines()) From 25e6215dd94af40397cfa15a2d39e3e61a1692c5 Mon Sep 17 00:00:00 2001 From: Taegyun Kim Date: Wed, 11 Dec 2024 10:48:09 -0500 Subject: [PATCH 13/78] chore: fix cmake format (#11672) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../datadog/profiling/dd_wrapper/test/CMakeLists.txt 
| 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt index e9c8f57dd60..b80ace74968 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt @@ -21,12 +21,11 @@ function(dd_wrapper_add_test name) target_link_libraries(${name} PRIVATE gmock gtest_main dd_wrapper nlohmann_json::nlohmann_json) add_ddup_config(${name}) - gtest_discover_tests(${name} - PROPERTIES - # We start new threads after fork(), and we want to continue - # running the tests after that instead of dying. - ENVIRONMENT "TSAN_OPTIONS=die_after_fork=0:suppressions=${CMAKE_CURRENT_SOURCE_DIR}/TSan.supp" - ) + gtest_discover_tests( + ${name} + PROPERTIES # We start new threads after fork(), and we want to continue running the tests after that instead of + # dying. + ENVIRONMENT "TSAN_OPTIONS=die_after_fork=0:suppressions=${CMAKE_CURRENT_SOURCE_DIR}/TSan.supp") set_target_properties(${name} PROPERTIES INSTALL_RPATH "$ORIGIN/..") From 490e6580cb702f37a0e8483c48008929c29d93ad Mon Sep 17 00:00:00 2001 From: David Sanchez <838104+sanchda@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:42:04 -0600 Subject: [PATCH 14/78] chore(profiling): tweak lock collector impl for clarity (#11661) This is just a hygiene PR to clarify the implementation of the lock collector. 
## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/profiling/collector/_lock.py | 10 +++++----- ddtrace/profiling/collector/asyncio.py | 4 ++-- ddtrace/profiling/collector/threading.py | 4 ++-- tests/profiling/collector/test_threading.py | 4 ++-- tests/profiling_v2/collector/test_threading.py | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/ddtrace/profiling/collector/_lock.py b/ddtrace/profiling/collector/_lock.py index f2d1289d73b..4ee0e692fac 100644 --- a/ddtrace/profiling/collector/_lock.py +++ b/ddtrace/profiling/collector/_lock.py @@ -338,12 +338,12 @@ def __init__( self.export_libdd_enabled = 
False @abc.abstractmethod - def _get_original(self): + def _get_patch_target(self): # type: (...) -> typing.Any pass @abc.abstractmethod - def _set_original( + def _set_patch_target( self, value, # type: typing.Any ): @@ -367,7 +367,7 @@ def patch(self): """Patch the module for tracking lock allocation.""" # We only patch the lock from the `threading` module. # Nobody should use locks from `_thread`; if they do so, then it's deliberate and we don't profile. - self.original = self._get_original() + self._original = self._get_patch_target() def _allocate_lock(wrapped, instance, args, kwargs): lock = wrapped(*args, **kwargs) @@ -381,9 +381,9 @@ def _allocate_lock(wrapped, instance, args, kwargs): self.export_libdd_enabled, ) - self._set_original(FunctionWrapper(self.original, _allocate_lock)) + self._set_patch_target(FunctionWrapper(self._original, _allocate_lock)) def unpatch(self): # type: (...) -> None """Unpatch the threading module for tracking lock allocation.""" - self._set_original(self.original) + self._set_patch_target(self._original) diff --git a/ddtrace/profiling/collector/asyncio.py b/ddtrace/profiling/collector/asyncio.py index af57db3d3ad..fe5b63ab8ce 100644 --- a/ddtrace/profiling/collector/asyncio.py +++ b/ddtrace/profiling/collector/asyncio.py @@ -36,11 +36,11 @@ def _start_service(self): self._asyncio_module = asyncio return super(AsyncioLockCollector, self)._start_service() - def _get_original(self): + def _get_patch_target(self): # type: (...) -> typing.Any return self._asyncio_module.Lock - def _set_original( + def _set_patch_target( self, value # type: typing.Any ): # type: (...) 
-> None diff --git a/ddtrace/profiling/collector/threading.py b/ddtrace/profiling/collector/threading.py index 86daee689f6..3700c145312 100644 --- a/ddtrace/profiling/collector/threading.py +++ b/ddtrace/profiling/collector/threading.py @@ -32,11 +32,11 @@ class ThreadingLockCollector(_lock.LockCollector): PROFILED_LOCK_CLASS = _ProfiledThreadingLock - def _get_original(self): + def _get_patch_target(self): # type: (...) -> typing.Any return threading.Lock - def _set_original( + def _set_patch_target( self, value # type: typing.Any ): # type: (...) -> None diff --git a/tests/profiling/collector/test_threading.py b/tests/profiling/collector/test_threading.py index c6b646a3f98..ae7e7204a68 100644 --- a/tests/profiling/collector/test_threading.py +++ b/tests/profiling/collector/test_threading.py @@ -56,12 +56,12 @@ def test_patch(): lock = threading.Lock collector = collector_threading.ThreadingLockCollector(r) collector.start() - assert lock == collector.original + assert lock == collector._original # wrapt makes this true assert lock == threading.Lock collector.stop() assert lock == threading.Lock - assert collector.original == threading.Lock + assert collector._original == threading.Lock def test_lock_acquire_events(): diff --git a/tests/profiling_v2/collector/test_threading.py b/tests/profiling_v2/collector/test_threading.py index abed7d0cfda..bb55e67522f 100644 --- a/tests/profiling_v2/collector/test_threading.py +++ b/tests/profiling_v2/collector/test_threading.py @@ -74,12 +74,12 @@ def test_patch(): lock = threading.Lock collector = collector_threading.ThreadingLockCollector(None) collector.start() - assert lock == collector.original + assert lock == collector._original # wrapt makes this true assert lock == threading.Lock collector.stop() assert lock == threading.Lock - assert collector.original == threading.Lock + assert collector._original == threading.Lock @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="only works on linux") From 
9ba734f98c89c50dd4d58e7a2521e4964e1ba670 Mon Sep 17 00:00:00 2001 From: Sam Brenner <106700075+sabrenner@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:07:46 -0500 Subject: [PATCH 15/78] fix(langchain): pydantic output parser tagging does not throw (#11652) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MLOB-1973 ## What does this PR do? Fixes #11638 Adds some extra checking around the tagging of JSON-like output parsers in streamed cases. These kinds of output parsers concatenate their output for us, so we do not need to append a bunch of chunks together. It was previously thought that the only type was `JsonOutputParser`, which could be `json.dumps`'d as a string tag. However, the `PydanticOutputParser` inherits from `JsonOutputParser`, but cannot be JSON dumped. Thus, we just stringify it instead. To avoid this behavior of throwing in the future, I've added a `try`/`except` to the `json.dumps`. I've special-cased `PydanticOuputParser` as to not generalize it as an expensive exception to `json.dumps`. These are the only two JSON-type output parsers I've seen, but should more be introduced, we'll log our incompatability and just attempt to `str` it instead. ## Testing For a script like: ```python from typing import List from langchain_core.output_parsers import PydanticOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field class Person(BaseModel): """Information about a person.""" name: str = Field(..., description="The name of the person") height_in_meters: float = Field( ..., description="The height of the person expressed in meters." ) class People(BaseModel): """Identifying information about all people in a text.""" people: List[Person] # Set up a parser parser = PydanticOutputParser(pydantic_object=People) # Prompt prompt = ChatPromptTemplate.from_messages( [ ( "system", "Answer the user query. 
Wrap the output in `json` tags\n{format_instructions}", ), ("human", "{query}"), ] ).partial(format_instructions=parser.get_format_instructions()) query = "Anna is 23 years old and she is 6 feet tall" llm = ChatOpenAI() chain = prompt | llm | parser for event in chain.stream({ "query": query }): print(event) ``` The output tagging is as follows on APM spans: Screenshot 2024-12-10 at 12 04 57 PM and LLMObs spans: Screenshot 2024-12-10 at 12 05 17 PM without throwing errors. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/contrib/internal/langchain/patch.py | 16 ++++++++++------ ...pydantic-output-parsers-19bc162212ec051e.yaml 
| 4 ++++ 2 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/langchain-pydantic-output-parsers-19bc162212ec051e.yaml diff --git a/ddtrace/contrib/internal/langchain/patch.py b/ddtrace/contrib/internal/langchain/patch.py index ce72e1affff..fa2332d70f2 100644 --- a/ddtrace/contrib/internal/langchain/patch.py +++ b/ddtrace/contrib/internal/langchain/patch.py @@ -1,4 +1,3 @@ -import json import os import sys from typing import Any @@ -954,17 +953,22 @@ def _on_span_started(span: Span): span.set_tag_str("langchain.request.inputs.%d.%s" % (idx, k), integration.trunc(str(v))) def _on_span_finished(span: Span, streamed_chunks): + maybe_parser = instance.steps[-1] if instance.steps else None if ( streamed_chunks and langchain_core - and isinstance(instance.steps[-1], langchain_core.output_parsers.JsonOutputParser) + and isinstance(maybe_parser, langchain_core.output_parsers.JsonOutputParser) ): - # it's possible that the chain has a json output parser - # this will have already concatenated the chunks into a json object + # it's possible that the chain has a json output parser type + # this will have already concatenated the chunks into an object - # it's also possible the json output parser isn't the last step, + # it's also possible the this parser type isn't the last step, # but one of the last steps, in which case we won't act on it here - content = json.dumps(streamed_chunks[-1]) + result = streamed_chunks[-1] + if maybe_parser.__class__.__name__ == "JsonOutputParser": + content = safe_json(result) + else: + content = str(result) else: # best effort to join chunks together content = "".join([str(chunk) for chunk in streamed_chunks]) diff --git a/releasenotes/notes/langchain-pydantic-output-parsers-19bc162212ec051e.yaml b/releasenotes/notes/langchain-pydantic-output-parsers-19bc162212ec051e.yaml new file mode 100644 index 00000000000..687e465723a --- /dev/null +++ 
b/releasenotes/notes/langchain-pydantic-output-parsers-19bc162212ec051e.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + langchain: resolves a JSON decoding issue resulting from tagging streamed outputs from chains ending with a PydanticOutputParser. From 385d8e013858ce061f0d775951fb399e56215508 Mon Sep 17 00:00:00 2001 From: William Conti <58711692+wconti27@users.noreply.github.com> Date: Wed, 11 Dec 2024 18:13:07 -0500 Subject: [PATCH 16/78] fix: kafka consumer parenting logic (#11653) Fixes bug where Kafka consumer was creating unparented spans when consuming a message with no context present. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) 
--------- Co-authored-by: Emmett Butler <723615+emmettbutler@users.noreply.github.com> --- ddtrace/contrib/internal/kafka/patch.py | 2 +- ...a-consumer-parenting-29acfd08e05d2350.yaml | 5 ++++ tests/contrib/kafka/test_kafka.py | 29 +++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/fix-kafka-consumer-parenting-29acfd08e05d2350.yaml diff --git a/ddtrace/contrib/internal/kafka/patch.py b/ddtrace/contrib/internal/kafka/patch.py index 225c0f82877..b8e8fce007d 100644 --- a/ddtrace/contrib/internal/kafka/patch.py +++ b/ddtrace/contrib/internal/kafka/patch.py @@ -247,7 +247,7 @@ def _instrument_message(messages, pin, start_ns, instance, err): name=schematize_messaging_operation(kafkax.CONSUME, provider="kafka", direction=SpanDirection.PROCESSING), service=trace_utils.ext_service(pin, config.kafka), span_type=SpanTypes.WORKER, - child_of=ctx if ctx is not None else pin.tracer.context_provider.active(), + child_of=ctx if ctx is not None and ctx.trace_id is not None else pin.tracer.context_provider.active(), activate=True, ) as span: # reset span start time to before function call diff --git a/releasenotes/notes/fix-kafka-consumer-parenting-29acfd08e05d2350.yaml b/releasenotes/notes/fix-kafka-consumer-parenting-29acfd08e05d2350.yaml new file mode 100644 index 00000000000..df8cdcfe986 --- /dev/null +++ b/releasenotes/notes/fix-kafka-consumer-parenting-29acfd08e05d2350.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + kafka: Fixes an issue with Kafka consumer spans not using the active trace context when distributed + tracing was enabled and no valid distributed context found was found within a consumed message. 
diff --git a/tests/contrib/kafka/test_kafka.py b/tests/contrib/kafka/test_kafka.py index 9bcf4ffc538..d49f85f26b2 100644 --- a/tests/contrib/kafka/test_kafka.py +++ b/tests/contrib/kafka/test_kafka.py @@ -885,6 +885,35 @@ def test_context_header_injection_works_no_client_added_headers(kafka_topic, pro assert propagation_asserted is True +def test_consumer_uses_active_context_when_no_valid_distributed_context_exists( + kafka_topic, producer, consumer, dummy_tracer +): + # use a random int in this string to prevent reading a message produced by a previous test run + test_string = "producer does not inject context test " + str(random.randint(0, 1000)) + test_key = "producer does not inject context test " + str(random.randint(0, 1000)) + PAYLOAD = bytes(test_string, encoding="utf-8") + + producer.produce(kafka_topic, PAYLOAD, key=test_key) + producer.flush() + + Pin.override(consumer, tracer=dummy_tracer) + + with dummy_tracer.trace("kafka consumer parent span") as parent_span: + with override_config("kafka", dict(distributed_tracing_enabled=True)): + message = None + while message is None or str(message.value()) != str(PAYLOAD): + message = consumer.poll() + + traces = dummy_tracer.pop_traces() + consume_span = traces[len(traces) - 1][-1] + + # assert consumer_span parent is our custom span + assert consume_span.name == "kafka.consume" + assert consume_span.parent_id == parent_span.span_id + + Pin.override(consumer, tracer=None) + + def test_span_has_dsm_payload_hash(dummy_tracer, consumer, producer, kafka_topic): Pin.override(producer, tracer=dummy_tracer) Pin.override(consumer, tracer=dummy_tracer) From c2a3fa4d70ad049ca3c2a944d8a1c5b0099944d8 Mon Sep 17 00:00:00 2001 From: Alberto Vara Date: Thu, 12 Dec 2024 15:37:05 +0100 Subject: [PATCH 17/78] chore(ci): iast packages tests error (#11673) New error in CircleCI but it works locally. 
It looks like the same error as this old PR:
failing in CircleCI because, for some reason, instead of installing version - # 0.0.5, it’s installing the latest version + # This test is failing in CircleCI with the latest version test_import=False, test_propagation=True, ), @@ -573,6 +572,8 @@ def uninstall(self, python_cmd): "Parsed TOML data: {'key': 'value'}", "", import_module_to_validate="tomli._parser", + # This test is failing in CircleCI with the latest version + test_import=False, test_propagation=True, ), PackageForTesting( From 965af0821d82088ac4570042a2a445c1e682b164 Mon Sep 17 00:00:00 2001 From: Romain Komorn <136473744+romainkomorndatadog@users.noreply.github.com> Date: Thu, 12 Dec 2024 16:02:34 +0100 Subject: [PATCH 18/78] chore(ci_visibility): respect EFD threshold and fix is_new tagging (#11691) Fixes a pair of issues: - properly respects the absolute number of new tests threshold when checking session faultiness - stops tagging tests as new when a session is considered faulty No release note because EFD support is in beta and unreleased ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy 
adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../internal/ci_visibility/api/_session.py | 17 +- ddtrace/internal/ci_visibility/api/_test.py | 6 +- .../api/fake_runner_efd_faulty_session.py | 19 +- tests/ci_visibility/test_efd.py | 142 +- ...st_manual_api_fake_efd_faulty_session.json | 2392 ++++++++++++++--- 5 files changed, 2213 insertions(+), 363 deletions(-) diff --git a/ddtrace/internal/ci_visibility/api/_session.py b/ddtrace/internal/ci_visibility/api/_session.py index b6407cc86be..5267a345c0a 100644 --- a/ddtrace/internal/ci_visibility/api/_session.py +++ b/ddtrace/internal/ci_visibility/api/_session.py @@ -119,8 +119,8 @@ def set_efd_abort_reason(self, abort_reason: str): self._efd_abort_reason = abort_reason def efd_is_faulty_session(self): - """A session is considered "EFD faulty" if percentage of tests considered new is greater than the given - threshold + """A session is considered "EFD faulty" if the percentage of tests considered new is greater than the + given threshold, and the total number of news tests exceeds the threshold. 
NOTE: this behavior is cached on the assumption that this method will only be called once """ @@ -130,16 +130,19 @@ def efd_is_faulty_session(self): if self._session_settings.efd_settings.enabled is False: return False - total_tests = 0 - new_tests = 0 + total_tests_count = 0 + new_tests_count = 0 for _module in self._children.values(): for _suite in _module._children.values(): for _test in _suite._children.values(): - total_tests += 1 + total_tests_count += 1 if _test.is_new(): - new_tests += 1 + new_tests_count += 1 - new_tests_pct = 100 * (new_tests / total_tests) + if new_tests_count <= self._session_settings.efd_settings.faulty_session_threshold: + return False + + new_tests_pct = 100 * (new_tests_count / total_tests_count) self._efd_is_faulty_session = new_tests_pct > self._session_settings.efd_settings.faulty_session_threshold diff --git a/ddtrace/internal/ci_visibility/api/_test.py b/ddtrace/internal/ci_visibility/api/_test.py index 73dc6397b63..c0eb615cd03 100644 --- a/ddtrace/internal/ci_visibility/api/_test.py +++ b/ddtrace/internal/ci_visibility/api/_test.py @@ -101,8 +101,10 @@ def _set_efd_tags(self) -> None: self.set_tag(TEST_EFD_ABORT_REASON, self._efd_abort_reason) # NOTE: The is_new tag is currently only being set in the context of EFD (since that is the only context in - # which unique tests are fetched). - if self.is_new(): + # which unique tests are fetched). Additionally, if a session is considered faulty, we do not want to tag the + # test as new. 
+ session = self.get_session() + if self.is_new() and session is not None and not session.efd_is_faulty_session(): self.set_tag(TEST_IS_NEW, self._is_new) def _set_atr_tags(self) -> None: diff --git a/tests/ci_visibility/api/fake_runner_efd_faulty_session.py b/tests/ci_visibility/api/fake_runner_efd_faulty_session.py index ea841888de6..4937464e74f 100644 --- a/tests/ci_visibility/api/fake_runner_efd_faulty_session.py +++ b/tests/ci_visibility/api/fake_runner_efd_faulty_session.py @@ -1,6 +1,5 @@ -"""Fake test runner where all too many tests are new, so the session is faulty and no retries are done - -Incorporates setting and deleting tags, as well. +"""Fake test runner where too many tests are new, so the session is faulty and no retries are done +. Starts session before discovery (simulating pytest behavior) Comment lines in the test start/finish lines are there for visual distinction. @@ -90,18 +89,8 @@ def run_tests(): m2_s1_id = ext_api.TestSuiteId(m2_id, "m2_s1") api.InternalTestSuite.discover(m2_s1_id) - # M2_S1 tests (mostly exist to keep under faulty session threshold) - m2_s1_test_ids = [ - api.InternalTestId(m2_s1_id, "m2_s1_t1"), - api.InternalTestId(m2_s1_id, "m2_s1_t2"), - api.InternalTestId(m2_s1_id, "m2_s1_t3"), - api.InternalTestId(m2_s1_id, "m2_s1_t4"), - api.InternalTestId(m2_s1_id, "m2_s1_t5"), - api.InternalTestId(m2_s1_id, "m2_s1_t6"), - api.InternalTestId(m2_s1_id, "m2_s1_t7"), - api.InternalTestId(m2_s1_id, "m2_s1_t8"), - api.InternalTestId(m2_s1_id, "m2_s1_t9"), - ] + # M2_S1 tests + m2_s1_test_ids = [api.InternalTestId(m2_s1_id, f"m2_s1_t{i}") for i in range(35)] for test_id in m2_s1_test_ids: api.InternalTest.discover(test_id) diff --git a/tests/ci_visibility/test_efd.py b/tests/ci_visibility/test_efd.py index c623e5db329..0e2de603c6a 100644 --- a/tests/ci_visibility/test_efd.py +++ b/tests/ci_visibility/test_efd.py @@ -66,7 +66,7 @@ def test_efd_max_retries(self, efd_settings, efd_test_duration_s, expected_max_r mock_session = mock.Mock() 
mock_session.efd_is_faulty_session.return_value = False - with mock.patch.multiple(efd_test, get_session=lambda *args: mock_session): + with mock.patch.object(TestVisibilityTest, "get_session", lambda *args: mock_session): efd_test.start() # Overwrite the test duration efd_test._span.start_ns -= efd_test_duration_s * 1e9 @@ -156,7 +156,7 @@ def test_efd_final_status(self, test_result, retry_results: t.Iterable[TestStatu ) mock_session = mock.Mock() mock_session.efd_is_faulty_session.return_value = False - with mock.patch.multiple(efd_test, get_session=lambda *args: mock_session): + with mock.patch.object(TestVisibilityTest, "get_session", lambda *args: mock_session): efd_test.start() efd_test.finish_test(test_result) expected_num_retry = 0 @@ -177,13 +177,87 @@ def test_efd_does_not_retry_if_disabled(self): efd_test.finish_test(TestStatus.FAIL) assert efd_test.efd_should_retry() is False - @pytest.mark.parametrize("faulty_session_threshold,expected_faulty", ((None, False), (10, True), (40, False))) - def test_efd_session_faulty(self, faulty_session_threshold, expected_faulty): - """Tests that the number of new tests in a session is correctly used to determine if a session is faulty + @pytest.mark.parametrize( + "faulty_session_threshold,expected_faulty", ((None, True), (10, True), (40, True), (50, False)) + ) + def test_efd_session_faulty_percentage(self, faulty_session_threshold, expected_faulty): + """Tests that the number of new tests in a session is correctly used to determine if a session is faulty based + on the percentage of new tests (as opposed to the absolute number). + + In order to test the percentages fully without hitting the absolute number of new tests threshold, we generate + a large number of both known and new tests. 
+ + There are a total of 100 known and 100 new tests, so 50% are new + """ + + if faulty_session_threshold is not None: + efd_settings = EarlyFlakeDetectionSettings(True, faulty_session_threshold=faulty_session_threshold) + else: + efd_settings = EarlyFlakeDetectionSettings(True) + + ssettings = self._get_session_settings(efd_settings=efd_settings) + test_session = TestVisibilitySession(session_settings=ssettings) + + # Modules 1 and 2 each have one suite with 30 known tests and 20 new tests. + m1_id = TestModuleId("module_1") + m1 = TestVisibilityModule(m1_id.name, session_settings=ssettings) + test_session.add_child(m1_id, m1) + m1_s1_id = TestSuiteId(m1_id, "m1_s1") + m1_s1 = TestVisibilitySuite(m1_s1_id.name, session_settings=ssettings) + m1.add_child(m1_s1_id, m1_s1) + + # Known tests: + for i in range(50): + test_name = f"m1_s1_known_t{i}" + m1_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=False), + ) + + for i in range(50): + test_name = f"m1_s1_new_t{i}" + m1_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=True), + ) + + m2_id = TestModuleId("module_2") + m2 = TestVisibilityModule(m2_id.name, session_settings=ssettings) + test_session.add_child(m2_id, m2) + m2_s1_id = TestSuiteId(m2_id, "suite_1") + m2_s1 = TestVisibilitySuite(m2_s1_id.name, session_settings=ssettings) + m2.add_child(m2_s1_id, m2_s1) + + # Known tests: + for i in range(50): + test_name = f"m2_s1_known_t{i}" + m2_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=False), + ) + + for i in range(50): + test_name = f"m2_s1_new_t{i}" + m2_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=True), + ) + + assert test_session.efd_is_faulty_session() == expected_faulty + + @pytest.mark.parametrize( + 
"faulty_session_threshold,expected_faulty", ((None, True), (10, True), (40, False), (50, False)) + ) + def test_efd_session_faulty_absolute(self, faulty_session_threshold, expected_faulty): + """Tests that the number of new tests in a session is correctly used to determine if a session is faulty based + on the absolute number of new tests. For the purpose of this test, the test structure is hardcoded. Whether or not tests are properly marked as new, etc., should be tested elsewhere. + + There are a total of 10 known tests and 40 new tests, so 80% of tests are new. """ + if faulty_session_threshold is not None: efd_settings = EarlyFlakeDetectionSettings(True, faulty_session_threshold=faulty_session_threshold) else: @@ -192,25 +266,28 @@ def test_efd_session_faulty(self, faulty_session_threshold, expected_faulty): ssettings = self._get_session_settings(efd_settings=efd_settings) test_session = TestVisibilitySession(session_settings=ssettings) - # Module + # Modules 1 and 2 each have one suite with 5 known tests and 20 new tests. 
m1_id = TestModuleId("module_1") m1 = TestVisibilityModule(m1_id.name, session_settings=ssettings) test_session.add_child(m1_id, m1) m1_s1_id = TestSuiteId(m1_id, "m1_s1") m1_s1 = TestVisibilitySuite(m1_s1_id.name, session_settings=ssettings) m1.add_child(m1_s1_id, m1_s1) - m1_s1_t1_id = InternalTestId(m1_s1_id, name="m1_s1_t1") - m1_s1.add_child(m1_s1_t1_id, TestVisibilityTest(m1_s1_t1_id.name, session_settings=ssettings, is_new=True)) - m1_s1_t2_id = InternalTestId(m1_s1_id, name="m1_s1_t2") - m1_s1.add_child(m1_s1_t2_id, TestVisibilityTest(m1_s1_t2_id.name, session_settings=ssettings, is_new=False)) - m1_s1_t3_id = InternalTestId(m1_s1_id, name="m1_s1_t3") - m1_s1.add_child(m1_s1_t3_id, TestVisibilityTest(m1_s1_t3_id.name, session_settings=ssettings, is_new=False)) - - m1_s2_id = TestSuiteId(m1_id, "suite_2") - m1_s2 = TestVisibilitySuite(m1_s2_id.name, session_settings=ssettings) - m1.add_child(m1_s2_id, m1_s2) - m1_s2_t1_id = InternalTestId(m1_s2_id, name="m1_s2_t1") - m1_s2.add_child(m1_s2_t1_id, TestVisibilityTest(m1_s2_t1_id.name, session_settings=ssettings, is_new=True)) + + # Known tests: + for i in range(5): + test_name = f"m1_s1_known_t{i}" + m1_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=False), + ) + + for i in range(20): + test_name = f"m1_s1_new_t{i}" + m1_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=True), + ) m2_id = TestModuleId("module_2") m2 = TestVisibilityModule(m2_id.name, session_settings=ssettings) @@ -219,20 +296,19 @@ def test_efd_session_faulty(self, faulty_session_threshold, expected_faulty): m2_s1 = TestVisibilitySuite(m2_s1_id.name, session_settings=ssettings) m2.add_child(m2_s1_id, m2_s1) - m2_s1_t1_id = InternalTestId(m2_s1_id, name="m2_s1_t1") - m2_s1.add_child(m2_s1_t1_id, TestVisibilityTest(m2_s1_t1_id.name, session_settings=ssettings, is_new=False)) - m2_s1_t2_id = 
InternalTestId(m2_s1_id, name="m2_s1_t2") - m2_s1.add_child(m2_s1_t2_id, TestVisibilityTest(m2_s1_t2_id.name, session_settings=ssettings, is_new=False)) - m2_s1_t3_id = InternalTestId(m2_s1_id, name="m2_s1_t3") - m2_s1.add_child(m2_s1_t3_id, TestVisibilityTest(m2_s1_t3_id.name, session_settings=ssettings, is_new=False)) - - # A test with parameters is never considered new: - m2_s1_t4_id = InternalTestId(m2_s1_id, name="m2_s1_t4", parameters='{"hello": "world"}') - m2_s1.add_child( - m2_s1_t4_id, - TestVisibilityTest( - m2_s1_t4_id.name, session_settings=ssettings, is_new=True, parameters=m2_s1_t4_id.parameters - ), - ) + # Known tests: + for i in range(5): + test_name = f"m2_s1_known_t{i}" + m2_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=False), + ) + + for i in range(20): + test_name = f"m2_s1_new_t{i}" + m2_s1.add_child( + InternalTestId(m1_s1_id, name=test_name), + TestVisibilityTest(test_name, session_settings=ssettings, is_new=True), + ) assert test_session.efd_is_faulty_session() == expected_faulty diff --git a/tests/snapshots/test_api_fake_runners.test_manual_api_fake_efd_faulty_session.json b/tests/snapshots/test_api_fake_runners.test_manual_api_fake_efd_faulty_session.json index db01cee9ccf..cd01c1a80b8 100644 --- a/tests/snapshots/test_api_fake_runners.test_manual_api_fake_efd_faulty_session.json +++ b/tests/snapshots/test_api_fake_runners.test_manual_api_fake_efd_faulty_session.json @@ -13,7 +13,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -24,7 +24,7 @@ "ci.pipeline.url": 
"https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -35,18 +35,17 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m1", "test.module_path": "", "test.name": "m1_s1_t1", @@ -54,9 +53,9 @@ "test.status": "pass", "test.suite": "m1_s1", "test.type": "test", - "test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", - "test_suite_id": "845846750843749324", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", + "test_suite_id": "12178664196634984526", "type": "test" }, "metrics": { @@ -64,12 +63,12 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495, + "process_id": 97018, "test.source.end": 2, "test.source.start": 1 }, - "duration": 81875, - "start": 1733391714548202334 + "duration": 80708, + "start": 1734010946928117799 }], [ { @@ -86,7 +85,7 @@ "_dd.ci.env_vars": 
"{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -97,7 +96,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -108,27 +107,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m1", "test.module_path": "", "test.name": "m1_s1_t2", "test.status": "pass", "test.suite": "m1_s1", "test.type": "test", - "test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", - "test_suite_id": "845846750843749324", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", + "test_suite_id": 
"12178664196634984526", "type": "test" }, "metrics": { @@ -136,10 +134,10 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 66625, - "start": 1733391714563523250 + "duration": 76500, + "start": 1734010946944395590 }], [ { @@ -156,7 +154,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -167,7 +165,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -178,13 +176,13 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.codeowners": "[\"@romain\", \"@romain2\"]", "test.command": "manual_efd_faulty_session", @@ -197,9 +195,9 @@ "test.status": "skip", "test.suite": "m1_s1", "test.type": "test", - 
"test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", - "test_suite_id": "845846750843749324", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", + "test_suite_id": "12178664196634984526", "type": "test" }, "metrics": { @@ -207,12 +205,12 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495, + "process_id": 97018, "test.source.end": 12, "test.source.start": 4 }, - "duration": 71208, - "start": 1733391714563681417 + "duration": 67583, + "start": 1734010946944573757 }], [ { @@ -229,7 +227,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -240,7 +238,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -251,13 +249,13 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": 
"CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -269,9 +267,9 @@ "test.status": "skip", "test.suite": "m1_s1", "test.type": "test", - "test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", - "test_suite_id": "845846750843749324", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", + "test_suite_id": "12178664196634984526", "type": "test" }, "metrics": { @@ -279,10 +277,10 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 36083, - "start": 1733391714563825542 + "duration": 29584, + "start": 1734010946944705715 }], [ { @@ -299,7 +297,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -310,7 +308,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -321,13 +319,13 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": 
"2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -339,9 +337,9 @@ "test.status": "pass", "test.suite": "m1_s1", "test.type": "test", - "test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", - "test_suite_id": "845846750843749324", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", + "test_suite_id": "12178664196634984526", "type": "test" }, "metrics": { @@ -349,10 +347,10 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 35125, - "start": 1733391714563932792 + "duration": 29125, + "start": 1734010946944795424 }], [ { @@ -369,7 +367,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -380,7 +378,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ 
-391,13 +389,13 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.code_coverage.enabled": "false", "test.command": "manual_efd_faulty_session", @@ -407,7 +405,7 @@ "test.framework_version": "1.0.0", "test.itr.tests_skipping.enabled": "false", "test.status": "pass", - "test_session_id": "15705414272000062156", + "test_session_id": "18323133602450366815", "type": "test_session_end" }, "metrics": { @@ -415,10 +413,10 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 18241542, - "start": 1733391714547670500 + "duration": 20594916, + "start": 1734010946927447174 }, { "name": "test_visibility.module", @@ -434,7 +432,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -445,7 +443,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", 
"component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -456,12 +454,12 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.code_coverage.enabled": "false", "test.command": "manual_efd_faulty_session", @@ -471,8 +469,8 @@ "test.module": "m1", "test.module_path": "", "test.status": "pass", - "test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", "type": "test_module_end" }, "metrics": { @@ -480,8 +478,8 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1 }, - "duration": 16004833, - "start": 1733391714548140542 + "duration": 17017958, + "start": 1734010946927985882 }, { "name": "test_visibility.suite", @@ -497,7 +495,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -508,7 +506,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": 
"/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -519,12 +517,12 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -533,9 +531,9 @@ "test.module_path": "", "test.status": "pass", "test.suite": "m1_s1", - "test_module_id": "7932912067234031810", - "test_session_id": "15705414272000062156", - "test_suite_id": "845846750843749324", + "test_module_id": "8540260240647316329", + "test_session_id": "18323133602450366815", + "test_suite_id": "12178664196634984526", "type": "test_suite_end" }, "metrics": { @@ -543,8 +541,8 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1 }, - "duration": 15893417, - "start": 1733391714548171417 + "duration": 16846042, + "start": 1734010946928084757 }, { "name": "test_visibility.module", @@ -560,7 +558,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -571,7 +569,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": 
"/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -582,12 +580,12 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.code_coverage.enabled": "false", "test.command": "manual_efd_faulty_session", @@ -597,8 +595,8 @@ "test.module": "m2", "test.module_path": "", "test.status": "pass", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", "type": "test_module_end" }, "metrics": { @@ -606,8 +604,8 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1 }, - "duration": 1614125, - "start": 1733391714564197709 + "duration": 2905833, + "start": 1734010946945047549 }, { "name": "test_visibility.suite", @@ -623,7 +621,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -634,7 +632,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - 
"ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -645,12 +643,12 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -659,9 +657,9 @@ "test.module_path": "", "test.status": "pass", "test.suite": "m2_s1", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test_suite_end" }, "metrics": { @@ -669,8 +667,8 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1 }, - "duration": 900042, - "start": 1733391714564224375 + "duration": 2378667, + "start": 1734010946945070465 }, { "name": "test_visibility.suite", @@ -686,7 +684,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -697,7 +695,7 @@ "ci.pipeline.url": 
"https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -708,12 +706,12 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -722,9 +720,9 @@ "test.module_path": "", "test.status": "pass", "test.suite": "m2_s2", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "599512189588521228", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "6307099272916499859", "type": "test_suite_end" }, "metrics": { @@ -732,14 +730,14 @@ "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1 }, - "duration": 559708, - "start": 1733391714565176084 + "duration": 381542, + "start": 1734010946947489007 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t1", + "resource": "m2_s1_t0", "trace_id": 6, "span_id": 1, "parent_id": 0, @@ -750,7 +748,1525 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": 
"675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t0", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 32417, + "start": 1734010946945087257 + }], +[ + { + 
"name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t1", + "trace_id": 7, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + 
"test.name": "m2_s1_t1", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 26458, + "start": 1734010946945178674 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t2", + "trace_id": 8, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": 
"python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t2", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 29084, + "start": 1734010946945258340 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t3", + "trace_id": 9, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + 
"component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t3", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 23375, + "start": 1734010946945334507 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t4", + "trace_id": 10, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + 
"ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t4", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 22625, + "start": 1734010946945402174 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t5", + "trace_id": 11, + "span_id": 1, + "parent_id": 0, 
+ "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t5", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": 
"17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 21500, + "start": 1734010946945466590 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t6", + "trace_id": 12, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", 
+ "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t6", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 21000, + "start": 1734010946945528757 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t7", + "trace_id": 13, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": 
"2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t7", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 20083, + "start": 1734010946945590132 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t8", + "trace_id": 14, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": 
"43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t8", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 21208, + "start": 1734010946945651882 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t9", + "trace_id": 15, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": 
"test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t9", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + 
"test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 21625, + "start": 1734010946945713840 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t10", + "trace_id": 16, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": 
"096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t10", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 19750, + "start": 1734010946945776090 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t11", + "trace_id": 17, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": 
"First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t11", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 21583, + "start": 1734010946945838549 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t12", + "trace_id": 18, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": 
"Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t12", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 19625, + "start": 1734010946945909299 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t13", + "trace_id": 19, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": 
"{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t13", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + 
"_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 21792, + "start": 1734010946945968590 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t14", + "trace_id": 20, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + 
"span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t14", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 20542, + "start": 1734010946946032757 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t15", + "trace_id": 21, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": 
"test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t15", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 19166, + "start": 1734010946946092674 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t16", + "trace_id": 22, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": 
"https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t16", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 20375, + "start": 1734010946946156174 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t17", + "trace_id": 23, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": 
"{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t17", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + 
"_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 20750, + "start": 1734010946946217424 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t18", + "trace_id": 24, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + 
"span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t18", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 19833, + "start": 1734010946946278132 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t19", + "trace_id": 25, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": 
"test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t19", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 23375, + "start": 1734010946946356882 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t20", + "trace_id": 26, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": 
"https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t20", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 19959, + "start": 1734010946946424090 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t21", + "trace_id": 27, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": 
"{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t21", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + 
"_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 22208, + "start": 1734010946946483924 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t22", + "trace_id": 28, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -761,7 +2277,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -772,27 +2288,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", 
"test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t1", + "test.name": "m2_s1_t22", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -800,17 +2315,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 34542, - "start": 1733391714564244375 + "duration": 19834, + "start": 1734010946946554215 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t2", - "trace_id": 7, + "resource": "m2_s1_t23", + "trace_id": 29, "span_id": 1, "parent_id": 0, "type": "test", @@ -820,7 +2335,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -831,7 +2346,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -842,27 +2357,26 @@ "git.repository_url": 
"https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t2", + "test.name": "m2_s1_t23", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -870,17 +2384,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 33542, - "start": 1733391714564346250 + "duration": 20125, + "start": 1734010946946613590 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t3", - "trace_id": 8, + "resource": "m2_s1_t24", + "trace_id": 30, "span_id": 1, "parent_id": 0, "type": "test", @@ -890,7 +2404,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": 
"[\"runner:test-test-test-test\"]", @@ -901,7 +2415,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -912,27 +2426,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t3", + "test.name": "m2_s1_t24", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -940,17 +2453,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 33250, - "start": 1733391714564456125 + "duration": 20334, + "start": 1734010946946676715 }], [ { "name": "test_visibility.test", "service": 
"test-test", - "resource": "m2_s1_t4", - "trace_id": 9, + "resource": "m2_s1_t25", + "trace_id": 31, "span_id": 1, "parent_id": 0, "type": "test", @@ -960,7 +2473,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -971,7 +2484,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -982,27 +2495,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t4", + "test.name": "m2_s1_t25", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": 
"17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -1010,17 +2522,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 32250, - "start": 1733391714564552959 + "duration": 19000, + "start": 1734010946946737507 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t5", - "trace_id": 10, + "resource": "m2_s1_t26", + "trace_id": 32, "span_id": 1, "parent_id": 0, "type": "test", @@ -1030,7 +2542,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1041,7 +2553,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1052,27 +2564,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": 
"6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t5", + "test.name": "m2_s1_t26", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -1080,17 +2591,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 30417, - "start": 1733391714564645042 + "duration": 20083, + "start": 1734010946946795382 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t6", - "trace_id": 11, + "resource": "m2_s1_t27", + "trace_id": 33, "span_id": 1, "parent_id": 0, "type": "test", @@ -1100,7 +2611,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1111,7 +2622,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": 
"/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1122,27 +2633,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t6", + "test.name": "m2_s1_t27", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -1150,17 +2660,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 31583, - "start": 1733391714564735834 + "duration": 33667, + "start": 1734010946946856965 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t7", - "trace_id": 12, + "resource": "m2_s1_t28", + "trace_id": 34, "span_id": 1, "parent_id": 0, "type": "test", @@ -1170,7 +2680,7 @@ "_dd.ci.env_vars": 
"{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1181,7 +2691,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1192,27 +2702,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t7", + "test.name": "m2_s1_t28", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + 
"test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -1220,17 +2729,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 31666, - "start": 1733391714564827459 + "duration": 22958, + "start": 1734010946946935049 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t8", - "trace_id": 13, + "resource": "m2_s1_t29", + "trace_id": 35, "span_id": 1, "parent_id": 0, "type": "test", @@ -1240,7 +2749,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1251,7 +2760,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1262,27 +2771,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": 
"3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t8", + "test.name": "m2_s1_t29", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -1290,17 +2798,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 31750, - "start": 1733391714564918667 + "duration": 20875, + "start": 1734010946947001382 }], [ { "name": "test_visibility.test", "service": "test-test", - "resource": "m2_s1_t9", - "trace_id": 14, + "resource": "m2_s1_t30", + "trace_id": 36, "span_id": 1, "parent_id": 0, "type": "test", @@ -1310,7 +2818,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1321,7 +2829,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", 
"git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1332,27 +2840,302 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", - "test.name": "m2_s1_t9", + "test.name": "m2_s1_t30", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 19292, + "start": 1734010946947063965 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t31", + "trace_id": 37, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": 
"43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t31", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 20750, + "start": 1734010946947123215 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t32", + "trace_id": 38, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": 
"test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t32", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + 
"test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 22792, + "start": 1734010946947226215 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t33", + "trace_id": 39, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": "First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": 
"096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t33", + "test.status": "pass", + "test.suite": "m2_s1", + "test.type": "test", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", + "type": "test" + }, + "metrics": { + "_dd.py.partial_flush": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 97018 + }, + "duration": 20334, + "start": 1734010946947289840 + }], +[ + { + "name": "test_visibility.test", + "service": "test-test", + "resource": "m2_s1_t34", + "trace_id": 40, + "span_id": 1, + "parent_id": 0, + "type": "test", + "error": 0, + "meta": { + "_dd.base_service": "test_manual_api_fake_efd_faulty_session0", + "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", + "_dd.origin": "ciapp-test", + "_dd.p.dm": "-0", + "_dd.p.tid": "675ae84200000000", + "ci.job.name": "test-job", + "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", + "ci.node.labels": "[\"runner:test-test-test-test\"]", + "ci.node.name": "14727097", + "ci.pipeline.id": "43949931", + "ci.pipeline.name": "Test/test-test/test-project-path", + "ci.pipeline.number": "14726", + "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", + "ci.provider.name": "gitlab", + "ci.stage.name": "test-stage", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", + "component": "dd_manual_test_fw", + "git.branch": "test.brancn/test_name", + "git.commit.author.date": "2024-09-10T10:11:13+01:00", + "git.commit.author.email": 
"First.Last@testtest.com", + "git.commit.author.name": "TestFirst TestLast", + "git.commit.message": "test commit message", + "git.commit.sha": "c165eb71ef833b752783b5268f21521fd16f812a", + "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", + "git.tag": "v1.0.0", + "language": "python", + "library_version": "2.18.0.dev111+g4ca600932", + "os.architecture": "aarch64", + "os.platform": "Linux", + "os.version": "6.6.12-linuxkit", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", + "runtime.name": "CPython", + "runtime.version": "3.11.9", + "span.kind": "test", + "test.command": "manual_efd_faulty_session", + "test.framework": "dd_manual_test_fw", + "test.framework_version": "1.0.0", + "test.module": "m2", + "test.module_path": "", + "test.name": "m2_s1_t34", "test.status": "pass", "test.suite": "m2_s1", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "3960011827465066479", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "13136832476816543943", "type": "test" }, "metrics": { @@ -1360,17 +3143,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 32291, - "start": 1733391714565010709 + "duration": 19833, + "start": 1734010946947350132 }], [ { "name": "test_visibility.test", "service": "test-test", "resource": "m2_s2_t1", - "trace_id": 15, + "trace_id": 41, "span_id": 1, "parent_id": 0, "type": "test", @@ -1380,7 +3163,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": 
"[\"runner:test-test-test-test\"]", @@ -1391,7 +3174,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1402,13 +3185,13 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -1420,9 +3203,9 @@ "test.status": "skip", "test.suite": "m2_s2", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "599512189588521228", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "6307099272916499859", "type": "test" }, "metrics": { @@ -1430,19 +3213,19 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495, + "process_id": 97018, "test.source.end": 2, "test.source.start": 1 }, - "duration": 44250, - "start": 1733391714565197084 + "duration": 39917, + "start": 1734010946947506132 }], [ { "name": "test_visibility.test", "service": "test-test", "resource": "m2_s2_t2", - "trace_id": 16, + "trace_id": 42, "span_id": 1, "parent_id": 
0, "type": "test", @@ -1452,7 +3235,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1463,7 +3246,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1474,27 +3257,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", "test.name": "m2_s2_t2", "test.status": "pass", "test.suite": "m2_s2", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "599512189588521228", + "test_module_id": "17061851581233455560", + 
"test_session_id": "18323133602450366815", + "test_suite_id": "6307099272916499859", "type": "test" }, "metrics": { @@ -1502,17 +3284,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 32666, - "start": 1733391714565305209 + "duration": 21333, + "start": 1734010946947589674 }], [ { "name": "test_visibility.test", "service": "test-test", "resource": "m2_s2_t3", - "trace_id": 17, + "trace_id": 43, "span_id": 1, "parent_id": 0, "type": "test", @@ -1522,7 +3304,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1533,7 +3315,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1544,19 +3326,18 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + 
"runtime.version": "3.11.9", "span.kind": "test", "test.codeowners": "[\"@romain\"]", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", "test.name": "m2_s2_t3", @@ -1564,9 +3345,9 @@ "test.status": "pass", "test.suite": "m2_s2", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "599512189588521228", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "6307099272916499859", "type": "test" }, "metrics": { @@ -1574,19 +3355,19 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495, + "process_id": 97018, "test.source.end": 12, "test.source.start": 4 }, - "duration": 59625, - "start": 1733391714565399292 + "duration": 37250, + "start": 1734010946947654507 }], [ { "name": "test_visibility.test", "service": "test-test", "resource": "m2_s2_t4", - "trace_id": 18, + "trace_id": 44, "span_id": 1, "parent_id": 0, "type": "test", @@ -1596,7 +3377,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": "https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1607,7 +3388,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": 
"dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1618,27 +3399,26 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", "test.framework_version": "1.0.0", - "test.is_new": "true", "test.module": "m2", "test.module_path": "", "test.name": "m2_s2_t4", "test.status": "pass", "test.suite": "m2_s2", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "599512189588521228", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "6307099272916499859", "type": "test" }, "metrics": { @@ -1646,17 +3426,17 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 32292, - "start": 1733391714565524000 + "duration": 20250, + "start": 1734010946947733340 }], [ { "name": "test_visibility.test", "service": "test-test", "resource": "m2_s2_t5", - "trace_id": 19, + "trace_id": 45, "span_id": 1, "parent_id": 0, "type": "test", @@ -1666,7 +3446,7 @@ "_dd.ci.env_vars": "{\"CI_PROJECT_URL\":\"https://test.test.io/Test/test-test/test-test\",\"CI_PIPELINE_ID\":\"43949931\",\"CI_JOB_ID\":\"633358062\"}", "_dd.origin": "ciapp-test", "_dd.p.dm": "-0", - "_dd.p.tid": "6751756200000000", + "_dd.p.tid": "675ae84200000000", "ci.job.name": "test-job", "ci.job.url": 
"https://test.test.io/Test/test-test/test-test/-/jobs/633358062", "ci.node.labels": "[\"runner:test-test-test-test\"]", @@ -1677,7 +3457,7 @@ "ci.pipeline.url": "https://test.\u2020est.io/Test/test-\u2020est/test-test/-/pipelines/43949931", "ci.provider.name": "gitlab", "ci.stage.name": "test-stage", - "ci.workspace_path": "/tmp/pytest-of-root/pytest-12/test_manual_api_fake_efd_faulty_session0", + "ci.workspace_path": "/tmp/pytest-of-root/pytest-87/test_manual_api_fake_efd_faulty_session0", "component": "dd_manual_test_fw", "git.branch": "test.brancn/test_name", "git.commit.author.date": "2024-09-10T10:11:13+01:00", @@ -1688,13 +3468,13 @@ "git.repository_url": "https://test.test.io/Test/test-test/test-test.git", "git.tag": "v1.0.0", "language": "python", - "library_version": "2.18.0.dev124+gc03b9e422.d20241205", + "library_version": "2.18.0.dev111+g4ca600932", "os.architecture": "aarch64", "os.platform": "Linux", "os.version": "6.6.12-linuxkit", - "runtime-id": "bdef4ecb6c674245bfc4f6518ff5a773", + "runtime-id": "096f74b542c341ea9f8a4b6947a6a95f", "runtime.name": "CPython", - "runtime.version": "3.9.19", + "runtime.version": "3.11.9", "span.kind": "test", "test.command": "manual_efd_faulty_session", "test.framework": "dd_manual_test_fw", @@ -1705,9 +3485,9 @@ "test.status": "pass", "test.suite": "m2_s2", "test.type": "test", - "test_module_id": "17675353669520667242", - "test_session_id": "15705414272000062156", - "test_suite_id": "599512189588521228", + "test_module_id": "17061851581233455560", + "test_session_id": "18323133602450366815", + "test_suite_id": "6307099272916499859", "type": "test" }, "metrics": { @@ -1715,8 +3495,8 @@ "_dd.top_level": 1, "_dd.tracer_kr": 1.0, "_sampling_priority_v1": 1, - "process_id": 32495 + "process_id": 97018 }, - "duration": 30792, - "start": 1733391714565625125 + "duration": 19708, + "start": 1734010946947794757 }]] From 99e9aff376a71ceee19264cf79e99e695495f8aa Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 12 Dec 2024 
19:00:06 +0100 Subject: [PATCH 19/78] fix(asm): use unpatched json loads internally (#11688) --- ddtrace/appsec/_utils.py | 5 ++--- ddtrace/internal/_unpatched.py | 1 + .../fix-appsec-use-unpatched-json-8d09aacad4808ef2.yaml | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/fix-appsec-use-unpatched-json-8d09aacad4808ef2.yaml diff --git a/ddtrace/appsec/_utils.py b/ddtrace/appsec/_utils.py index e2d46fe098e..bb8739654c5 100644 --- a/ddtrace/appsec/_utils.py +++ b/ddtrace/appsec/_utils.py @@ -5,6 +5,7 @@ from ddtrace.appsec._constants import API_SECURITY from ddtrace.appsec._constants import APPSEC +from ddtrace.internal._unpatched import unpatched_json_loads from ddtrace.internal.compat import to_unicode from ddtrace.internal.logger import get_logger from ddtrace.internal.utils.http import _get_blocked_template # noqa:F401 @@ -17,8 +18,6 @@ def parse_response_body(raw_body): - import json - import xmltodict from ddtrace.appsec import _asm_request_context @@ -54,7 +53,7 @@ def access_body(bd): try: # TODO handle charset if "json" in content_type: - req_body = json.loads(access_body(raw_body)) + req_body = unpatched_json_loads(access_body(raw_body)) elif "xml" in content_type: req_body = xmltodict.parse(access_body(raw_body)) else: diff --git a/ddtrace/internal/_unpatched.py b/ddtrace/internal/_unpatched.py index c226379f759..e209f30ff2a 100644 --- a/ddtrace/internal/_unpatched.py +++ b/ddtrace/internal/_unpatched.py @@ -1,6 +1,7 @@ # Acquire a reference to the open function from the builtins module. This is # necessary to ensure that the open function can be used unpatched when required. from builtins import open as unpatched_open # noqa +from json import loads as unpatched_json_loads # noqa # Acquire a reference to the threading module. Some parts of the library (e.g. 
# the profiler) might be enabled programmatically and therefore might end up diff --git a/releasenotes/notes/fix-appsec-use-unpatched-json-8d09aacad4808ef2.yaml b/releasenotes/notes/fix-appsec-use-unpatched-json-8d09aacad4808ef2.yaml new file mode 100644 index 00000000000..a4784672021 --- /dev/null +++ b/releasenotes/notes/fix-appsec-use-unpatched-json-8d09aacad4808ef2.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + ASM: This fix resolves an issue where AppSec was using a patched JSON loads, creating telemetry errors. From ae0547ef8da4dd6b2c9d3545e05593eff6e6f5e3 Mon Sep 17 00:00:00 2001 From: Romain Komorn <136473744+romainkomorndatadog@users.noreply.github.com> Date: Thu, 12 Dec 2024 19:15:55 +0100 Subject: [PATCH 20/78] chore(ci_visibility): make codeowners file relative to repo path (#11696) This fixes an issue in the new version of the `pytest` plugin where `CODEOWNERS` parsing was incorrect due to the plugin sending absolute paths instead of paths relative to the repo root. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - 
Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/contrib/pytest/_plugin_v2.py | 10 +++++++++- ddtrace/internal/ci_visibility/recorder.py | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/ddtrace/contrib/pytest/_plugin_v2.py b/ddtrace/contrib/pytest/_plugin_v2.py index b9cf7daf564..d3825578d7a 100644 --- a/ddtrace/contrib/pytest/_plugin_v2.py +++ b/ddtrace/contrib/pytest/_plugin_v2.py @@ -256,8 +256,16 @@ def _pytest_collection_finish(session) -> None: InternalTestSuite.discover(suite_id) item_path = Path(item.path if hasattr(item, "path") else item.fspath).absolute() + workspace_path = InternalTestSession.get_workspace_path() + if workspace_path: + try: + repo_relative_path = item_path.relative_to(workspace_path) + except ValueError: + repo_relative_path = item_path + else: + repo_relative_path = item_path - item_codeowners = InternalTestSession.get_path_codeowners(item_path) + item_codeowners = InternalTestSession.get_path_codeowners(repo_relative_path) if repo_relative_path else None source_file_info = _get_source_file_info(item, item_path) diff --git a/ddtrace/internal/ci_visibility/recorder.py b/ddtrace/internal/ci_visibility/recorder.py index 12bb3688dad..225221a4a7d 100644 --- a/ddtrace/internal/ci_visibility/recorder.py +++ b/ddtrace/internal/ci_visibility/recorder.py @@ -1031,7 +1031,7 @@ def _on_session_get_path_codeowners(path: Path) -> Optional[List[str]]: codeowners = CIVisibility.get_codeowners() if codeowners is None: return None - return codeowners.of(str(path.absolute())) + return codeowners.of(str(path)) def _register_session_handlers(): From 
30a8c6dc95a64fcd66be04814a373b513a432357 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Thu, 12 Dec 2024 13:31:50 -0500 Subject: [PATCH 21/78] ci: use mocks for GitHub calls in needs_testrun.py tests (#11692) Avoid hitting rate limits when testing this script, especially when the data/responses from GitHub API should be static anyways. Avoids: https://gitlab.ddbuild.io/DataDog/apm-reliability/dd-trace-py/-/jobs/736522083 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .riot/requirements/151a249.txt | 20 + .riot/requirements/196755b.txt | 20 - .riot/requirements/4d1fa34.txt | 20 + .riot/requirements/b2ac981.txt | 22 - hatch.toml 
| 2 + riotfile.py | 2 + scripts/needs_testrun.py | 45 +- scripts/vcr/needs_testrun.yaml | 23996 +++++++++++++++++++++++++++++++ 8 files changed, 24076 insertions(+), 51 deletions(-) create mode 100644 .riot/requirements/151a249.txt delete mode 100644 .riot/requirements/196755b.txt create mode 100644 .riot/requirements/4d1fa34.txt delete mode 100644 .riot/requirements/b2ac981.txt create mode 100644 scripts/vcr/needs_testrun.yaml diff --git a/.riot/requirements/151a249.txt b/.riot/requirements/151a249.txt new file mode 100644 index 00000000000..e43376d1755 --- /dev/null +++ b/.riot/requirements/151a249.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/151a249.in +# +attrs==24.2.0 +coverage[toml]==7.6.9 +hypothesis==6.45.0 +iniconfig==2.0.0 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +ruamel-yaml==0.18.6 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/196755b.txt b/.riot/requirements/196755b.txt deleted file mode 100644 index 250298e3848..00000000000 --- a/.riot/requirements/196755b.txt +++ /dev/null @@ -1,20 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.11 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/196755b.in -# -attrs==24.2.0 -coverage[toml]==7.6.1 -hypothesis==6.45.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==24.1 -pluggy==1.5.0 -pytest==8.3.3 -pytest-cov==5.0.0 -pytest-mock==3.14.0 -ruamel-yaml==0.18.6 -ruamel-yaml-clib==0.2.8 -sortedcontainers==2.4.0 diff --git a/.riot/requirements/4d1fa34.txt b/.riot/requirements/4d1fa34.txt new file mode 100644 index 00000000000..e4b768d197a --- /dev/null +++ b/.riot/requirements/4d1fa34.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile 
--allow-unsafe --no-annotate .riot/requirements/4d1fa34.in +# +attrs==24.2.0 +coverage[toml]==7.6.9 +hypothesis==6.45.0 +iniconfig==2.0.0 +lxml==5.3.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +ruamel-yaml==0.18.6 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/b2ac981.txt b/.riot/requirements/b2ac981.txt deleted file mode 100644 index b9c0f587e1e..00000000000 --- a/.riot/requirements/b2ac981.txt +++ /dev/null @@ -1,22 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --no-annotate --resolver=backtracking .riot/requirements/b2ac981.in -# -attrs==23.1.0 -coverage[toml]==7.2.7 -exceptiongroup==1.1.2 -hypothesis==6.45.0 -iniconfig==2.0.0 -mock==5.1.0 -opentracing==2.4.0 -packaging==23.1 -pluggy==1.2.0 -pytest==7.4.0 -pytest-cov==4.1.0 -pytest-mock==3.11.1 -ruamel-yaml==0.17.32 -ruamel-yaml-clib==0.2.7 -sortedcontainers==2.4.0 -tomli==2.0.1 diff --git a/hatch.toml b/hatch.toml index 7bc7e107c04..f22870e01f1 100644 --- a/hatch.toml +++ b/hatch.toml @@ -133,8 +133,10 @@ _ = [ detached = true python = "3.10" extra-dependencies = [ + "lxml==5.3.0", "packaging==23.1", "ruamel.yaml==0.18.6", + "vcrpy==6.0.2", ] [envs.scripts.scripts] diff --git a/riotfile.py b/riotfile.py index 1a1f65ed116..b12bfcc1181 100644 --- a/riotfile.py +++ b/riotfile.py @@ -116,6 +116,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pys=["3"], pkgs={ "ruamel.yaml": latest, + "lxml": latest, }, ), Venv( @@ -124,6 +125,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pys=["3"], pkgs={ "ruamel.yaml": latest, + "lxml": latest, }, ), Venv( diff --git a/scripts/needs_testrun.py b/scripts/needs_testrun.py index 99ebba2c18c..bbbd73b33db 100755 --- a/scripts/needs_testrun.py +++ b/scripts/needs_testrun.py @@ -8,7 +8,6 @@ import logging import os from pathlib import Path -import re 
from subprocess import check_output import sys import typing as t @@ -16,6 +15,8 @@ from urllib.request import Request from urllib.request import urlopen +from lxml import html + sys.path.insert(0, str(Path(__file__).parents[1])) @@ -26,20 +27,34 @@ LOGGER = logging.getLogger(__name__) -BASE_BRANCH_PATTERN = re.compile(r':([^<]+)') - @cache def get_base_branch(pr_number: int) -> str: """Get the base branch of a PR - >>> get_base_branch(6412) + >>> import vcr + >>> with vcr.use_cassette( + ... "scripts/vcr/needs_testrun.yaml", + ... filter_headers=["authorization", "user-agent"], + ... record_mode="all"): + ... get_base_branch(6412) + ... get_base_branch(11534) + ... get_base_branch(11690) '1.x' + '2.15' + 'main' """ pr_page_content = urlopen(f"https://github.com/DataDog/dd-trace-py/pull/{pr_number}").read().decode("utf-8") - return BASE_BRANCH_PATTERN.search(pr_page_content).group(1) + tree = html.fromstring(pr_page_content) + base_ref = tree.find_class("base-ref") + if base_ref: + ref = base_ref[0].text_content().strip() + # We might have `DataDog:1.x` or `DataDog:main` so we need to strip the prefix + _, _, ref = ref.rpartition(":") + return ref.strip() + return "main" @cache @@ -116,7 +131,12 @@ def get_changed_files(pr_number: int, sha: t.Optional[str] = None) -> t.Set[str] or if there is a specific SHA given, use the less accurate method of diffing against a base commit, either the given SHA or the merge-base. - >>> sorted(get_changed_files(6388)) # doctest: +NORMALIZE_WHITESPACE + >>> import vcr + >>> with vcr.use_cassette( + ... "scripts/vcr/needs_testrun.yaml", + ... filter_headers=["authorization", "user-agent"], + ... record_mode="all"): + ... 
sorted(get_changed_files(6388)) # doctest: +NORMALIZE_WHITESPACE ['ddtrace/debugging/_expressions.py', 'releasenotes/notes/fix-debugger-expressions-none-literal-30f3328d2e386f40.yaml', 'tests/debugging/test_expressions.py'] @@ -141,12 +161,19 @@ def get_changed_files(pr_number: int, sha: t.Optional[str] = None) -> t.Set[str] def needs_testrun(suite: str, pr_number: int, sha: t.Optional[str] = None) -> bool: """Check if a testrun is needed for a suite and PR - >>> needs_testrun("debugger", 6485) + >>> import vcr + >>> with vcr.use_cassette( + ... "scripts/vcr/needs_testrun.yaml", + ... filter_headers=["authorization", "user-agent"], + ... record_mode="all"): + ... needs_testrun("debugger", 6485) + ... needs_testrun("debugger", 6388) + ... needs_testrun("foobar", 6412) + ... needs_testrun("profile", 11690) True - >>> needs_testrun("debugger", 6388) True - >>> needs_testrun("foobar", 6412) True + False """ if "itr:noskip" in get_latest_commit_message().lower(): return True diff --git a/scripts/vcr/needs_testrun.yaml b/scripts/vcr/needs_testrun.yaml new file mode 100644 index 00000000000..f68dca107eb --- /dev/null +++ b/scripts/vcr/needs_testrun.yaml @@ -0,0 +1,23996 @@ +interactions: +- request: + body: null + headers: + Connection: + - close + Host: + - github.com + method: GET + uri: https://github.com/DataDog/dd-trace-py/pull/6412 + response: + body: + string: "\n\n\n\n\n\n\n\n\n\n\n\n \n \n + \ \n \n \n \n + \ \n + \ \n\n + \ \n\n \n\n \n \n \n \n \n\n\n \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n\n\n\n\n ci: run the debugger suite only if necessary by P403n1x87 + \xB7 Pull Request #6412 \xB7 DataDog/dd-trace-py \xB7 GitHub\n\n\n\n + \ \n \n \n\n \n \n\n\n + \ \n\n\n \n\n\n \n \n\n \n \n\n + \ \n\n\n\n \n\n \n\n\n\n\n \n\n \n\n \n\n + \ \n\n \n\n \n\n \n \n \n\n \n \n \n\n\n\n\n \n\n\n\n + \ \n\n\n \n \n \n \n\n \n\n \n + \ \n\n + \ \n\n\n\n \n\n \n\n\n \n\n \n\n \n \n + \ 
\n\n\n\n\n\n \n\n + \ \n\n \n
\n \n\n\n
\n Skip to content\n\n + \ \n \n + \ \n \n \n\n\n\n\n\n\n\n\n\n \n \n + \
\n\n\n\n\n\n + \ \n\n \n\n \n\n\n
\n

Navigation Menu

\n\n \n\n + \
\n
\n
\n + \ \n
\n\n \n + \ \n + \ \n\n + \ \n\n
\n \n Sign in\n \n
\n
\n\n\n + \
\n
\n + \ \n\n
\n \n\n\n\n \n \n
\n \n \n\n + \
\n Search + or jump to...\n
\n + \ \n\n + \
\n \n\n \n\n \n
\n \n + \

Search + code, repositories, users, issues, pull requests...

\n
\n \n
+ \
\n
\n \n
\n \n \n \n \n \n\n \n
\n
\n
\n
\n + \ \n
\n + \
\n Clear\n + \ \n\n + \
\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \
\n \n + \
\n + \
\n
\n\n \n + \
\n
\n\n
\n
\n
\n \n
\n + \ \n\n \n
\n + \
\n
\n + \

\n Provide + feedback\n

\n \n
\n
\n + \ \n
\n
\n + \ \n
\n \n + \
\n

We read every piece of feedback, and take your input very + seriously.

\n \n \n + \ \n
\n
\n + \ \n
\n\n \n \n\n + \ \n
\n
\n + \
\n

\n Saved searches\n

\n + \

Use + saved searches to filter your results more quickly

\n
\n
\n \n + \
\n
\n \n
\n \n + \
\n\n \n\n
\n + \
\n
\n\n
\n + \
\n \n
\n + \
\n
\n\n\n
\n \n Sign in\n \n + \
\n\n \n Sign + up\n \n \n
\n + \
\n
\n \n\n\n \n \n\n + \
\n\n\n\n\n\n\n\n\n + \
\n\n\n + \ \n\n\n + \ \n
\n\n\n + \ \n\n\n\n\n\n\n \n
\n
\n \n \n\n\n\n + \ \n \n\n \n\n\n\n\n\n\n \n
\n\n
\n\n + \
\n \n
\n + \ \n \n\n + \ \n \n + \ \n DataDog\n + \ \n /\n + \ \n dd-trace-py\n \n\n Public\n
\n\n\n + \
\n\n
\n \n\n + \
\n
\n\n
\n
\n\n\n \n\n + \
\n\n \n\n\n\n\n
\n \n\n\n\n \n \n
\n \n\n
\n \n \n \n\n
\n
\n
\n\n \n
\n \n \n New issue\n \n \n + \
\n
\n \n \n\n
\n\n
\n

\n Have a question + about this project? Sign up for a free GitHub account to open an + issue and contact its maintainers and the community.\n

\n\n \n\n

By + clicking “Sign up for GitHub”, you agree to our terms of service + and\n privacy statement. We\u2019ll occasionally send you + account related emails.

\n\n

\n + \ Already on GitHub?\n Sign + in\n to your account\n

\n
\n\n
\n
\n
\n + \ \n + \
\n\n

\n ci: + run the debugger suite only if necessary\n #6412\n

\n
\n
\n\n
\n
\n \n + \ Merged\n\n
\n\n\n\n\n + \
\n P403n1x87\n + \ merged 7 commits into\n\n\n DataDog:1.x\n\nfrom\n\nP403n1x87:ci/debugger-suitespec\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Jul 25, + 2023\n\n\n
\n
\n\n\n \n\n\n\n
\n
\n
\n
\n + \
\n \n Merged\n\n + \
\n\n\n\n\n
\n + \

\n \n ci: run the debugger suite only if necessary\n \n + \ #6412\n

\n\n + \
\n P403n1x87\n merged 7 commits into\n\n\n DataDog:1.x\n\nfrom\n\nP403n1x87:ci/debugger-suitespec\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Jul 25, + 2023\n\n\n
\n
\n
\n + \
\n
\n
\n
\n
\n\n\n\n + \ \n + \ \n\n\n + \ \n\n\n
\n + \
\n

Conversation

\n + \ \n \n\n\n \n\n
\n\n
\n \"P403n1x87\"\n + \ \n \n
\n + \
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \"@P403n1x87\"\n\n \n + \ P403n1x87\n \n\n \n\n \n\n commented\n\n\n + \ Jul + 20, 2023\n\n\n \n + \ \n\n
\n + \ \n
\n \n edited by majorgreys\n + \ \n \n \n \n\n
\n
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n
\n + \
\n \n \n + \
\n

We introduce the concept + of suitespec as a way of describing how sources affect test runs. We use it + to ensure that the debugger tests run only if sources that the suite depends + on are modified by the current commit.

\n

Suitespec Implementation + Details

\n

The suitespec solution is based on a manual + configuration of of test suites. To simplify the declaration of file patterns + for test suites, one can make use of components, which essentially + are a logic collection of patterns. Test suite can then be declared as a list + of components to reflect their dependencies on these logic parts, and to DRY + the declaration itself by avoiding repetitions.

\n

Notes

\n
    \n
  • When the script fails for any reason, tests are run.
  • \n
  • It + is important that path patterns are listed correctly, or some tests might + not run when they are in fact supposed to.
  • \n
  • Best effort to determine + the correct list of changed files via the GitHub REST API. When that fails, + we fall back to the less accurate git diff + against the target branch.
  • \n
\n

Checklist

\n
    \n
  • Change(s) + are motivated and described in the PR description.
  • \n
  • Testing strategy is described if automated tests are not included + in the PR.
  • \n
  • Risk is outlined + (performance impact, potential for breakage, maintainability, etc).
  • \n
  • Change is maintainable (easy to change, telemetry, documentation).
  • \n
  • Library release note guidelines are followed. If no release + note is required, add label changelog/no-changelog.
  • \n
  • Documentation is included (in-code, generated user docs, public corp docs).
  • \n
  • Backport labels are set (if applicable)
  • \n
\n

Reviewer Checklist

\n
    \n
  • Title + is accurate.
  • \n
  • No unnecessary + changes are introduced.
  • \n
  • Description + motivates each change.
  • \n
  • Avoids + breaking API changes unless absolutely necessary.
  • \n
  • Testing strategy adequately addresses listed risk(s).
  • \n
  • Change is maintainable (easy to change, telemetry, documentation).
  • \n
  • Release note makes sense to a user of the library.
  • \n
  • Reviewer has explicitly acknowledged and discussed the performance + implications of this PR as reported in the benchmarks PR comment.
  • \n
  • Backport labels are set in a manner that is consistent with + the release branch maintenance policy
  • \n
\n
\n + \
\n \n
\n\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n\n\n \n\n \n
\n\n\n
\n \n
\n + \
\n \n \n\n
\n
\n\n + \ \n\n \"@P403n1x87\"\nP403n1x87\n\n\n\n\n added\n the \n\n changelog/no-changelog\n\n A changelog + entry is not required for this PR.\n label\n\n\n Jul 20, 2023\n\n
\n
\n\n\n\n\n
\n\n + \
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n riotfile.py\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 4 times, most recently\n from\n 8953a58 + \ to\n 575d15e + \ \n + \ Compare\n \n\n\n\n July 20, 2023 13:13 \n + \ \n
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n reviewed\n\n\n \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n
\n + \ \n scripts/needs_testrun.py\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@emmettbutler\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Collaborator\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n emmettbutler\n + \ \n\n \n\n \n\n commented\n\n\n Jul 20, 2023\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n + \ \n
\n + \

I love this idea!

\n
\n
\n\n\n
\n\n + \ \n\n
\n
\n + \
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n reviewed\n\n\n \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n + \
\n + \ \n \n
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n Member\n\n\n \n\n
\n\n

\n
\n \"@brettlangdon\"\n\n \n brettlangdon\n \n\n \n\n \n\n + \ left a comment\n\n\n\n\n \n
\n\n

\n
\n \n\n
\n
\n + \ \n
\n \n \n\n

Choose a reason for hiding this comment

\n\n + \

\n The reason will be displayed to describe this + comment to others. Learn more.\n + \

\n\n
\n \n \n
\n\n + \ \n
\n\n \n
\n

I know @gnufede was trying to get CI Visibility + running for this repo, if we go that route, we might be able to ITR ?

\n + \
\n
\n \n
\n\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n + \
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n
\n \n \n
\n
\n
\n + \ \n tests/.suitespec.json\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@P403n1x87\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n Author\n\n\n
\n\n

\n
\n \n\n \n + \ P403n1x87\n \n\n \n\n \n\n commented\n\n\n + \ Jul 20, 2023\n\n\n \n
\n\n + \

\n
\n\n\n
\n\n \n\n + \ \n \n \n \n + \ \n
\n
\n

I know @gnufede + was trying to get CI Visibility running for this repo, if we go that route, + we might be able to ITR ?

\n
\n

My understanding + is that ITR is a per-test rather than per-test-suite. So I see ITR improving + this even further rather than an alternative?

\n
\n
\n\n\n
\n\n + \ \n\n
\n
\n
\n \n \n
\n + \ emmettbutler reacted with thumbs up emoji\n + \
\n \n + \
\n
\n
\n
\n
\n + \
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 3 times, most recently\n from\n 713167a + \ to\n e8c3ecc + \ \n + \ Compare\n \n\n\n\n July 20, 2023 17:15 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@emmettbutler\"\n emmettbutler\n\n\n self-requested a review\n\n\n + \ July 20, 2023 21:23 \n + \ \n
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n + \ \n
\n \n
\n
\"@P403n1x87\"\n P403n1x87\n\n\n dismissed\n emmettbutler\u2019s stale review\n\n\n + \ via\n \n 4e53e79\n + \ \n\n July + 21, 2023 09:41 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n from\n e8c3ecc + \ to\n 4e53e79 + \ \n + \ Compare\n \n\n\n\n July 21, 2023 09:41 \n + \ \n
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 21, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n
\n + \ \n .circleci/config.yml\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 3 times, most recently\n from\n d2671c5 + \ to\n 19b0da0 + \ \n + \ Compare\n \n\n\n\n July 21, 2023 10:35 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n marked this pull request as + ready for review\n\n July + 21, 2023 10:41 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n a team\n\n as code owners\n\n\n + \ July 21, 2023 10:41 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n majorgreys, + \n jbertran, + \n brettlangdon, + \n emmettbutler + and \n a team\n\n\n\n July 21, 2023 10:41 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n from\n af236d7 + \ to\n a4c0000 + \ \n + \ Compare\n \n\n\n\n July 21, 2023 15:26 \n + \ \n
\n
\n\n\n
\n\n\n
\n + \
\n
\n
\n \n \n
\n
\n + \
\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 2 times, most recently\n from\n c50870c + \ to\n e812418 + \ \n + \ Compare\n \n\n\n\n July 24, 2023 12:52 \n + \ \n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n + \ \n
\n \n
\n
\"@brettlangdon\"\n brettlangdon\n\n\n dismissed\n emmettbutler\u2019s stale review\n\n\n + \ via\n \n cdb1444\n + \ \n\n July + 24, 2023 16:44 \n \n
\n
\n\n\n
\n\n + \
\n \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n reviewed\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n brettlangdon + and \n emmettbutler\n\n\n\n + \ July 24, 2023 18:57 \n + \ \n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n\n\n
\n + \ \n
\n
\n \n
\n \n
\n + \
P403n1x87\n \n\n added 5 commits\n + \ July 24, 2023 22:13
\n
+ \
\n
\n + \ \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 0d844de\n \n
\n
\n + \
\n
\n
We introduce the concept of suitespec as a way of describing
+        how\nsources affect test runs. We use it to ensure that the debugger\ntests
+        run only if sources that the suite depends on are modified\nby the current
+        commit.
\n
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 1ffab15\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \
\n \n web + scraping FTW\n \n\n
\n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ a115763\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \
\n \n add + doctests\n \n\n
\n\n
\n \n\n + \ \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 4d0fb2e\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \
\n \n use + dynamic config\n \n\n
\n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 690a7b1\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \n
\n
\"@P403n1x87\"\n P403n1x87\n\n\n dismissed stale reviews from + emmettbutler + and brettlangdon\n\n\n + \ via\n \n 690a7b1\n + \ \n\n July + 24, 2023 21:17 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n from\n 5f1daca + \ to\n 690a7b1 + \ \n + \ Compare\n \n\n\n\n July 24, 2023 21:17 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n emmettbutler + and \n brettlangdon\n\n\n\n + \ July 24, 2023 21:17 \n + \ \n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n approved these changes\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n\n\n + \
\n + \ \n
\n
\n \n
\n \n
\n + \
P403n1x87\n \n\n added 2 commits\n + \ July 25, 2023 09:06
\n
+ \
\n
\n + \ \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ f421ece\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 3eacc26\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n
\n + \ \n + \ \n\n + \
\n
\n\n\n \"@P403n1x87\"\n P403n1x87\n\n\n\n merged commit f441242\n into\n\n \n \n DataDog:1.x\n + \ \n\n\n Jul 25, 2023\n\n
\n
\n\n
\n\n
\n
\n \n \n\n
\n\n
\n
\n \"@Yun-Kim\"\nYun-Kim\n\n\n\n mentioned this pull request\n \n Jul 26, 2023\n + \ \n
\n\n\n\n\n \n
\n \n \n \n\n \n \n\n + \ \n \n \n \n\n\n 16 + tasks\n
\n
\n\n\n\n
\n
\n\n + \ \n
\n \n + \ \n + \ \n\n \n
\n \n Yun-Kim \n\n added a commit\n that referenced\n + \ this pull request\n\n \n + \ Jul + 26, 2023\n \n
\n \n
\n + \
\n
\n \n
\n
\n \n \"@Yun-Kim\"\n + \
\n
\n\n\n \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n\n \n + \
\n \n 43497d1\n \n
\n
\n + \
\n
\n
#6412
+        changed our circleci configuration setup to be dynamic, but this\ninadvertently
+        removed the `coverage` and `riot_run_latest` circleci\npipeline parameters
+        from the main `.circleci/config.yml` file, which\nbreaks our nightly 1.x coverage
+        pipeline runs. This PR re-adds those\nparameters back and re-enables coverage
+        reporting.\n\nNote that `datastreams`, `langchain`, `elasticsearch`,\n`integration-snapshot`
+        test suites are still failing on 1.x nightly\ncoverage runs and will need
+        to be fixed.\n\n## Checklist\n\n- [x] Change(s) are motivated and described
+        in the PR description.\n- [x] Testing strategy is described if automated tests
+        are not included\nin the PR.\n- [x] Risk is outlined (performance impact,
+        potential for breakage,\nmaintainability, etc).\n- [x] Change is maintainable
+        (easy to change, telemetry, documentation).\n- [x] [Library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\nare
+        followed. If no release note is required, add label\n`changelog/no-changelog`.\n-
+        [x] Documentation is included (in-code, generated user docs, [public\ncorp
+        docs](https://github.com/DataDog/documentation/)).\n-
+        [x] Backport labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n##
+        Reviewer Checklist\n\n- [x] Title is accurate.\n- [x] No unnecessary changes
+        are introduced.\n- [x] Description motivates each change.\n- [x] Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges
+        unless absolutely necessary.\n- [x] Testing strategy adequately addresses
+        listed risk(s).\n- [x] Change is maintainable (easy to change, telemetry,
+        documentation).\n- [x] Release note makes sense to a user of the library.\n-
+        [x] Reviewer has explicitly acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment.\n- [x] Backport labels
+        are set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
\n + \
\n
\n\n
\n
\n
\n\n \n
\n \n \n \n\n \n
\n + \ \n romainkomorndatadog + \n\n pushed a commit\n that referenced\n this pull request\n\n + \ \n Aug 8, 2023\n + \ \n
\n \n
\n + \
\n
\n \n
\n
\n \n \"@P403n1x87\"\n + \ \n \"@romainkomorndatadog\"\n + \
\n
\n\n\n
\n + \ \n ci: + run the debugger suite only if necessary (#6412)\n + \ \n\n \n + \ \n \n\n
\n\n + \
\n \n\n \n \n \n\n \n\n
\n\n
\n
\n\n \n
\n + \ \n 6838e4b\n \n
\n
\n + \
\n
\n
We introduce the concept of suitespec as a way of describing
+        how sources\naffect test runs. We use it to ensure that the debugger tests
+        run only\nif sources that the suite depends on are modified by the current
+        commit.\n\n## Suitespec Implementation Details\n\nThe suitespec solution is
+        based on a manual configuration of of test\nsuites. To simplify the declaration
+        of file patterns for test suites,\none can make use of _components_, which
+        essentially are a logic\ncollection of patterns. Test suite can then be declared
+        as a list of\ncomponents to reflect their dependencies on these logic parts,
+        and to\nDRY the declaration itself by avoiding repetitions.\n\n## Notes\n\n-
+        When the script fails for any reason, tests are run.\n- It is important that
+        path patterns are listed correctly, or some tests\nmight not run when they
+        are in fact supposed to.\n- Best effort to determine the correct list of changed
+        files via the\nGitHub REST API. When that fails, we fall back to the less
+        accurate `git\ndiff` against the target branch.\n\n## Checklist\n\n- [x] Change(s)
+        are motivated and described in the PR description.\n- [x] Testing strategy
+        is described if automated tests are not included\nin the PR.\n- [x] Risk is
+        outlined (performance impact, potential for breakage,\nmaintainability, etc).\n-
+        [x] Change is maintainable (easy to change, telemetry, documentation).\n-
+        [x] [Library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\nare
+        followed. If no release note is required, add label\n`changelog/no-changelog`.\n-
+        [x] Documentation is included (in-code, generated user docs, [public\ncorp
+        docs](https://github.com/DataDog/documentation/)).\n-
+        [x] Backport labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n##
+        Reviewer Checklist\n\n- [ ] Title is accurate.\n- [ ] No unnecessary changes
+        are introduced.\n- [ ] Description motivates each change.\n- [ ] Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges
+        unless absolutely necessary.\n- [ ] Testing strategy adequately addresses
+        listed risk(s).\n- [ ] Change is maintainable (easy to change, telemetry,
+        documentation).\n- [ ] Release note makes sense to a user of the library.\n-
+        [ ] Reviewer has explicitly acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment.\n- [ ] Backport labels
+        are set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
\n + \
\n
\n\n
\n
\n
\n\n \n
\n \n \n \n\n \n
\n + \ \n romainkomorndatadog + \n\n pushed a commit\n that referenced\n this pull request\n\n + \ \n Aug 8, 2023\n + \ \n
\n \n
\n + \
\n
\n \n
\n
\n \n \"@Yun-Kim\"\n + \ \n \"@romainkomorndatadog\"\n + \
\n
\n\n\n \n\n + \
\n \n\n \n \n \n\n \n\n
\n\n
\n
\n\n \n
\n + \ \n b38e5ce\n \n
\n
\n + \
\n
\n
#6412
+        changed our circleci configuration setup to be dynamic, but this\ninadvertently
+        removed the `coverage` and `riot_run_latest` circleci\npipeline parameters
+        from the main `.circleci/config.yml` file, which\nbreaks our nightly 1.x coverage
+        pipeline runs. This PR re-adds those\nparameters back and re-enables coverage
+        reporting.\n\nNote that `datastreams`, `langchain`, `elasticsearch`,\n`integration-snapshot`
+        test suites are still failing on 1.x nightly\ncoverage runs and will need
+        to be fixed.\n\n## Checklist\n\n- [x] Change(s) are motivated and described
+        in the PR description.\n- [x] Testing strategy is described if automated tests
+        are not included\nin the PR.\n- [x] Risk is outlined (performance impact,
+        potential for breakage,\nmaintainability, etc).\n- [x] Change is maintainable
+        (easy to change, telemetry, documentation).\n- [x] [Library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\nare
+        followed. If no release note is required, add label\n`changelog/no-changelog`.\n-
+        [x] Documentation is included (in-code, generated user docs, [public\ncorp
+        docs](https://github.com/DataDog/documentation/)).\n-
+        [x] Backport labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n##
+        Reviewer Checklist\n\n- [x] Title is accurate.\n- [x] No unnecessary changes
+        are introduced.\n- [x] Description motivates each change.\n- [x] Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges
+        unless absolutely necessary.\n- [x] Testing strategy adequately addresses
+        listed risk(s).\n- [x] Change is maintainable (easy to change, telemetry,
+        documentation).\n- [x] Release note makes sense to a user of the library.\n-
+        [x] Reviewer has explicitly acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment.\n- [x] Backport labels
+        are set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
\n + \
\n
\n\n
\n
\n
\n\n\n\n
\n\n\n\n \n
\n
\n \n
+ \
\n\n\n\n \n\n
\n + \
\n
\n \n Sign up for free\n to join + this conversation on GitHub.\n Already have an account?\n Sign + in to comment\n\n\n \n
\n\n
\n
\n \n\n\n + \
\n
\n\n\n \n
\n \n
\n \n
\n Reviewers\n
\n\n \n\n\n + \

\n \n\n \n \"@brettlangdon\"\n \n brettlangdon\n\n\n\n + \ \n + \ \n \n + \ \n\n \n \n brettlangdon approved these changes\n\n + \

\n

\n \n\n \n \"@majorgreys\"\n \n majorgreys\n\n\n + \ Awaiting requested review from majorgreys\n\n + \ majorgreys is a code owner automatically + assigned from DataDog/apm-core-python\n\n \n

\n + \

\n \n\n \n \"@jbertran\"\n \n jbertran\n\n\n + \ Awaiting requested review from jbertran\n\n + \ jbertran was automatically assigned from + DataDog/apm-framework-integrations-reviewers-py\n\n \n

\n + \

\n \n\n \n \"@emmettbutler\"\n \n emmettbutler\n\n\n + \ Awaiting requested review from emmettbutler\n\n\n + \ \n

\n\n \n
\n\n
\n\n\n
\n
\n\n \n
\n Assignees\n + \
\n\n\n \n\n + \ No one assigned\n\n\n\n
\n\n\n \n\n \n\n\n
\n Labels\n
\n\n\n
\n \n\n changelog/no-changelog\n\n + \ A changelog entry is not required for + this PR.\n\n
\n\n
\n\n\n \n\n
\n
\n
\n Projects\n + \
\n\n
\n
\n\n None yet\n\n\n\n
\n\n\n + \ \n
\n
\n \n
\n Milestone\n + \
\n\n No milestone\n\n
\n\n\n \n \n \n
\n
\n \n
\n \n
\n Development\n + \
\n\n\n \n\n

Successfully merging this pull request may + close these issues.

\n\n\n \n\n
+ \
\n
\n
\n\n \n \n\n + \ \n\n \n
\n + \
\n
\n 4 participants\n
\n \n
\n
\n\n\n\n + \ \n\n \n\n\n\n\n \n\n\n\n\n\n \n \n \n + \ \n\n\n + \ \n\n\n \n\n\n\n\n \n \n\n + \ \n\n
\n

Footer

\n\n \n\n\n
\n
\n \n \n \n\n\n + \ \n © 2024 GitHub, Inc.\n \n
\n\n + \ \n
\n
\n\n\n\n\n \n\n\n \n\n + \ \n\n
\n + \
\n
\n
\n\n \n\n\n\n\n\n \n\n
\n + \
\n \n\n\n" + headers: + Accept-Ranges: + - bytes + Cache-Control: + - no-cache + Content-Security-Policy: + - 'default-src ''none''; base-uri ''self''; child-src github.com/assets-cdn/worker/ + github.com/webpack/ github.com/assets/ gist.github.com/assets-cdn/worker/; + connect-src ''self'' uploads.github.com www.githubstatus.com collector.github.com + raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com + github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com + *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com + objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com + proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com + wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ + productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ + productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ + productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ + productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ + productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ + productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ + productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ + productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ + productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ + productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com + github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com + 
wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com + api.business.githubcopilot.com api.enterprise.githubcopilot.com; font-src + github.githubassets.com; form-action ''self'' github.com gist.github.com copilot-workspace.githubnext.com + objects-origin.githubusercontent.com; frame-ancestors ''none''; frame-src + viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src + ''self'' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com + identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com + github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ + user-images.githubusercontent.com/ private-user-images.githubusercontent.com + opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com + customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com + *.githubusercontent.com; manifest-src ''self''; media-src github.com user-images.githubusercontent.com/ + secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com + github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src + github.githubassets.com; style-src ''unsafe-inline'' github.githubassets.com; + upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ github.com/webpack/ + github.com/assets/ gist.github.com/assets-cdn/worker/' + Content-Type: + - text/html; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:36 GMT + Referrer-Policy: + - no-referrer-when-downgrade + Server: + - GitHub.com + Set-Cookie: + - 
_gh_sess=fQw%2BTZMS4QYZ%2FTA4MNOPSJubEJj6%2B4YAbvZcDJw6R8TTK%2BMVMvH7EZQtu30ktX%2By%2FA6TPcH4dFe9WAR0%2B6WdXM5LeWe7eUOeosO%2FKdcYGMtaudvPV7Tjrv8NPxefhK8GYTzCAI0TN6iQR7CC7S4bKt21Me3zMtaqQlfrbOvexXVbatPyfKM1pSwdQDSYNgXgZpvz6FpudZu8Ito5%2FSqD%2F6P%2B%2Foq57qdkGtm98SrAr1VET3ZWzxV9a2jYhwXfpCKzqaZa4CrIRiFSjs65m6mZvg%3D%3D--qdQ52Vjfe00bTqax--8KxHEOVAdiZ0ixENBtHaQg%3D%3D; + Path=/; HttpOnly; Secure; SameSite=Lax + - _octo=GH1.1.1954075846.1734014555; Path=/; Domain=github.com; Expires=Fri, + 12 Dec 2025 14:42:35 GMT; Secure; SameSite=Lax + - logged_in=no; Path=/; Domain=github.com; Expires=Fri, 12 Dec 2025 14:42:35 + GMT; HttpOnly; Secure; SameSite=Lax + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Transfer-Encoding: + - chunked + Vary: + - X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, Accept-Encoding, Accept, + X-Requested-With + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Request-Id: + - ED1F:3C75F0:23D0577:323E7BD:675AF65B + X-XSS-Protection: + - '0' + connection: + - close + server-timing: + - pull_request_layout-fragment;desc="pull_request_layout fragment";dur=259.185408,conversation_content-fragment;desc="conversation_content + fragment";dur=1167.36918,conversation_sidebar-fragment;desc="conversation_sidebar + fragment";dur=278.203377,nginx;desc="NGINX";dur=1.232025,glb;desc="GLB";dur=3.090931 + x-voltron-version: + - 69a2227 + status: + code: 200 + message: OK +- request: + body: null + headers: + Connection: + - close + Host: + - github.com + method: GET + uri: https://github.com/DataDog/dd-trace-py/pull/11534 + response: + body: + string: "\n\n\n\n\n\n\n\n\n\n\n\n \n \n + \ \n \n \n \n + \ \n + \ \n\n + \ \n\n \n\n \n \n \n \n \n\n\n \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n\n\n\n\n fix(asm): add global states to ensure patching once [backport + 2.15] by christophe-papazian \xB7 Pull Request #11534 \xB7 
DataDog/dd-trace-py + \xB7 GitHub\n\n\n\n \n \n \n\n \n \n\n\n + \ \n\n\n \n\n\n \n \n\n \n \n\n + \ \n\n\n\n \n\n \n\n\n\n\n \n\n \n\n \n\n + \ \n\n \n\n \n\n \n \n \n\n \n \n \n\n\n\n\n \n\n\n\n + \ \n\n\n \n \n \n \n\n \n\n \n + \ \n\n + \ \n\n\n\n \n\n \n\n\n \n\n \n\n \n \n + \ \n\n\n\n\n\n \n\n + \ \n\n \n
\n \n\n\n
\n Skip to content\n\n + \ \n \n + \ \n \n \n\n\n\n\n\n\n\n\n\n \n \n + \
\n\n\n\n\n\n + \ \n\n \n\n \n\n\n
\n

Navigation Menu

\n\n \n\n + \
\n
\n
\n + \ \n
\n\n \n + \ \n + \ \n\n + \ \n\n
\n \n Sign in\n \n
\n
\n\n\n + \
\n
\n + \ \n\n
\n \n\n\n\n \n \n
\n \n \n\n + \
\n Search + or jump to...\n
\n + \ \n\n + \
\n \n\n \n\n \n
\n \n + \

Search + code, repositories, users, issues, pull requests...

\n
\n \n
+ \
\n
\n \n
\n \n \n \n \n \n\n \n
\n
\n
\n
\n + \ \n
\n + \
\n Clear\n + \ \n\n + \
\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \
\n \n + \
\n + \
\n
\n\n \n + \
\n
\n\n
\n
\n
\n \n
\n + \ \n\n \n
\n + \
\n
\n + \

\n Provide + feedback\n

\n \n
\n
\n + \ \n
\n
\n + \ \n
\n \n + \
\n

We read every piece of feedback, and take your input very + seriously.

\n \n \n + \ \n
\n
\n + \ \n
\n\n \n \n\n + \ \n
\n
\n + \
\n

\n Saved searches\n

\n + \

Use + saved searches to filter your results more quickly

\n
\n
\n \n + \
\n
\n \n
\n \n + \
\n\n \n\n
\n + \
\n
\n\n
\n + \
\n \n
\n + \
\n
\n\n\n
\n \n Sign in\n \n + \
\n\n \n Sign + up\n \n \n
\n + \
\n
\n \n\n\n \n \n\n + \
\n\n\n\n\n\n\n\n\n + \
\n\n\n + \ \n\n\n + \ \n
\n\n\n + \ \n\n\n\n\n\n\n \n
\n
\n \n \n\n\n\n + \ \n \n\n \n\n\n\n\n\n\n \n
\n\n
\n\n + \
\n \n
\n + \ \n \n\n + \ \n \n + \ \n DataDog\n + \ \n /\n + \ \n dd-trace-py\n \n\n Public\n
\n\n\n + \
\n\n
\n \n\n + \
\n
\n\n
\n
\n\n\n \n\n + \
\n\n \n\n\n\n\n
\n \n\n\n\n \n \n
\n \n\n
\n \n \n \n\n
\n
\n
\n\n \n
\n \n \n New issue\n \n \n + \
\n
\n \n \n\n
\n\n
\n

\n Have a question + about this project? Sign up for a free GitHub account to open an + issue and contact its maintainers and the community.\n

\n\n \n\n

By + clicking “Sign up for GitHub”, you agree to our terms of service + and\n privacy statement. We\u2019ll occasionally send you + account related emails.

\n\n

\n + \ Already on GitHub?\n Sign + in\n to your account\n

\n
\n\n
\n
\n
\n + \ \n + \
\n\n

\n fix(asm): + add global states to ensure patching once [backport 2.15]\n #11534\n

\n
\n
\n\n + \
\n + \
\n + \ \n Merged\n\n + \
\n\n\n\n\n
\n gnufede\n merged 3 commits into\n\n\n 2.15\n\nfrom\n\nbackport-11522-to-2.15\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Nov 26, + 2024\n\n\n
\n
\n\n\n \n\n\n\n
\n
\n
\n
\n + \
\n \n Merged\n\n + \
\n\n\n\n\n
\n + \

\n \n fix(asm): add global states to ensure patching once [backport + 2.15]\n \n #11534\n

\n\n + \
\n gnufede\n merged 3 commits into\n\n\n 2.15\n\nfrom\n\nbackport-11522-to-2.15\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Nov 26, + 2024\n\n\n
\n
\n
\n + \
\n
\n
\n
\n
\n\n\n\n + \ \n
\n
\n \n \n +74\n + \ \n \n \u221210\n + \ \n \n \n + \ \n \n
\n\n \n
\n\n\n\n
\n + \
\n

Conversation

\n + \ \n \n\n\n \n\n
\n\n
\n \"christophe-papazian\"\n + \ \n \n
\n + \
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \"@christophe-papazian\"\n\n \n + \ christophe-papazian\n \n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n \n + \ \n\n
\n + \ \n
\n \n edited\n \n + \ \n \n \n\n
\n
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n
\n + \
\n \n \n + \
\n

Backport 81824b8 + from #11522 to 2.15.

\n

Ensure common patches for SCA and Exploit Prevention are loaded..

\n

only once
\nonly if exploit prevention is active or sca is + active
\nChanges:

\n

factorize load_common_modules logic + in ddtrace.appsec
\nboolean state for patch_common_module and enable_iast_propagation + to ensure they are only called once.
\nensure it's loaded after one click + activation
\nensure it's properly loaded in unit tests if required
\nadd + some failsafe for iast in wrap_open for importerror
\nupdate an iast test + to reflect that common_modules is loaded in the test by default.
\nAPPSEC-55997

\n

Checklist

\n
    \n
  • PR author has checked that all the criteria below are met
  • \n
  • The + PR description includes an overview of the change
  • \n
  • The PR description + articulates the motivation for the change
  • \n
  • The change includes tests + OR the PR description describes a testing strategy
  • \n
  • The PR description + notes risks associated with the change, if any
  • \n
  • Newly-added code + is easy to change
  • \n
  • The change follows the library release note guidelines
  • \n
  • The change + includes or references documentation updates if necessary
  • \n
  • Backport + labels are set (if applicable)
  • \n
\n

Reviewer Checklist

\n
    \n
  • Reviewer + has checked that all the criteria below are met
  • \n
  • Title is accurate
  • \n
  • All + changes are related to the pull request's stated goal
  • \n
  • Avoids breaking + API changes
  • \n
  • Testing strategy adequately addresses + listed risks
  • \n
  • Newly-added code is easy to change
  • \n
  • Release + note makes sense to a user of the library
  • \n
  • If necessary, author has + acknowledged and discussed the performance implications of this PR as reported + in the benchmarks PR comment
  • \n
  • Backport labels are set in a manner + that is consistent with the release branch maintenance policy
  • \n
\n
\n + \
\n \n
\n\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n\n\n \n\n \n
\n\n\n
\n + \ \n
\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@christophe-papazian\"\n + \
\n
\n\n
\n \n + \ fix(asm): + add global states to ensure patching once (#11522)\n + \ \n\n \n + \ + \ \n
\n\n
\n \n\n + \ \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ cd59645\n \n
\n
\n + \
\n
\n
Ensure common patches for SCA and Exploit Prevention are loaded..\n-
+        only once\n- only if exploit prevention is active or sca is active\n\nChanges:\n-
+        factorize load_common_modules logic in ddtrace.appsec\n- boolean state for
+        patch_common_module and enable_iast_propagation to\nensure they are only called
+        once.\n- ensure it's loaded after one click activation\n- ensure it's properly
+        loaded in unit tests if required\n- add some failsafe for iast in wrap_open
+        for importerror\n- update an iast test to reflect that common_modules is loaded
+        in the\ntest by default.\n\nAPPSEC-55997\n\n- [x] PR author has checked that
+        all the criteria below are met\n- The PR description includes an overview
+        of the change\n- The PR description articulates the motivation for the change\n-
+        The change includes tests OR the PR description describes a testing\nstrategy\n-
+        The PR description notes risks associated with the change, if any\n- Newly-added
+        code is easy to change\n- The change follows the [library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\n-
+        The change includes or references documentation updates if necessary\n- Backport
+        labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n-
+        [x] Reviewer has checked that all the criteria below are met\n- Title is accurate\n-
+        All changes are related to the pull request's stated goal\n- Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges\n-
+        Testing strategy adequately addresses listed risks\n- Newly-added code is
+        easy to change\n- Release note makes sense to a user of the library\n- If
+        necessary, author has acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment\n- Backport labels are
+        set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)\n\n(cherry
+        picked from commit 81824b8)
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \n
\n + \
\"@christophe-papazian\"\n christophe-papazian\n\n\n marked + this pull request as ready for review\n\n November + 25, 2024 16:51 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@christophe-papazian\"\n christophe-papazian\n\n\n requested + review from\n a team\n\n + \ as code owners\n\n\n + \ November 25, 2024 16:51 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@christophe-papazian\"\n christophe-papazian\n\n\n requested + review from\n gnufede + and \n emmettbutler\n\n\n\n + \ November 25, 2024 16:51 \n + \ \n
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@github-actions\"\n\n \n + \ \"GitHub\n \n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n github-actions\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

CODEOWNERS have + been resolved as:

\n
releasenotes/notes/exploit_prevention_patch_fix-1bdd7540e1d085d8.yaml
+        \  @DataDog/apm-python\nddtrace/_monkey.py                                                      @DataDog/apm-core-python\nddtrace/appsec/__init__.py
+        \                                             @DataDog/asm-python\nddtrace/appsec/_common_module_patches.py
+        \                               @DataDog/asm-python\nddtrace/appsec/_iast/__init__.py
+        \                                       @DataDog/asm-python\nddtrace/appsec/_remoteconfiguration.py
+        \                                 @DataDog/asm-python\ntests/appsec/integrations/test_flask_telemetry.py
+        \                      @DataDog/asm-python\ntests/utils.py                                                          @DataDog/python-guild\n
\n\n + \
\n
\n\n\n
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n \n
\n\n
\n + \ \"@datadog-dd-trace-py-rkomorn\"\n\n
\n\n\n + \
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n datadog-dd-trace-py-rkomorn\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n + \ \n \n\n
\n \n
\n + \ \n edited\n \n \n \n + \ \n\n
\n + \
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Datadog Report

\n

Branch report: + backport-11522-to-2.15
\nCommit + report: c476a58
\nTest + service: dd-trace-py

\n

\u2705 + 0 Failed, 592 Passed, 694 Skipped, 19m 30.54s Total duration (15m 23.31s time + saved)

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"gnufede\"\n + \
\n \n
\n + \
\n + \ \n gnufede\n \n\n + \ \n\n approved these changes\n\n\n \n \n + \ Nov + 25, 2024\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n
\n\n + \
\n \n
\n + \ \n
\n + \ \n
\n
\n \"@gnufede\"\n gnufede\n\nenabled + auto-merge (squash)\n\n November + 25, 2024 17:32 \n \n
\n
\n\n\n
\n\n + \
\n \n \n
\n\n
\n + \ \"@pr-commenter\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n pr-commenter\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n + \ \n \n\n
\n \n
\n + \ \n edited\n \n \n \n + \ \n\n
\n + \
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Benchmarks

\n

Benchmark execution + time: 2024-11-26 21:13:50

\n

Comparing candidate commit + c476a58 + in PR branch backport-11522-to-2.15 with + baseline commit b462888 + in branch 2.15.

\n

Found + 0 performance improvements and 0 performance regressions! Performance is the + same for 371 metrics, 53 unstable metrics.

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"erikayasuda\"\n + \
\n \n
\n + \
\n + \ \n erikayasuda\n + \ \n\n \n\n approved these changes\n\n\n \n \n + \ Nov + 26, 2024\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n
\n\n + \
\n + \ \n
\n
\n \n
\n \n
\n + \
christophe-papazian\n \nand others\n + \ added 2 commits\n November + 26, 2024 18:39
\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@christophe-papazian\"\n + \
\n
\n\n \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 3ac9ef8\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@erikayasuda\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ c476a58\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n
\n + \ \n + \ \n\n + \
\n
\n\n \n + \ \"@gnufede\"\n gnufede\n\n\n\n + \ merged commit 2d6800f\n into\n\n \n \n 2.15\n \n\n\n Nov 26, 2024\n\n
\n 584 checks passed\n
\n\n
\n + \ \n \n + \ \n \n + \ \n\n + \ \n
\n
\n
\n\n
\n\n + \
\n \n
\n \n
\n
\"@gnufede\"\n gnufede\n\n\n + \ \n deleted the\n \n + \ \n backport-11522-to-2.15\n \n branch\n\n + \ November 26, 2024 21:16 \n + \ \n
\n
\n\n\n
\n\n\n\n\n\n \n
\n
\n \n
+ \
\n\n\n\n
\n\n
\n + \
\n
\n \n Sign up for free\n to join + this conversation on GitHub.\n Already have an account?\n Sign + in to comment\n\n\n \n
\n\n
\n
\n
\n\n
\n + \
\n
\n\n\n \n
\n \n
\n \n
\n Reviewers\n
\n\n \n\n\n + \

\n \n\n \n \"@erikayasuda\"\n \n erikayasuda\n\n\n\n + \ \n + \ \n \n + \ \n\n \n \n erikayasuda approved these changes\n\n + \

\n

\n \n\n \n \"@gnufede\"\n \n gnufede\n\n\n\n \n + \ \n \n + \ \n\n \n \n gnufede approved these changes\n\n + \

\n

\n \n\n \n \"@emmettbutler\"\n \n emmettbutler\n\n\n + \ Awaiting requested review from emmettbutler\n\n + \ emmettbutler is a code owner automatically + assigned from DataDog/apm-python\n\n \n

\n\n \n
\n\n
\n\n\n + \
\n
\n\n \n
\n Assignees\n + \
\n\n\n \n\n + \ No one assigned\n\n\n\n
\n\n\n \n\n \n\n\n
\n Labels\n
\n\n\n
\n None yet\n
\n\n
\n\n\n \n\n
\n
\n
\n Projects\n + \
\n\n
\n
\n\n None yet\n\n\n\n
\n\n\n + \ \n
\n
\n \n
\n Milestone\n + \
\n\n No milestone\n\n
\n\n\n \n \n \n
\n
\n \n
\n \n
\n Development\n + \
\n\n\n \n\n

Successfully merging this pull request may + close these issues.

\n\n\n \n\n
+ \
\n
\n
\n\n \n \n\n + \ \n\n \n
\n + \
\n
\n 3 participants\n
\n \n
\n
\n\n\n\n + \ \n\n \n\n\n\n\n \n\n
\n\n\n\n \n \n \n + \ \n\n\n + \ \n\n\n \n\n\n\n\n \n \n\n + \ \n\n
\n

Footer

\n\n \n\n\n
\n
\n \n \n \n\n\n + \ \n © 2024 GitHub, Inc.\n \n
\n\n + \ \n
\n
\n\n\n\n\n \n\n\n \n\n + \ \n\n
\n + \
\n
\n
\n\n \n\n\n\n\n\n \n\n
\n + \
\n \n\n\n" + headers: + Accept-Ranges: + - bytes + Cache-Control: + - no-cache + Content-Security-Policy: + - 'default-src ''none''; base-uri ''self''; child-src github.com/assets-cdn/worker/ + github.com/webpack/ github.com/assets/ gist.github.com/assets-cdn/worker/; + connect-src ''self'' uploads.github.com www.githubstatus.com collector.github.com + raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com + github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com + *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com + objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com + proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com + wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ + productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ + productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ + productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ + productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ + productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ + productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ + productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ + productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ + productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ + productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com + github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com + 
wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com + api.business.githubcopilot.com api.enterprise.githubcopilot.com; font-src + github.githubassets.com; form-action ''self'' github.com gist.github.com copilot-workspace.githubnext.com + objects-origin.githubusercontent.com; frame-ancestors ''none''; frame-src + viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src + ''self'' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com + identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com + github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ + user-images.githubusercontent.com/ private-user-images.githubusercontent.com + opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com + customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com + *.githubusercontent.com; manifest-src ''self''; media-src github.com user-images.githubusercontent.com/ + secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com + github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src + github.githubassets.com; style-src ''unsafe-inline'' github.githubassets.com; + upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ github.com/webpack/ + github.com/assets/ gist.github.com/assets-cdn/worker/' + Content-Type: + - text/html; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:37 GMT + Referrer-Policy: + - no-referrer-when-downgrade + Server: + - GitHub.com + Set-Cookie: + - 
_gh_sess=LtiHLNx8mstCD1%2F8GdlLK3Ek4%2FUx0Fe2Z5G%2BgyD3AJIfkjlnrgBVvR4nRGY7DTatKP%2Bou1B2HQOEbvPrmsRQSzNr4QrkXD%2B%2BoelH3OrGoVb5p8iCoqQMgEy0wWGa1LZNg6ElbtORrY%2BOTZc3pcswIwJXzwyf5B41ot6LyczBcI7LxdQXLwION06Cw9M4GChczVf00HfGJq85K%2FijVuPAL%2BSNpc0CpSymS4zbxOOTeM85%2BMUXqmgfjypU8Hdl1TUYqKHqDF25MpY1LOSlKhlLLw%3D%3D--xSnv%2BlNibojh5RSX--pzM3%2Fm4gngMObk6H3%2FTOfw%3D%3D; + Path=/; HttpOnly; Secure; SameSite=Lax + - _octo=GH1.1.1210734268.1734014556; Path=/; Domain=github.com; Expires=Fri, + 12 Dec 2025 14:42:36 GMT; Secure; SameSite=Lax + - logged_in=no; Path=/; Domain=github.com; Expires=Fri, 12 Dec 2025 14:42:36 + GMT; HttpOnly; Secure; SameSite=Lax + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Transfer-Encoding: + - chunked + Vary: + - X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, Accept-Encoding, Accept, + X-Requested-With + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Request-Id: + - ED26:1035B4:234099A:312F92C:675AF65C + X-XSS-Protection: + - '0' + connection: + - close + server-timing: + - pull_request_layout-fragment;desc="pull_request_layout fragment";dur=450.768495,conversation_content-fragment;desc="conversation_content + fragment";dur=576.513283,conversation_sidebar-fragment;desc="conversation_sidebar + fragment";dur=305.288275,nginx;desc="NGINX";dur=1.093278,glb;desc="GLB";dur=4.679312 + x-voltron-version: + - 69a2227 + status: + code: 200 + message: OK +- request: + body: null + headers: + Connection: + - close + Host: + - github.com + method: GET + uri: https://github.com/DataDog/dd-trace-py/pull/11690 + response: + body: + string: "\n\n\n\n\n\n\n\n\n\n\n\n \n \n + \ \n \n \n \n + \ \n + \ \n\n + \ \n\n \n\n \n \n \n \n \n\n\n \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n\n\n\n\n ci: store fake DD_API_KEY as a secret by brettlangdon \xB7 + Pull Request #11690 \xB7 DataDog/dd-trace-py \xB7 GitHub\n\n\n\n \n 
\n \n\n \n \n\n\n + \ \n\n\n \n\n\n \n \n\n \n \n\n + \ \n\n\n\n \n\n \n\n\n\n\n \n\n \n\n \n\n + \ \n\n \n\n \n\n \n \n \n\n \n \n \n\n\n\n\n \n\n\n\n + \ \n\n\n \n \n \n \n\n \n\n \n + \ \n\n + \ \n\n\n\n \n\n \n\n\n \n\n \n\n \n \n + \ \n\n\n\n\n\n \n\n + \ \n\n \n
\n \n\n\n
\n Skip to content\n\n + \ \n \n + \ \n \n \n\n\n\n\n \n \n + \
\n\n\n\n\n\n + \ \n\n \n\n \n\n\n
\n

Navigation Menu

\n\n \n\n + \
\n
\n
\n + \ \n
\n\n \n + \ \n + \ \n\n + \ \n\n
\n \n Sign in\n \n
\n
\n\n\n + \
\n
\n + \ \n\n
\n \n\n\n\n \n \n
\n \n \n\n + \
\n Search + or jump to...\n
\n + \ \n\n + \
\n \n\n \n\n \n
\n \n + \

Search + code, repositories, users, issues, pull requests...

\n
\n \n
+ \
\n
\n \n
\n \n \n \n \n \n\n \n
\n
\n
\n
\n + \ \n
\n + \
\n Clear\n + \ \n\n + \
\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \
\n \n + \
\n + \
\n
\n\n \n + \
\n
\n\n
\n
\n
\n \n
\n + \ \n\n \n
\n + \
\n
\n + \

\n Provide + feedback\n

\n \n
\n
\n + \ \n
\n
\n + \ \n
\n \n + \
\n

We read every piece of feedback, and take your input very + seriously.

\n \n \n + \ \n
\n
\n + \ \n
\n\n \n \n\n + \ \n
\n
\n + \
\n

\n Saved searches\n

\n + \

Use + saved searches to filter your results more quickly

\n
\n
\n \n + \
\n
\n \n
\n \n + \
\n\n \n\n
\n + \
\n
\n\n
\n + \
\n \n
\n + \
\n
\n\n\n
\n \n Sign in\n \n + \
\n\n \n Sign + up\n \n \n
\n + \
\n
\n \n\n\n \n \n\n + \
\n\n\n\n\n\n\n\n\n + \
\n\n\n + \ \n\n\n + \ \n
\n\n\n + \ \n\n\n\n\n\n\n \n
\n
\n \n \n\n\n\n + \ \n \n\n \n\n\n\n\n\n\n \n
\n\n
\n\n + \
\n \n
\n + \ \n \n\n + \ \n \n + \ \n DataDog\n + \ \n /\n + \ \n dd-trace-py\n \n\n Public\n
\n\n\n + \
\n\n
\n \n\n + \
\n
\n\n
\n
\n\n\n \n\n + \
\n\n \n\n\n\n\n
\n \n\n\n\n \n \n
\n \n\n
\n \n \n \n\n
\n
\n
\n\n \n
\n \n \n New issue\n \n \n + \
\n
\n \n \n\n
\n\n
\n

\n Have a question + about this project? Sign up for a free GitHub account to open an + issue and contact its maintainers and the community.\n

\n\n \n\n

By + clicking “Sign up for GitHub”, you agree to our terms of service + and\n privacy statement. We\u2019ll occasionally send you + account related emails.

\n\n

\n + \ Already on GitHub?\n Sign + in\n to your account\n

\n
\n\n
\n
\n
\n + \ \n + \
\n\n

\n ci: + store fake DD_API_KEY as a secret\n #11690\n + \

\n
\n
\n\n
\n
\n \n + Open\n\n
\n\n\n\n\n
\n brettlangdon\n\n wants to merge\n 1\n + \ commit into\n\n\n main\n\n + \
\n
\n + \ \n base:\n + \ main\n \n + \ \n \n + \ \n
\n
\n + \
\n Choose + a base branch\n \n
\n\n + \ \n
\n + \ \n
\n\n \n \n\n
\n \n\n \n\n \n\n\n
\n
\n \n + \ \n \n \n Loading\n\n + \
\n
\n\n \n\n\n \n\n + \
\n\n \n
\n + \
\n
\n
\n\n \n + \
\n
\n\n
\n \n
\n\nfrom\n\nbrettlangdon-patch-3\n \n \n \n\n \n \n\n + \
\n
\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n \n \n + \
\n\n\n + \
\n\n
\n
\n\n\n \n\n\n\n
\n
\n + \
\n + \
\n + \
\n \n Open\n\n
\n\n\n\n\n + \
\n

\n \n + \ ci: store fake DD_API_KEY as a secret\n \n #11690\n

\n\n
\n brettlangdon\n\n + \ wants to merge\n 1\n + \ commit into\n\n\n main\n\nfrom\n\nbrettlangdon-patch-3\n \n \n \n\n \n \n\n + \
\n
\n\n\n\n\n + \
\n
\n
\n
\n + \
\n
\n
\n
\n\n\n\n \n
\n
\n \n \n +2\n \n \n \u22122\n \n \n + \ \n \n \n + \
\n\n \n
\n\n\n\n
\n + \
\n

Conversation

\n + \ \n \n\n\n \n\n
\n\n
\n \"brettlangdon\"\n + \ \n \n
\n + \
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n Member\n\n\n \n\n
\n\n

\n
\n \"@brettlangdon\"\n\n \n brettlangdon\n \n\n \n\n \n\n + \ commented\n\n\n Dec 12, 2024\n\n\n \n + \ \n\n
\n + \ \n
\n \n edited\n \n + \ \n \n \n\n
\n
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n
\n + \
\n \n \n + \
\n

Checklist

\n
    \n
  • PR author + has checked that all the criteria below are met
  • \n
  • The PR description + includes an overview of the change
  • \n
  • The PR description articulates + the motivation for the change
  • \n
  • The change includes tests OR the PR + description describes a testing strategy
  • \n
  • The PR description notes + risks associated with the change, if any
  • \n
  • Newly-added code is easy + to change
  • \n
  • The change follows the library release note guidelines
  • \n
  • The change + includes or references documentation updates if necessary
  • \n
  • Backport + labels are set (if applicable)
  • \n
\n

Reviewer Checklist

\n
    \n
  • Reviewer + has checked that all the criteria below are met
  • \n
  • Title is accurate
  • \n
  • All + changes are related to the pull request's stated goal
  • \n
  • Avoids breaking + API changes
  • \n
  • Testing strategy adequately addresses + listed risks
  • \n
  • Newly-added code is easy to change
  • \n
  • Release + note makes sense to a user of the library
  • \n
  • If necessary, author has + acknowledged and discussed the performance implications of this PR as reported + in the benchmarks PR comment
  • \n
  • Backport labels are set in a manner + that is consistent with the release branch maintenance policy
  • \n
\n
\n + \
\n \n
\n\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n\n\n \n\n \n
\n\n\n
\n + \ \n
\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@brettlangdon\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ a6675d3\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n
\n \n + \ \n\n
\n + \
\n\n \n\n \"@brettlangdon\"\nbrettlangdon\n\n\n\n\n added\n the \n\n changelog/no-changelog\n\n A changelog + entry is not required for this PR.\n label\n\n\n Dec 12, 2024\n\n
\n
\n\n\n + \
\n \n
\n \n
\n + \
\"@brettlangdon\"\n brettlangdon\n\n\n requested review from\n + \ a team\n\n as code owners\n\n\n + \ December 12, 2024 13:39 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@brettlangdon\"\n brettlangdon\n\n\n requested review from\n + \ avara1986 + and \n erikayasuda\n\n\n\n + \ December 12, 2024 13:39 \n + \ \n
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@github-actions\"\n\n \n + \ \"GitHub\n \n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n github-actions\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

CODEOWNERS have + been resolved as:

\n
.github/workflows/system-tests.yml
+        \                                     @DataDog/python-guild @DataDog/apm-core-python\n
\n\n + \
\n
\n\n\n
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"romainkomorndatadog\"\n + \
\n \n
\n + \
\n + \ \n romainkomorndatadog\n + \ \n\n \n\n approved these changes\n\n\n \n \n + \ Dec + 12, 2024\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n
\n\n + \
\n \n \n
\n\n
\n + \ \"@datadog-dd-trace-py-rkomorn\"\n\n
\n\n\n + \
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n datadog-dd-trace-py-rkomorn\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Datadog Report

\n

Branch report: + brettlangdon-patch-3
\nCommit + report: a6675d3
\nTest + service: dd-trace-py

\n

\u2705 + 0 Failed, 55 Passed, 1413 Skipped, 1m 29.81s Total duration (35m 20.17s time + saved)

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n \n
\n\n
\n + \ \"@brettlangdon\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n Member\n\n\n \n\n Author\n\n\n + \
\n\n

\n
\n + \ \n\n \n brettlangdon\n + \ \n\n \n\n \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

/merge

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n \n
\n\n
\n + \ \"@dd-devflow\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n dd-devflow\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n \n\n
\n \n
\n + \ \n edited\n \n \n \n + \ \n\n
\n + \
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \ \n

Devflow running: /merge

\n

View all feedbacks in Devflow UI.

\n
\n

2024-12-12 13:54:30 UTC \u2139\uFE0F MergeQueue: + waiting for PR to be ready

\n

This merge request is not + mergeable yet, because of pending checks/missing approvals. It will be added + to the queue as soon as checks pass and/or get approvals.
\nNote: + if you pushed new commits since the last approval, you may need additional + approval.
\nYou can remove it from the waiting list with /remove + command.

\n

Use /merge -c + to cancel this operation!

\n
\n

2024-12-12 + 14:26:14 UTC \u2139\uFE0F MergeQueue: merge request + added to the queue

\n

The median merge time in main + is 34m.

\n

Use /merge -c + to cancel this operation!

\n
\n

\u23F3 + command still in progress ...

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n
\n \n + \ \n\n
\n + \
\n\n \n\n \"@dd-devflow\"\ndd-devflow\nbot\n\n\n\n added\n the \n\n mergequeue-status: waiting\n\n label\n\n\n Dec 12, 2024\n\n
\n
\n\n\n\n\n
\n\n + \
\n \n \n
\n\n
\n + \ \"@pr-commenter\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n pr-commenter\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Benchmarks

\n

Benchmark execution + time: 2024-12-12 14:24:20

\n

Comparing candidate commit + a6675d3 + in PR branch brettlangdon-patch-3 with + baseline commit 385d8e0 + in branch main.

\n

Found + 0 performance improvements and 0 performance regressions! Performance is the + same for 394 metrics, 2 unstable metrics.

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n
\n \n + \ \n\n
\n + \ \n
\n\n\n\n\n
\n\n\n\n\n\n + \ \n
\n
\n \n
+ \
\n\n\n\n
\n\n
\n + \
\n
\n \n Sign up for free\n to join + this conversation on GitHub.\n Already have an account?\n Sign + in to comment\n\n\n \n
\n\n
\n
\n
\n\n
\n + \
\n
\n\n\n \n
\n \n
\n \n
\n Reviewers\n
\n\n \n\n\n + \

\n \n\n \n \"@romainkomorndatadog\"\n \n romainkomorndatadog\n\n\n\n + \ \n + \ \n \n + \ \n\n \n \n romainkomorndatadog approved these changes\n\n + \

\n

\n \n\n \n \"@avara1986\"\n \n avara1986\n\n\n + \ Awaiting requested review from avara1986\n\n + \ avara1986 is a code owner automatically + assigned from DataDog/python-guild\n\n \n

\n

\n \n\n \n \"@erikayasuda\"\n \n erikayasuda\n\n\n + \ Awaiting requested review from erikayasuda\n\n + \ erikayasuda is a code owner automatically + assigned from DataDog/apm-core-python\n\n \n

\n\n + \ \n
\n\n
\n\n\n
\n
\n\n \n
\n Assignees\n + \
\n\n\n \n\n + \ No one assigned\n\n\n\n
\n\n\n \n\n \n\n\n
\n Labels\n
\n\n\n
\n \n\n changelog/no-changelog\n\n + \ A changelog entry is not required for + this PR.\n \n\n mergequeue-status: + in_progress\n\n\n
\n\n
\n\n\n \n\n
\n
\n
\n Projects\n + \
\n\n
\n
\n\n None yet\n\n\n\n
\n\n\n + \ \n
\n
\n \n
\n Milestone\n + \
\n\n No milestone\n\n
\n\n\n \n \n \n
\n
\n \n
\n \n
\n Development\n + \
\n\n\n \n\n

Successfully merging this pull request may + close these issues.

\n\n\n \n\n
+ \
\n
\n
\n\n \n \n\n + \ \n\n \n
\n + \
\n
\n 2 participants\n
\n \n
\n
\n\n\n\n + \ \n\n \n\n\n\n\n \n\n
\n\n\n
\n \n \n \n + \ \n\n\n + \ \n\n\n \n\n\n\n\n \n \n\n + \ \n\n
\n

Footer

\n\n \n\n\n
\n
\n \n \n \n\n\n + \ \n © 2024 GitHub, Inc.\n \n
\n\n + \ \n
\n
\n\n\n\n\n \n\n\n \n\n + \ \n\n
\n + \
\n
\n
\n\n \n\n\n\n\n\n \n\n
\n + \
\n \n\n\n" + headers: + Accept-Ranges: + - bytes + Cache-Control: + - no-cache + Content-Security-Policy: + - 'default-src ''none''; base-uri ''self''; child-src github.com/assets-cdn/worker/ + github.com/webpack/ github.com/assets/ gist.github.com/assets-cdn/worker/; + connect-src ''self'' uploads.github.com www.githubstatus.com collector.github.com + raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com + github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com + *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com + objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com + proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com + wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ + productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ + productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ + productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ + productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ + productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ + productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ + productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ + productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ + productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ + productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com + github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com + 
wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com + api.business.githubcopilot.com api.enterprise.githubcopilot.com; font-src + github.githubassets.com; form-action ''self'' github.com gist.github.com copilot-workspace.githubnext.com + objects-origin.githubusercontent.com; frame-ancestors ''none''; frame-src + viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src + ''self'' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com + identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com + github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ + user-images.githubusercontent.com/ private-user-images.githubusercontent.com + opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com + customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com + *.githubusercontent.com; manifest-src ''self''; media-src github.com user-images.githubusercontent.com/ + secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com + github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src + github.githubassets.com; style-src ''unsafe-inline'' github.githubassets.com; + upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ github.com/webpack/ + github.com/assets/ gist.github.com/assets-cdn/worker/' + Content-Type: + - text/html; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:38 GMT + Referrer-Policy: + - no-referrer-when-downgrade + Server: + - GitHub.com + Set-Cookie: + - 
_gh_sess=goPCFokfo9CoHjAGnWH6245viFzykZOSTQZe2I4w0VI8O%2FBqLC9Xv8AW%2F6ZjmrAmWBiSwR%2BJfSgAxkI4KR6iJ7iP7KTOza9Z%2Fx3f69HoNCXVVOHyocDogP%2Bkm1AiUdpG5y74PTCPFqrxrAFXC27mPRlmZoEWOfCSWgl4YRkTZv70BAdIcjfmqhFa%2BtQhB0TltjWeDdF8qyOXZzTY7EorwqYP%2BPT%2FJYz2v61wLYsHH22O6rrrwLYlwr2P3x6Yb3Bx2aKM6eK975vB0hXOQtNMug%3D%3D--y2NTdNqEEkcwaCVD--Ce4x%2FRlMrMinpyEeKuACLQ%3D%3D; + Path=/; HttpOnly; Secure; SameSite=Lax + - _octo=GH1.1.2015969099.1734014557; Path=/; Domain=github.com; Expires=Fri, + 12 Dec 2025 14:42:37 GMT; Secure; SameSite=Lax + - logged_in=no; Path=/; Domain=github.com; Expires=Fri, 12 Dec 2025 14:42:37 + GMT; HttpOnly; Secure; SameSite=Lax + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Transfer-Encoding: + - chunked + Vary: + - X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, Accept-Encoding, Accept, + X-Requested-With + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Request-Id: + - ED29:27C835:21B19AA:2F4A603:675AF65D + X-XSS-Protection: + - '0' + connection: + - close + server-timing: + - pull_request_layout-fragment;desc="pull_request_layout fragment";dur=412.175919,conversation_content-fragment;desc="conversation_content + fragment";dur=448.910543,conversation_sidebar-fragment;desc="conversation_sidebar + fragment";dur=302.334653,nginx;desc="NGINX";dur=1.331055,glb;desc="GLB";dur=3.067062 + x-voltron-version: + - 69a2227 + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/6388/files?page=1 + response: + body: + string: 
'[{"sha":"1325b0864ebc6d4c40970f698018ac2524fe4e33","filename":"ddtrace/debugging/_expressions.py","status":"modified","additions":2,"deletions":2,"changes":4,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/2eb060881fdd94f4f717ae19549b598317b74d30/ddtrace%2Fdebugging%2F_expressions.py","raw_url":"https://github.com/DataDog/dd-trace-py/raw/2eb060881fdd94f4f717ae19549b598317b74d30/ddtrace%2Fdebugging%2F_expressions.py","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/ddtrace%2Fdebugging%2F_expressions.py?ref=2eb060881fdd94f4f717ae19549b598317b74d30","patch":"@@ + -292,8 +292,8 @@ def _compile_operation(ast):\n \n def _compile_literal(ast):\n # + type: (DDASTType) -> Optional[List[Instr]]\n- # literal => | + true | false | \"string\"\n- if not isinstance(ast, (str, int, float, bool)):\n+ # + literal => | true | false | \"string\" | null\n+ if not (isinstance(ast, + (str, int, float, bool)) or ast is None):\n return None\n \n return + [Instr(\"LOAD_CONST\", ast)]"},{"sha":"b4517ad79f67a2b362360ae8e7e0b0b3fa2e4ea8","filename":"releasenotes/notes/fix-debugger-expressions-none-literal-30f3328d2e386f40.yaml","status":"added","additions":4,"deletions":0,"changes":4,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/2eb060881fdd94f4f717ae19549b598317b74d30/releasenotes%2Fnotes%2Ffix-debugger-expressions-none-literal-30f3328d2e386f40.yaml","raw_url":"https://github.com/DataDog/dd-trace-py/raw/2eb060881fdd94f4f717ae19549b598317b74d30/releasenotes%2Fnotes%2Ffix-debugger-expressions-none-literal-30f3328d2e386f40.yaml","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/releasenotes%2Fnotes%2Ffix-debugger-expressions-none-literal-30f3328d2e386f40.yaml?ref=2eb060881fdd94f4f717ae19549b598317b74d30","patch":"@@ + -0,0 +1,4 @@\n+---\n+fixes:\n+ - |\n+ dynamic instrumentation: handle + null literal in conditions and 
expressions."},{"sha":"3c4d96fe66b871238c02651af82d43a1ad8085c3","filename":"tests/debugging/test_expressions.py","status":"modified","additions":1,"deletions":0,"changes":1,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/2eb060881fdd94f4f717ae19549b598317b74d30/tests%2Fdebugging%2Ftest_expressions.py","raw_url":"https://github.com/DataDog/dd-trace-py/raw/2eb060881fdd94f4f717ae19549b598317b74d30/tests%2Fdebugging%2Ftest_expressions.py","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/tests%2Fdebugging%2Ftest_expressions.py?ref=2eb060881fdd94f4f717ae19549b598317b74d30","patch":"@@ + -72,6 +72,7 @@ def __getitem__(self, name):\n # Test argument predicates + and operations\n ({\"contains\": [{\"ref\": \"payload\"}, \"hello\"]}, + {\"payload\": \"hello world\"}, True),\n ({\"eq\": [{\"ref\": \"hits\"}, + True]}, {\"hits\": True}, True),\n+ ({\"eq\": [{\"ref\": \"hits\"}, + None]}, {\"hits\": None}, True),\n ({\"substring\": [{\"ref\": \"payload\"}, + 4, 7]}, {\"payload\": \"hello world\"}, \"hello world\"[4:7]),\n ({\"any\": + [{\"ref\": \"collection\"}, {\"isEmpty\": {\"ref\": \"@it\"}}]}, {\"collection\": + [\"foo\", \"bar\", \"\"]}, True),\n ({\"startsWith\": [{\"ref\": \"local_string\"}, + \"hello\"]}, {\"local_string\": \"hello world!\"}, True),"}]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '3264' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:38 GMT + ETag: + - '"85a10accfc7f3330efa4961171936a0e3ea39a94e59a1811461b17a9a610bdb4"' + 
Last-Modified: + - Sun, 08 Dec 2024 16:19:43 GMT + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED30:1C31C4:1AEE9EF:3582AA6:675AF65E + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4898' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '102' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/6388/files?page=2 + response: + body: + string: '[]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '2' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:38 GMT + ETag: + - '"4acd3c336ca9625e24fba0a2ea9cad06cf4693ace7e76d92c8a9a05f03c7b0cd"' + Last-Modified: + - Sun, 08 Dec 2024 16:19:43 GMT + Link: + - ; rel="prev", 
+ ; rel="last", + ; rel="first" + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED34:27B12E:1B9E8A9:36ED322:675AF65E + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4897' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '103' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/11690/files?page=1 + response: + body: + string: '[{"sha":"ce795db4fe24584e0a3c105f6f130071b1292cbe","filename":".github/workflows/system-tests.yml","status":"modified","additions":2,"deletions":2,"changes":4,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/a6675d3799af44382bd5b677c56a94843a6433aa/.github%2Fworkflows%2Fsystem-tests.yml","raw_url":"https://github.com/DataDog/dd-trace-py/raw/a6675d3799af44382bd5b677c56a94843a6433aa/.github%2Fworkflows%2Fsystem-tests.yml","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/.github%2Fworkflows%2Fsystem-tests.yml?ref=a6675d3799af44382bd5b677c56a94843a6433aa","patch":"@@ + -54,7 +54,7 @@ jobs:\n # system-tests requires an API_KEY, but it does + not have to be a valid key, as long as we don''t run a scenario\n # + that make assertion on backend 
data. Using a fake key allow to run system + tests on PR originating from forks.\n # If ever it''s needed, a valid + key exists in the repo, using ${{ secrets.DD_API_KEY }}\n- DD_API_KEY: + 1234567890abcdef1234567890abcdef\n+ DD_API_KEY: ${{ secrets.FAKE_DD_API_KEY + }}\n CMAKE_BUILD_PARALLEL_LEVEL: 12\n SYSTEM_TESTS_AWS_ACCESS_KEY_ID: + ${{ secrets.IDM_AWS_ACCESS_KEY_ID }}\n SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: + ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }}\n@@ -106,7 +106,7 @@ jobs:\n # + system-tests requires an API_KEY, but it does not have to be a valid key, + as long as we don''t run a scenario\n # that make assertion on backend + data. Using a fake key allow to run system tests on PR originating from forks.\n # + If ever it''s needed, a valid key exists in the repo, using ${{ secrets.DD_API_KEY + }}\n- DD_API_KEY: 1234567890abcdef1234567890abcdef\n+ DD_API_KEY: + ${{ secrets.FAKE_DD_API_KEY }}\n CMAKE_BUILD_PARALLEL_LEVEL: 12\n SYSTEM_TESTS_AWS_ACCESS_KEY_ID: + ${{ secrets.IDM_AWS_ACCESS_KEY_ID }}\n SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: + ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }}"}]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '1930' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:39 GMT + ETag: + - '"e91026bdc9aa216ff163739444e03dfcf4e719131166fd717d6e5a7eafbd54fe"' + Last-Modified: + - Thu, 12 Dec 2024 14:26:20 GMT + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; 
includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED39:1C31C4:1AEEBE6:3582E96:675AF65E + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4896' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '104' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/11690/files?page=2 + response: + body: + string: '[]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '2' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:39 GMT + ETag: + - '"4acd3c336ca9625e24fba0a2ea9cad06cf4693ace7e76d92c8a9a05f03c7b0cd"' + Last-Modified: + - Thu, 12 Dec 2024 14:26:20 GMT + Link: + - ; rel="prev", + ; rel="last", + ; rel="first" + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; 
includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED3C:38B93F:1B68A83:36861E8:675AF65F + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4895' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '105' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Connection: + - close + Host: + - github.com + method: GET + uri: https://github.com/DataDog/dd-trace-py/pull/6412 + response: + body: + string: "\n\n\n\n\n\n\n\n\n\n\n\n \n \n + \ \n \n \n \n + \ \n + \ \n\n + \ \n\n \n\n \n \n \n \n \n\n\n \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n\n\n\n\n ci: run the debugger suite only if necessary by P403n1x87 + \xB7 Pull Request #6412 \xB7 DataDog/dd-trace-py \xB7 GitHub\n\n\n\n + \ \n \n \n\n \n \n\n\n + \ \n\n\n \n\n\n \n \n\n \n \n\n + \ \n\n\n\n \n\n \n\n\n\n\n \n\n \n\n \n\n + \ \n\n \n\n \n\n \n \n \n\n \n \n \n\n\n\n\n \n\n\n\n + \ \n\n\n \n \n \n \n\n \n\n \n + \ \n\n + \ \n\n\n\n \n\n \n\n\n \n\n \n\n \n \n + \ \n\n\n\n\n\n \n\n + \ \n\n \n
\n \n\n\n
\n Skip to content\n\n + \ \n \n + \ \n \n \n\n\n\n\n\n\n\n\n\n \n \n + \
\n\n\n\n\n\n + \ \n\n \n\n \n\n\n
\n

Navigation Menu

\n\n \n\n + \
\n
\n
\n + \ \n
\n\n \n + \ \n + \ \n\n + \ \n\n
\n \n Sign in\n \n
\n
\n\n\n + \
\n
\n + \ \n\n
\n \n\n\n\n \n \n
\n \n \n\n + \
\n Search + or jump to...\n
\n + \ \n\n + \
\n \n\n \n\n \n
\n \n + \

Search + code, repositories, users, issues, pull requests...

\n
\n \n
+ \
\n
\n \n
\n \n \n \n \n \n\n \n
\n
\n
\n
\n + \ \n
\n + \
\n Clear\n + \ \n\n + \
\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \
\n \n + \
\n + \
\n
\n\n \n + \
\n
\n\n
\n
\n
\n \n
\n + \ \n\n \n
\n + \
\n
\n + \

\n Provide + feedback\n

\n \n
\n
\n + \ \n
\n
\n + \ \n
\n \n + \
\n

We read every piece of feedback, and take your input very + seriously.

\n \n \n + \ \n
\n
\n + \ \n
\n\n \n \n\n + \ \n
\n
\n + \
\n

\n Saved searches\n

\n + \

Use + saved searches to filter your results more quickly

\n
\n
\n \n + \
\n
\n \n
\n \n + \
\n\n \n\n
\n + \
\n
\n\n
\n + \
\n \n
\n + \
\n
\n\n\n
\n \n Sign in\n \n + \
\n\n \n Sign + up\n \n \n
\n + \
\n
\n \n\n\n \n \n\n + \
\n\n\n\n\n\n\n\n\n + \
\n\n\n + \ \n\n\n + \ \n
\n\n\n + \ \n\n\n\n\n\n\n \n
\n
\n \n \n\n\n\n + \ \n \n\n \n\n\n\n\n\n\n \n
\n\n
\n\n + \
\n \n
\n + \ \n \n\n + \ \n \n + \ \n DataDog\n + \ \n /\n + \ \n dd-trace-py\n \n\n Public\n
\n\n\n + \
\n\n
\n \n\n + \
\n
\n\n
\n
\n\n\n \n\n + \
\n\n \n\n\n\n\n
\n \n\n\n\n \n \n
\n \n\n
\n \n \n \n\n
\n
\n
\n\n \n
\n \n \n New issue\n \n \n + \
\n
\n \n \n\n
\n\n
\n

\n Have a question + about this project? Sign up for a free GitHub account to open an + issue and contact its maintainers and the community.\n

\n\n \n\n

By + clicking “Sign up for GitHub”, you agree to our terms of service + and\n privacy statement. We\u2019ll occasionally send you + account related emails.

\n\n

\n + \ Already on GitHub?\n Sign + in\n to your account\n

\n
\n\n
\n
\n
\n + \ \n + \
\n\n

\n ci: + run the debugger suite only if necessary\n #6412\n

\n
\n
\n\n
\n
\n \n + \ Merged\n\n
\n\n\n\n\n + \
\n P403n1x87\n + \ merged 7 commits into\n\n\n DataDog:1.x\n\nfrom\n\nP403n1x87:ci/debugger-suitespec\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Jul 25, + 2023\n\n\n
\n
\n\n\n \n\n\n\n
\n
\n
\n
\n + \
\n \n Merged\n\n + \
\n\n\n\n\n
\n + \

\n \n ci: run the debugger suite only if necessary\n \n + \ #6412\n

\n\n + \
\n P403n1x87\n merged 7 commits into\n\n\n DataDog:1.x\n\nfrom\n\nP403n1x87:ci/debugger-suitespec\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Jul 25, + 2023\n\n\n
\n
\n
\n + \
\n
\n
\n
\n
\n\n\n\n + \ \n + \ \n\n\n + \ \n\n\n
\n + \
\n

Conversation

\n + \ \n \n\n\n \n\n
\n\n
\n \"P403n1x87\"\n + \ \n \n
\n + \
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \"@P403n1x87\"\n\n \n + \ P403n1x87\n \n\n \n\n \n\n commented\n\n\n + \ Jul + 20, 2023\n\n\n \n + \ \n\n
\n + \ \n
\n \n edited by majorgreys\n + \ \n \n \n \n\n
\n
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n
\n + \
\n \n \n + \
\n

We introduce the concept + of suitespec as a way of describing how sources affect test runs. We use it + to ensure that the debugger tests run only if sources that the suite depends + on are modified by the current commit.

\n

Suitespec Implementation + Details

\n

The suitespec solution is based on a manual + configuration of of test suites. To simplify the declaration of file patterns + for test suites, one can make use of components, which essentially + are a logic collection of patterns. Test suite can then be declared as a list + of components to reflect their dependencies on these logic parts, and to DRY + the declaration itself by avoiding repetitions.

\n

Notes

\n
    \n
  • When the script fails for any reason, tests are run.
  • \n
  • It + is important that path patterns are listed correctly, or some tests might + not run when they are in fact supposed to.
  • \n
  • Best effort to determine + the correct list of changed files via the GitHub REST API. When that fails, + we fall back to the less accurate git diff + against the target branch.
  • \n
\n

Checklist

\n
    \n
  • Change(s) + are motivated and described in the PR description.
  • \n
  • Testing strategy is described if automated tests are not included + in the PR.
  • \n
  • Risk is outlined + (performance impact, potential for breakage, maintainability, etc).
  • \n
  • Change is maintainable (easy to change, telemetry, documentation).
  • \n
  • Library release note guidelines are followed. If no release + note is required, add label changelog/no-changelog.
  • \n
  • Documentation is included (in-code, generated user docs, public corp docs).
  • \n
  • Backport labels are set (if applicable)
  • \n
\n

Reviewer Checklist

\n
    \n
  • Title + is accurate.
  • \n
  • No unnecessary + changes are introduced.
  • \n
  • Description + motivates each change.
  • \n
  • Avoids + breaking API changes unless absolutely necessary.
  • \n
  • Testing strategy adequately addresses listed risk(s).
  • \n
  • Change is maintainable (easy to change, telemetry, documentation).
  • \n
  • Release note makes sense to a user of the library.
  • \n
  • Reviewer has explicitly acknowledged and discussed the performance + implications of this PR as reported in the benchmarks PR comment.
  • \n
  • Backport labels are set in a manner that is consistent with + the release branch maintenance policy
  • \n
\n
\n + \
\n \n
\n\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n\n\n \n\n \n
\n\n\n
\n \n
\n + \
\n \n \n\n
\n
\n\n + \ \n\n \"@P403n1x87\"\nP403n1x87\n\n\n\n\n added\n the \n\n changelog/no-changelog\n\n A changelog + entry is not required for this PR.\n label\n\n\n Jul 20, 2023\n\n
\n
\n\n\n\n\n
\n\n + \
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n riotfile.py\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 4 times, most recently\n from\n 8953a58 + \ to\n 575d15e + \ \n + \ Compare\n \n\n\n\n July 20, 2023 13:13 \n + \ \n
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n reviewed\n\n\n \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n
\n + \ \n scripts/needs_testrun.py\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@emmettbutler\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Collaborator\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n emmettbutler\n + \ \n\n \n\n \n\n commented\n\n\n Jul 20, 2023\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n + \ \n
\n + \

I love this idea!

\n
\n
\n\n\n
\n\n + \ \n\n
\n
\n + \
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n reviewed\n\n\n \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n + \
\n + \ \n \n
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n Member\n\n\n \n\n
\n\n

\n
\n \"@brettlangdon\"\n\n \n brettlangdon\n \n\n \n\n \n\n + \ left a comment\n\n\n\n\n \n
\n\n

\n
\n \n\n
\n
\n + \ \n
\n \n \n\n

Choose a reason for hiding this comment

\n\n + \

\n The reason will be displayed to describe this + comment to others. Learn more.\n + \

\n\n
\n \n \n
\n\n + \ \n
\n\n \n
\n

I know @gnufede was trying to get CI Visibility + running for this repo, if we go that route, we might be able to ITR ?

\n + \
\n
\n \n
\n\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n + \
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n
\n \n \n
\n
\n
\n + \ \n tests/.suitespec.json\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@P403n1x87\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n Author\n\n\n
\n\n

\n
\n \n\n \n + \ P403n1x87\n \n\n \n\n \n\n commented\n\n\n + \ Jul 20, 2023\n\n\n \n
\n\n + \

\n
\n\n\n
\n\n \n\n + \ \n \n \n \n + \ \n
\n
\n

I know @gnufede + was trying to get CI Visibility running for this repo, if we go that route, + we might be able to ITR ?

\n
\n

My understanding + is that ITR is a per-test rather than per-test-suite. So I see ITR improving + this even further rather than an alternative?

\n
\n
\n\n\n
\n\n + \ \n\n
\n
\n
\n \n \n
\n + \ emmettbutler reacted with thumbs up emoji\n + \
\n \n + \
\n
\n
\n
\n
\n + \
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 3 times, most recently\n from\n 713167a + \ to\n e8c3ecc + \ \n + \ Compare\n \n\n\n\n July 20, 2023 17:15 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@emmettbutler\"\n emmettbutler\n\n\n self-requested a review\n\n\n + \ July 20, 2023 21:23 \n + \ \n
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 20, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n + \ \n
\n \n
\n
\"@P403n1x87\"\n P403n1x87\n\n\n dismissed\n emmettbutler\u2019s stale review\n\n\n + \ via\n \n 4e53e79\n + \ \n\n July + 21, 2023 09:41 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n from\n e8c3ecc + \ to\n 4e53e79 + \ \n + \ Compare\n \n\n\n\n July 21, 2023 09:41 \n + \ \n
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 21, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n
\n + \ \n .circleci/config.yml\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 3 times, most recently\n from\n d2671c5 + \ to\n 19b0da0 + \ \n + \ Compare\n \n\n\n\n July 21, 2023 10:35 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n marked this pull request as + ready for review\n\n July + 21, 2023 10:41 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n a team\n\n as code owners\n\n\n + \ July 21, 2023 10:41 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n majorgreys, + \n jbertran, + \n brettlangdon, + \n emmettbutler + and \n a team\n\n\n\n July 21, 2023 10:41 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n from\n af236d7 + \ to\n a4c0000 + \ \n + \ Compare\n \n\n\n\n July 21, 2023 15:26 \n + \ \n
\n
\n\n\n
\n\n\n
\n + \
\n
\n
\n \n \n
\n
\n + \
\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n 2 times, most recently\n from\n c50870c + \ to\n e812418 + \ \n + \ Compare\n \n\n\n\n July 24, 2023 12:52 \n + \ \n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n
\n\n
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n \n
\n \"P403n1x87\"\n + \
\n \n
\n + \
\n + \ \n P403n1x87\n + \ \n\n \n\n commented\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n \n \nShow resolved\n + \ \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n + \ \n
\n \n
\n
\"@brettlangdon\"\n brettlangdon\n\n\n dismissed\n emmettbutler\u2019s stale review\n\n\n + \ via\n \n cdb1444\n + \ \n\n July + 24, 2023 16:44 \n \n
\n
\n\n\n
\n\n + \
\n \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n reviewed\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
+ \ \n + \
\n + \
\n \n .circleci/config.templ.yml\n\n + \ \n Outdated\n \n \n \nShow + resolved\n \n \nHide resolved\n + \
\n
\n
\n + \ \n \n \n \n\n \n
\n
\n\n\n\n\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n brettlangdon + and \n emmettbutler\n\n\n\n + \ July 24, 2023 18:57 \n + \ \n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n\n\n
\n \n
\n \n
\n \"emmettbutler\"\n + \
\n \n
\n + \
\n + \ \n emmettbutler\n + \ \n\n \n\n previously approved these changes\n\n\n + \ \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n + \
\n + \ \n \n
\n
\n
+ \
\n
\n\n\n\n\n
\n + \ \n
\n
\n \n
\n \n
\n + \
P403n1x87\n \n\n added 5 commits\n + \ July 24, 2023 22:13
\n
+ \
\n
\n + \ \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 0d844de\n \n
\n
\n + \
\n
\n
We introduce the concept of suitespec as a way of describing
+        how\nsources affect test runs. We use it to ensure that the debugger\ntests
+        run only if sources that the suite depends on are modified\nby the current
+        commit.
\n
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 1ffab15\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \
\n \n web + scraping FTW\n \n\n
\n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ a115763\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \
\n \n add + doctests\n \n\n
\n\n
\n \n\n + \ \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 4d0fb2e\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \
\n \n use + dynamic config\n \n\n
\n\n
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 690a7b1\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \n
\n
\"@P403n1x87\"\n P403n1x87\n\n\n dismissed stale reviews from + emmettbutler + and brettlangdon\n\n\n + \ via\n \n 690a7b1\n + \ \n\n July + 24, 2023 21:17 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n force-pushed\n + \ the\n \n \n ci/debugger-suitespec\n\n\n + \ \n branch\n from\n 5f1daca + \ to\n 690a7b1 + \ \n + \ Compare\n \n\n\n\n July 24, 2023 21:17 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@P403n1x87\"\n P403n1x87\n\n\n requested review from\n emmettbutler + and \n brettlangdon\n\n\n\n + \ July 24, 2023 21:17 \n + \ \n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \"brettlangdon\"\n + \
\n \n
\n + \
\n + \ \n brettlangdon\n + \ \n\n \n\n approved these changes\n\n\n \n \n + \ Jul + 24, 2023\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n\n\n + \
\n + \ \n
\n
\n \n
\n \n
\n + \
P403n1x87\n \n\n added 2 commits\n + \ July 25, 2023 09:06
\n
+ \
\n
\n + \ \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ f421ece\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@P403n1x87\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 3eacc26\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n
\n + \ \n + \ \n\n + \
\n
\n\n\n \"@P403n1x87\"\n P403n1x87\n\n\n\n merged commit f441242\n into\n\n \n \n DataDog:1.x\n + \ \n\n\n Jul 25, 2023\n\n
\n
\n\n
\n\n
\n
\n \n \n\n
\n\n
\n
\n \"@Yun-Kim\"\nYun-Kim\n\n\n\n mentioned this pull request\n \n Jul 26, 2023\n + \ \n
\n\n\n\n\n \n
\n \n \n \n\n \n \n\n + \ \n \n \n \n\n\n 16 + tasks\n
\n
\n\n\n\n
\n
\n\n + \ \n
\n \n + \ \n + \ \n\n \n
\n \n Yun-Kim \n\n added a commit\n that referenced\n + \ this pull request\n\n \n + \ Jul + 26, 2023\n \n
\n \n
\n + \
\n
\n \n
\n
\n \n \"@Yun-Kim\"\n + \
\n
\n\n\n \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n\n \n + \
\n \n 43497d1\n \n
\n
\n + \
\n
\n
#6412
+        changed our circleci configuration setup to be dynamic, but this\ninadvertently
+        removed the `coverage` and `riot_run_latest` circleci\npipeline parameters
+        from the main `.circleci/config.yml` file, which\nbreaks our nightly 1.x coverage
+        pipeline runs. This PR re-adds those\nparameters back and re-enables coverage
+        reporting.\n\nNote that `datastreams`, `langchain`, `elasticsearch`,\n`integration-snapshot`
+        test suites are still failing on 1.x nightly\ncoverage runs and will need
+        to be fixed.\n\n## Checklist\n\n- [x] Change(s) are motivated and described
+        in the PR description.\n- [x] Testing strategy is described if automated tests
+        are not included\nin the PR.\n- [x] Risk is outlined (performance impact,
+        potential for breakage,\nmaintainability, etc).\n- [x] Change is maintainable
+        (easy to change, telemetry, documentation).\n- [x] [Library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\nare
+        followed. If no release note is required, add label\n`changelog/no-changelog`.\n-
+        [x] Documentation is included (in-code, generated user docs, [public\ncorp
+        docs](https://github.com/DataDog/documentation/)).\n-
+        [x] Backport labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n##
+        Reviewer Checklist\n\n- [x] Title is accurate.\n- [x] No unnecessary changes
+        are introduced.\n- [x] Description motivates each change.\n- [x] Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges
+        unless absolutely necessary.\n- [x] Testing strategy adequately addresses
+        listed risk(s).\n- [x] Change is maintainable (easy to change, telemetry,
+        documentation).\n- [x] Release note makes sense to a user of the library.\n-
+        [x] Reviewer has explicitly acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment.\n- [x] Backport labels
+        are set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
\n + \
\n
\n\n
\n
\n
\n\n \n
\n \n \n \n\n \n
\n + \ \n romainkomorndatadog + \n\n pushed a commit\n that referenced\n this pull request\n\n + \ \n Aug 8, 2023\n + \ \n
\n \n
\n + \
\n
\n \n
\n
\n \n \"@P403n1x87\"\n + \ \n \"@romainkomorndatadog\"\n + \
\n
\n\n\n
\n + \ \n ci: + run the debugger suite only if necessary (#6412)\n + \ \n\n \n + \ \n \n\n
\n\n + \
\n \n\n \n \n \n\n \n\n
\n\n
\n
\n\n \n
\n + \ \n 6838e4b\n \n
\n
\n + \
\n
\n
We introduce the concept of suitespec as a way of describing
+        how sources\naffect test runs. We use it to ensure that the debugger tests
+        run only\nif sources that the suite depends on are modified by the current
+        commit.\n\n## Suitespec Implementation Details\n\nThe suitespec solution is
+        based on a manual configuration of of test\nsuites. To simplify the declaration
+        of file patterns for test suites,\none can make use of _components_, which
+        essentially are a logic\ncollection of patterns. Test suite can then be declared
+        as a list of\ncomponents to reflect their dependencies on these logic parts,
+        and to\nDRY the declaration itself by avoiding repetitions.\n\n## Notes\n\n-
+        When the script fails for any reason, tests are run.\n- It is important that
+        path patterns are listed correctly, or some tests\nmight not run when they
+        are in fact supposed to.\n- Best effort to determine the correct list of changed
+        files via the\nGitHub REST API. When that fails, we fall back to the less
+        accurate `git\ndiff` against the target branch.\n\n## Checklist\n\n- [x] Change(s)
+        are motivated and described in the PR description.\n- [x] Testing strategy
+        is described if automated tests are not included\nin the PR.\n- [x] Risk is
+        outlined (performance impact, potential for breakage,\nmaintainability, etc).\n-
+        [x] Change is maintainable (easy to change, telemetry, documentation).\n-
+        [x] [Library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\nare
+        followed. If no release note is required, add label\n`changelog/no-changelog`.\n-
+        [x] Documentation is included (in-code, generated user docs, [public\ncorp
+        docs](https://github.com/DataDog/documentation/)).\n-
+        [x] Backport labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n##
+        Reviewer Checklist\n\n- [ ] Title is accurate.\n- [ ] No unnecessary changes
+        are introduced.\n- [ ] Description motivates each change.\n- [ ] Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges
+        unless absolutely necessary.\n- [ ] Testing strategy adequately addresses
+        listed risk(s).\n- [ ] Change is maintainable (easy to change, telemetry,
+        documentation).\n- [ ] Release note makes sense to a user of the library.\n-
+        [ ] Reviewer has explicitly acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment.\n- [ ] Backport labels
+        are set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
\n + \
\n
\n\n
\n
\n
\n\n \n
\n \n \n \n\n \n
\n + \ \n romainkomorndatadog + \n\n pushed a commit\n that referenced\n this pull request\n\n + \ \n Aug 8, 2023\n + \ \n
\n \n
\n + \
\n
\n \n
\n
\n \n \"@Yun-Kim\"\n + \ \n \"@romainkomorndatadog\"\n + \
\n
\n\n\n \n\n + \
\n \n\n \n \n \n\n \n\n
\n\n
\n
\n\n \n
\n + \ \n b38e5ce\n \n
\n
\n + \
\n
\n
#6412
+        changed our circleci configuration setup to be dynamic, but this\ninadvertently
+        removed the `coverage` and `riot_run_latest` circleci\npipeline parameters
+        from the main `.circleci/config.yml` file, which\nbreaks our nightly 1.x coverage
+        pipeline runs. This PR re-adds those\nparameters back and re-enables coverage
+        reporting.\n\nNote that `datastreams`, `langchain`, `elasticsearch`,\n`integration-snapshot`
+        test suites are still failing on 1.x nightly\ncoverage runs and will need
+        to be fixed.\n\n## Checklist\n\n- [x] Change(s) are motivated and described
+        in the PR description.\n- [x] Testing strategy is described if automated tests
+        are not included\nin the PR.\n- [x] Risk is outlined (performance impact,
+        potential for breakage,\nmaintainability, etc).\n- [x] Change is maintainable
+        (easy to change, telemetry, documentation).\n- [x] [Library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\nare
+        followed. If no release note is required, add label\n`changelog/no-changelog`.\n-
+        [x] Documentation is included (in-code, generated user docs, [public\ncorp
+        docs](https://github.com/DataDog/documentation/)).\n-
+        [x] Backport labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n##
+        Reviewer Checklist\n\n- [x] Title is accurate.\n- [x] No unnecessary changes
+        are introduced.\n- [x] Description motivates each change.\n- [x] Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges
+        unless absolutely necessary.\n- [x] Testing strategy adequately addresses
+        listed risk(s).\n- [x] Change is maintainable (easy to change, telemetry,
+        documentation).\n- [x] Release note makes sense to a user of the library.\n-
+        [x] Reviewer has explicitly acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment.\n- [x] Backport labels
+        are set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
\n + \
\n
\n\n
\n
\n
\n\n\n\n
\n\n\n\n \n
\n
\n \n
+ \
\n\n\n\n \n\n
\n + \
\n
\n \n Sign up for free\n to join + this conversation on GitHub.\n Already have an account?\n Sign + in to comment\n\n\n \n
\n\n
\n
\n \n\n\n + \
\n
\n\n\n \n
\n \n
\n \n
\n Reviewers\n
\n\n \n\n\n + \

\n \n\n \n \"@brettlangdon\"\n \n brettlangdon\n\n\n\n + \ \n + \ \n \n + \ \n\n \n \n brettlangdon approved these changes\n\n + \

\n

\n \n\n \n \"@majorgreys\"\n \n majorgreys\n\n\n + \ Awaiting requested review from majorgreys\n\n + \ majorgreys is a code owner automatically + assigned from DataDog/apm-core-python\n\n \n

\n + \

\n \n\n \n \"@jbertran\"\n \n jbertran\n\n\n + \ Awaiting requested review from jbertran\n\n + \ jbertran was automatically assigned from + DataDog/apm-framework-integrations-reviewers-py\n\n \n

\n + \

\n \n\n \n \"@emmettbutler\"\n \n emmettbutler\n\n\n + \ Awaiting requested review from emmettbutler\n\n\n + \ \n

\n\n \n
\n\n
\n\n\n
\n
\n\n \n
\n Assignees\n + \
\n\n\n \n\n + \ No one assigned\n\n\n\n
\n\n\n \n\n \n\n\n
\n Labels\n
\n\n\n
\n \n\n changelog/no-changelog\n\n + \ A changelog entry is not required for + this PR.\n\n
\n\n
\n\n\n \n\n
\n
\n
\n Projects\n + \
\n\n
\n
\n\n None yet\n\n\n\n
\n\n\n + \ \n
\n
\n \n
\n Milestone\n + \
\n\n No milestone\n\n
\n\n\n \n \n \n
\n
\n \n
\n \n
\n Development\n + \
\n\n\n \n\n

Successfully merging this pull request may + close these issues.

\n\n\n \n\n
+ \
\n
\n
\n\n \n \n\n + \ \n\n \n
\n + \
\n
\n 4 participants\n
\n \n
\n
\n\n\n\n + \ \n\n \n\n\n\n\n \n\n\n\n\n\n \n \n \n + \ \n\n\n + \ \n\n\n \n\n\n\n\n \n \n\n + \ \n\n
\n

Footer

\n\n \n\n\n
\n
\n \n \n \n\n\n + \ \n © 2024 GitHub, Inc.\n \n
\n\n + \ \n
\n
\n\n\n\n\n \n\n\n \n\n + \ \n\n
\n + \
\n
\n
\n\n \n\n\n\n\n\n \n\n
\n + \
\n \n\n\n" + headers: + Accept-Ranges: + - bytes + Cache-Control: + - no-cache + Content-Security-Policy: + - 'default-src ''none''; base-uri ''self''; child-src github.com/assets-cdn/worker/ + github.com/webpack/ github.com/assets/ gist.github.com/assets-cdn/worker/; + connect-src ''self'' uploads.github.com www.githubstatus.com collector.github.com + raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com + github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com + *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com + objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com + proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com + wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ + productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ + productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ + productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ + productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ + productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ + productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ + productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ + productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ + productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ + productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com + github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com + 
wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com + api.business.githubcopilot.com api.enterprise.githubcopilot.com; font-src + github.githubassets.com; form-action ''self'' github.com gist.github.com copilot-workspace.githubnext.com + objects-origin.githubusercontent.com; frame-ancestors ''none''; frame-src + viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src + ''self'' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com + identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com + github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ + user-images.githubusercontent.com/ private-user-images.githubusercontent.com + opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com + customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com + *.githubusercontent.com; manifest-src ''self''; media-src github.com user-images.githubusercontent.com/ + secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com + github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src + github.githubassets.com; style-src ''unsafe-inline'' github.githubassets.com; + upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ github.com/webpack/ + github.com/assets/ gist.github.com/assets-cdn/worker/' + Content-Type: + - text/html; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:36 GMT + Referrer-Policy: + - no-referrer-when-downgrade + Server: + - GitHub.com + Set-Cookie: + - 
_gh_sess=l3QKY0YBtVa6g6vTxtaD7V81b1rqVKbVC2TuUprbrsBeLyGMxD93o4PKJuuuX8DsRurIz%2BgmK%2Bu2SsrLsbekGNnDfnlY8nmv6JixFA0imSoyuXwZ1hoQQntsqmb%2BY5H2ZmdVcxjGdx4KfXsgsWYyampVlxVtj8kcqBXpQ1EmwL8bxCXFb1Ua2ljpQIrEF0vAkXxAKjJvD9Nkk%2BoV9Oq9FDdyOTS5F09seblwdhXqyPUiRtK%2F47XQlwOGT%2Bbx3gQZd0o0tqUnOHebKKHm1e8WeA%3D%3D--cOTu%2FzH%2Bqi016usb--raQjHHicfYwN6TAxmM%2B1hg%3D%3D; + Path=/; HttpOnly; Secure; SameSite=Lax + - _octo=GH1.1.165126635.1734014562; Path=/; Domain=github.com; Expires=Fri, + 12 Dec 2025 14:42:42 GMT; Secure; SameSite=Lax + - logged_in=no; Path=/; Domain=github.com; Expires=Fri, 12 Dec 2025 14:42:42 + GMT; HttpOnly; Secure; SameSite=Lax + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Transfer-Encoding: + - chunked + Vary: + - X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, Accept-Encoding, Accept, + X-Requested-With + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Request-Id: + - ED48:2AD30D:249761D:33182B5:675AF662 + X-XSS-Protection: + - '0' + connection: + - close + server-timing: + - pull_request_layout-fragment;desc="pull_request_layout fragment";dur=259.185408,conversation_content-fragment;desc="conversation_content + fragment";dur=1167.36918,conversation_sidebar-fragment;desc="conversation_sidebar + fragment";dur=278.203377,nginx;desc="NGINX";dur=1.232025,glb;desc="GLB";dur=3.090931 + x-voltron-version: + - 69a2227 + status: + code: 200 + message: OK +- request: + body: null + headers: + Connection: + - close + Host: + - github.com + method: GET + uri: https://github.com/DataDog/dd-trace-py/pull/11534 + response: + body: + string: "\n\n\n\n\n\n\n\n\n\n\n\n \n \n + \ \n \n \n \n + \ \n + \ \n\n + \ \n\n \n\n \n \n \n \n \n\n\n \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n\n\n\n\n fix(asm): add global states to ensure patching once [backport + 2.15] by christophe-papazian \xB7 Pull Request #11534 \xB7 
DataDog/dd-trace-py + \xB7 GitHub\n\n\n\n \n \n \n\n \n \n\n\n + \ \n\n\n \n\n\n \n \n\n \n \n\n + \ \n\n\n\n \n\n \n\n\n\n\n \n\n \n\n \n\n + \ \n\n \n\n \n\n \n \n \n\n \n \n \n\n\n\n\n \n\n\n\n + \ \n\n\n \n \n \n \n\n \n\n \n + \ \n\n + \ \n\n\n\n \n\n \n\n\n \n\n \n\n \n \n + \ \n\n\n\n\n\n \n\n + \ \n\n \n
\n \n\n\n
\n Skip to content\n\n + \ \n \n + \ \n \n \n\n\n\n\n\n\n\n\n\n \n \n + \
\n\n\n\n\n\n + \ \n\n \n\n \n\n\n
\n

Navigation Menu

\n\n \n\n + \
\n
\n
\n + \ \n
\n\n \n + \ \n + \ \n\n + \ \n\n
\n \n Sign in\n \n
\n
\n\n\n + \
\n
\n + \ \n\n
\n \n\n\n\n \n \n
\n \n \n\n + \
\n Search + or jump to...\n
\n + \ \n\n + \
\n \n\n \n\n \n
\n \n + \

Search + code, repositories, users, issues, pull requests...

\n
\n \n
+ \
\n
\n \n
\n \n \n \n \n \n\n \n
\n
\n
\n
\n + \ \n
\n + \
\n Clear\n + \ \n\n + \
\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \
\n \n + \
\n + \
\n
\n\n \n + \
\n
\n\n
\n
\n
\n \n
\n + \ \n\n \n
\n + \
\n
\n + \

\n Provide + feedback\n

\n \n
\n
\n + \ \n
\n
\n + \ \n
\n \n + \
\n

We read every piece of feedback, and take your input very + seriously.

\n \n \n + \ \n
\n
\n + \ \n
\n\n \n \n\n + \ \n
\n
\n + \
\n

\n Saved searches\n

\n + \

Use + saved searches to filter your results more quickly

\n
\n
\n \n + \
\n
\n \n
\n \n + \
\n\n \n\n
\n + \
\n
\n\n
\n + \
\n \n
\n + \
\n
\n\n\n
\n \n Sign in\n \n + \
\n\n \n Sign + up\n \n \n
\n + \
\n
\n \n\n\n \n \n\n + \
\n\n\n\n\n\n\n\n\n + \
\n\n\n + \ \n\n\n + \ \n
\n\n\n + \ \n\n\n\n\n\n\n \n
\n
\n \n \n\n\n\n + \ \n \n\n \n\n\n\n\n\n\n \n
\n\n
\n\n + \
\n \n
\n + \ \n \n\n + \ \n \n + \ \n DataDog\n + \ \n /\n + \ \n dd-trace-py\n \n\n Public\n
\n\n\n + \
\n\n
\n \n\n + \
\n
\n\n
\n
\n\n\n \n\n + \
\n\n \n\n\n\n\n
\n \n\n\n\n \n \n
\n \n\n
\n \n \n \n\n
\n
\n
\n\n \n
\n \n \n New issue\n \n \n + \
\n
\n \n \n\n
\n\n
\n

\n Have a question + about this project? Sign up for a free GitHub account to open an + issue and contact its maintainers and the community.\n

\n\n \n\n

By + clicking “Sign up for GitHub”, you agree to our terms of service + and\n privacy statement. We\u2019ll occasionally send you + account related emails.

\n\n

\n + \ Already on GitHub?\n Sign + in\n to your account\n

\n
\n\n
\n
\n
\n + \ \n + \
\n\n

\n fix(asm): + add global states to ensure patching once [backport 2.15]\n #11534\n

\n
\n
\n\n + \
\n + \
\n + \ \n Merged\n\n + \
\n\n\n\n\n
\n gnufede\n merged 3 commits into\n\n\n 2.15\n\nfrom\n\nbackport-11522-to-2.15\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Nov 26, + 2024\n\n\n
\n
\n\n\n \n\n\n\n
\n
\n
\n
\n + \
\n \n Merged\n\n + \
\n\n\n\n\n
\n + \

\n \n fix(asm): add global states to ensure patching once [backport + 2.15]\n \n #11534\n

\n\n + \
\n gnufede\n merged 3 commits into\n\n\n 2.15\n\nfrom\n\nbackport-11522-to-2.15\n \n \n \n\n \n \n\n + \
\n
\n\n\n + \ Nov 26, + 2024\n\n\n
\n
\n
\n + \
\n
\n
\n
\n
\n\n\n\n + \ \n
\n
\n \n \n +74\n + \ \n \n \u221210\n + \ \n \n \n + \ \n \n
\n\n \n
\n\n\n\n
\n + \
\n

Conversation

\n + \ \n \n\n\n \n\n
\n\n
\n \"christophe-papazian\"\n + \ \n \n
\n + \
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \"@christophe-papazian\"\n\n \n + \ christophe-papazian\n \n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n \n + \ \n\n
\n + \ \n
\n \n edited\n \n + \ \n \n \n\n
\n
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n
\n + \
\n \n \n + \
\n

Backport 81824b8 + from #11522 to 2.15.

\n

Ensure common patches for SCA and Exploit Prevention are loaded..

\n

only once
\nonly if exploit prevention is active or sca is + active
\nChanges:

\n

factorize load_common_modules logic + in ddtrace.appsec
\nboolean state for patch_common_module and enable_iast_propagation + to ensure they are only called once.
\nensure it's loaded after one click + activation
\nensure it's properly loaded in unit tests if required
\nadd + some failsafe for iast in wrap_open for importerror
\nupdate an iast test + to reflect that common_modules is loaded in the test by default.
\nAPPSEC-55997

\n

Checklist

\n
    \n
  • PR author has checked that all the criteria below are met
  • \n
  • The + PR description includes an overview of the change
  • \n
  • The PR description + articulates the motivation for the change
  • \n
  • The change includes tests + OR the PR description describes a testing strategy
  • \n
  • The PR description + notes risks associated with the change, if any
  • \n
  • Newly-added code + is easy to change
  • \n
  • The change follows the library release note guidelines
  • \n
  • The change + includes or references documentation updates if necessary
  • \n
  • Backport + labels are set (if applicable)
  • \n
\n

Reviewer Checklist

\n
    \n
  • Reviewer + has checked that all the criteria below are met
  • \n
  • Title is accurate
  • \n
  • All + changes are related to the pull request's stated goal
  • \n
  • Avoids breaking + API changes
  • \n
  • Testing strategy adequately addresses + listed risks
  • \n
  • Newly-added code is easy to change
  • \n
  • Release + note makes sense to a user of the library
  • \n
  • If necessary, author has + acknowledged and discussed the performance implications of this PR as reported + in the benchmarks PR comment
  • \n
  • Backport labels are set in a manner + that is consistent with the release branch maintenance policy
  • \n
\n
\n + \
\n \n
\n\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n\n\n \n\n \n
\n\n\n
\n + \ \n
\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@christophe-papazian\"\n + \
\n
\n\n
\n \n + \ fix(asm): + add global states to ensure patching once (#11522)\n + \ \n\n \n + \ + \ \n
\n\n
\n \n\n + \ \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ cd59645\n \n
\n
\n + \
\n
\n
Ensure common patches for SCA and Exploit Prevention are loaded..\n-
+        only once\n- only if exploit prevention is active or sca is active\n\nChanges:\n-
+        factorize load_common_modules logic in ddtrace.appsec\n- boolean state for
+        patch_common_module and enable_iast_propagation to\nensure they are only called
+        once.\n- ensure it's loaded after one click activation\n- ensure it's properly
+        loaded in unit tests if required\n- add some failsafe for iast in wrap_open
+        for importerror\n- update an iast test to reflect that common_modules is loaded
+        in the\ntest by default.\n\nAPPSEC-55997\n\n- [x] PR author has checked that
+        all the criteria below are met\n- The PR description includes an overview
+        of the change\n- The PR description articulates the motivation for the change\n-
+        The change includes tests OR the PR description describes a testing\nstrategy\n-
+        The PR description notes risks associated with the change, if any\n- Newly-added
+        code is easy to change\n- The change follows the [library release note\nguidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)\n-
+        The change includes or references documentation updates if necessary\n- Backport
+        labels are set (if\n[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))\n\n-
+        [x] Reviewer has checked that all the criteria below are met\n- Title is accurate\n-
+        All changes are related to the pull request's stated goal\n- Avoids breaking\n[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)\nchanges\n-
+        Testing strategy adequately addresses listed risks\n- Newly-added code is
+        easy to change\n- Release note makes sense to a user of the library\n- If
+        necessary, author has acknowledged and discussed the performance\nimplications
+        of this PR as reported in the benchmarks PR comment\n- Backport labels are
+        set in a manner that is consistent with the\n[release branch maintenance\npolicy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)\n\n(cherry
+        picked from commit 81824b8)
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n \n
\n \n
\n + \
\"@christophe-papazian\"\n christophe-papazian\n\n\n marked + this pull request as ready for review\n\n November + 25, 2024 16:51 \n \n
\n
\n
\n + \ \n
\n \n
\n + \
\"@christophe-papazian\"\n christophe-papazian\n\n\n requested + review from\n a team\n\n + \ as code owners\n\n\n + \ November 25, 2024 16:51 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@christophe-papazian\"\n christophe-papazian\n\n\n requested + review from\n gnufede + and \n emmettbutler\n\n\n\n + \ November 25, 2024 16:51 \n + \ \n
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@github-actions\"\n\n \n + \ \"GitHub\n \n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n github-actions\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

CODEOWNERS have + been resolved as:

\n
releasenotes/notes/exploit_prevention_patch_fix-1bdd7540e1d085d8.yaml
+        \  @DataDog/apm-python\nddtrace/_monkey.py                                                      @DataDog/apm-core-python\nddtrace/appsec/__init__.py
+        \                                             @DataDog/asm-python\nddtrace/appsec/_common_module_patches.py
+        \                               @DataDog/asm-python\nddtrace/appsec/_iast/__init__.py
+        \                                       @DataDog/asm-python\nddtrace/appsec/_remoteconfiguration.py
+        \                                 @DataDog/asm-python\ntests/appsec/integrations/test_flask_telemetry.py
+        \                      @DataDog/asm-python\ntests/utils.py                                                          @DataDog/python-guild\n
\n\n + \
\n
\n\n\n
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n \n
\n\n
\n + \ \"@datadog-dd-trace-py-rkomorn\"\n\n
\n\n\n + \
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n datadog-dd-trace-py-rkomorn\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n + \ \n \n\n
\n \n
\n + \ \n edited\n \n \n \n + \ \n\n
\n + \
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Datadog Report

\n

Branch report: + backport-11522-to-2.15
\nCommit + report: c476a58
\nTest + service: dd-trace-py

\n

\u2705 + 0 Failed, 592 Passed, 694 Skipped, 19m 30.54s Total duration (15m 23.31s time + saved)

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"gnufede\"\n + \
\n \n
\n + \
\n + \ \n gnufede\n \n\n + \ \n\n approved these changes\n\n\n \n \n + \ Nov + 25, 2024\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n
\n\n + \
\n \n
\n + \ \n
\n + \ \n
\n
\n \"@gnufede\"\n gnufede\n\nenabled + auto-merge (squash)\n\n November + 25, 2024 17:32 \n \n
\n
\n\n\n
\n\n + \
\n \n \n
\n\n
\n + \ \"@pr-commenter\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n pr-commenter\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Nov 25, 2024\n\n\n + \ \n \n\n
\n \n
\n + \ \n edited\n \n \n \n + \ \n\n
\n + \
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Benchmarks

\n

Benchmark execution + time: 2024-11-26 21:13:50

\n

Comparing candidate commit + c476a58 + in PR branch backport-11522-to-2.15 with + baseline commit b462888 + in branch 2.15.

\n

Found + 0 performance improvements and 0 performance regressions! Performance is the + same for 371 metrics, 53 unstable metrics.

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"erikayasuda\"\n + \
\n \n
\n + \
\n + \ \n erikayasuda\n + \ \n\n \n\n approved these changes\n\n\n \n \n + \ Nov + 26, 2024\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n
\n\n + \
\n + \ \n
\n
\n \n
\n \n
\n + \
christophe-papazian\n \nand others\n + \ added 2 commits\n November + 26, 2024 18:39
\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@christophe-papazian\"\n + \
\n
\n\n \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ 3ac9ef8\n \n
\n
\n + \
\n
\n\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@erikayasuda\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ c476a58\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n
\n + \ \n + \ \n\n + \
\n
\n\n \n + \ \"@gnufede\"\n gnufede\n\n\n\n + \ merged commit 2d6800f\n into\n\n \n \n 2.15\n \n\n\n Nov 26, 2024\n\n
\n 584 checks passed\n
\n\n
\n + \ \n \n + \ \n \n + \ \n\n + \ \n
\n
\n
\n\n
\n\n + \
\n \n
\n \n
\n
\"@gnufede\"\n gnufede\n\n\n + \ \n deleted the\n \n + \ \n backport-11522-to-2.15\n \n branch\n\n + \ November 26, 2024 21:16 \n + \ \n
\n
\n\n\n
\n\n\n\n\n\n \n
\n
\n \n
+ \
\n\n\n\n
\n\n
\n + \
\n
\n \n Sign up for free\n to join + this conversation on GitHub.\n Already have an account?\n Sign + in to comment\n\n\n \n
\n\n
\n
\n
\n\n
\n + \
\n
\n\n\n \n
\n \n
\n \n
\n Reviewers\n
\n\n \n\n\n + \

\n \n\n \n \"@erikayasuda\"\n \n erikayasuda\n\n\n\n + \ \n + \ \n \n + \ \n\n \n \n erikayasuda approved these changes\n\n + \

\n

\n \n\n \n \"@gnufede\"\n \n gnufede\n\n\n\n \n + \ \n \n + \ \n\n \n \n gnufede approved these changes\n\n + \

\n

\n \n\n \n \"@emmettbutler\"\n \n emmettbutler\n\n\n + \ Awaiting requested review from emmettbutler\n\n + \ emmettbutler is a code owner automatically + assigned from DataDog/apm-python\n\n \n

\n\n \n
\n\n
\n\n\n + \
\n
\n\n \n
\n Assignees\n + \
\n\n\n \n\n + \ No one assigned\n\n\n\n
\n\n\n \n\n \n\n\n
\n Labels\n
\n\n\n
\n None yet\n
\n\n
\n\n\n \n\n
\n
\n
\n Projects\n + \
\n\n
\n
\n\n None yet\n\n\n\n
\n\n\n + \ \n
\n
\n \n
\n Milestone\n + \
\n\n No milestone\n\n
\n\n\n \n \n \n
\n
\n \n
\n \n
\n Development\n + \
\n\n\n \n\n

Successfully merging this pull request may + close these issues.

\n\n\n \n\n
+ \
\n
\n
\n\n \n \n\n + \ \n\n \n
\n + \
\n
\n 3 participants\n
\n \n
\n
\n\n\n\n + \ \n\n \n\n\n\n\n \n\n
\n\n\n\n \n \n \n + \ \n\n\n + \ \n\n\n \n\n\n\n\n \n \n\n + \ \n\n
\n

Footer

\n\n \n\n\n
\n
\n \n \n \n\n\n + \ \n © 2024 GitHub, Inc.\n \n
\n\n + \ \n
\n
\n\n\n\n\n \n\n\n \n\n + \ \n\n
\n + \
\n
\n
\n\n \n\n\n\n\n\n \n\n
\n + \
\n \n\n\n" + headers: + Accept-Ranges: + - bytes + Cache-Control: + - no-cache + Content-Security-Policy: + - 'default-src ''none''; base-uri ''self''; child-src github.com/assets-cdn/worker/ + github.com/webpack/ github.com/assets/ gist.github.com/assets-cdn/worker/; + connect-src ''self'' uploads.github.com www.githubstatus.com collector.github.com + raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com + github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com + *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com + objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com + proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com + wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ + productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ + productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ + productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ + productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ + productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ + productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ + productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ + productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ + productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ + productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com + github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com + 
wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com + api.business.githubcopilot.com api.enterprise.githubcopilot.com; font-src + github.githubassets.com; form-action ''self'' github.com gist.github.com copilot-workspace.githubnext.com + objects-origin.githubusercontent.com; frame-ancestors ''none''; frame-src + viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src + ''self'' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com + identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com + github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ + user-images.githubusercontent.com/ private-user-images.githubusercontent.com + opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com + customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com + *.githubusercontent.com; manifest-src ''self''; media-src github.com user-images.githubusercontent.com/ + secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com + github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src + github.githubassets.com; style-src ''unsafe-inline'' github.githubassets.com; + upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ github.com/webpack/ + github.com/assets/ gist.github.com/assets-cdn/worker/' + Content-Type: + - text/html; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:37 GMT + Referrer-Policy: + - no-referrer-when-downgrade + Server: + - GitHub.com + Set-Cookie: + - 
_gh_sess=lcakJUZEMJACUB16KjyZXhnaDJ7Lf%2FYHyuliVSpr%2BebjFOJkqrrGVy310bXG4sCIwd5suyAhSq1ar47KgrE92K2xy%2FyLkV0kyOGj2HTHZLBE0AoalTEwk%2FtwXY2eTPd4xUPomg0vtlqQrrYnQNHrj9IxaNsg225S2Xxjw2F05HFwCLbhj4Tdo2o8BBOYJeV2WH8GGT4bJ6XT0VeQRP3trJrZhY9WOPmlbyZ0k%2Biokd%2By3Tr6Fld4rQ3BKKJ6Nq%2FEfMuSc4M5FDuoXJzxMyyAGg%3D%3D--VglcwRFwLrbj7fn0--vsOdxQkYekEyYwll%2BqS%2B1A%3D%3D; + Path=/; HttpOnly; Secure; SameSite=Lax + - _octo=GH1.1.1476856324.1734014562; Path=/; Domain=github.com; Expires=Fri, + 12 Dec 2025 14:42:42 GMT; Secure; SameSite=Lax + - logged_in=no; Path=/; Domain=github.com; Expires=Fri, 12 Dec 2025 14:42:42 + GMT; HttpOnly; Secure; SameSite=Lax + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Transfer-Encoding: + - chunked + Vary: + - X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, Accept-Encoding, Accept, + X-Requested-With + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Request-Id: + - ED4A:356904:24669B2:32ED588:675AF662 + X-XSS-Protection: + - '0' + connection: + - close + server-timing: + - pull_request_layout-fragment;desc="pull_request_layout fragment";dur=450.768495,conversation_content-fragment;desc="conversation_content + fragment";dur=576.513283,conversation_sidebar-fragment;desc="conversation_sidebar + fragment";dur=305.288275,nginx;desc="NGINX";dur=1.093278,glb;desc="GLB";dur=4.679312 + x-voltron-version: + - 69a2227 + status: + code: 200 + message: OK +- request: + body: null + headers: + Connection: + - close + Host: + - github.com + method: GET + uri: https://github.com/DataDog/dd-trace-py/pull/11690 + response: + body: + string: "\n\n\n\n\n\n\n\n\n\n\n\n \n \n + \ \n \n \n \n + \ \n + \ \n\n + \ \n\n \n\n \n \n \n \n \n\n\n \n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n\n\n\n\n ci: store fake DD_API_KEY as a secret by brettlangdon \xB7 + Pull Request #11690 \xB7 DataDog/dd-trace-py \xB7 GitHub\n\n\n\n \n \n \n\n 
\n \n\n\n + \ \n\n\n \n\n\n \n \n\n \n \n\n + \ \n\n\n\n \n\n \n\n\n\n\n \n\n \n\n \n\n + \ \n\n \n\n \n\n \n \n \n\n \n \n \n\n\n\n\n \n\n\n\n + \ \n\n\n \n \n \n \n\n \n\n \n + \ \n\n + \ \n\n\n\n \n\n \n\n\n \n\n \n\n \n \n + \ \n\n\n\n\n\n \n\n + \ \n\n \n
\n \n\n\n
\n Skip to content\n\n + \ \n \n + \ \n \n \n\n\n\n\n \n \n + \
\n\n\n\n\n\n + \ \n\n \n\n \n\n\n
\n

Navigation Menu

\n\n \n\n + \
\n
\n
\n + \ \n
\n\n \n + \ \n + \ \n\n + \ \n\n
\n \n Sign in\n \n
\n
\n\n\n + \
\n
\n + \ \n\n
\n \n\n\n\n \n \n
\n \n \n\n + \
\n Search + or jump to...\n
\n + \ \n\n + \
\n \n\n \n\n \n
\n \n + \

Search + code, repositories, users, issues, pull requests...

\n
\n \n
+ \
\n
\n \n
\n \n \n \n \n \n\n \n
\n
\n
\n
\n + \ \n
\n + \
\n Clear\n + \ \n\n + \
\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n + \
\n \n + \
\n + \
\n
\n\n \n + \
\n
\n\n
\n
\n
\n \n
\n + \ \n\n \n
\n + \
\n
\n + \

\n Provide + feedback\n

\n \n
\n
\n + \ \n
\n
\n + \ \n
\n \n + \
\n

We read every piece of feedback, and take your input very + seriously.

\n \n \n + \ \n
\n
\n + \ \n
\n\n \n \n\n + \ \n
\n
\n + \
\n

\n Saved searches\n

\n + \

Use + saved searches to filter your results more quickly

\n
\n
\n \n + \
\n
\n \n
\n \n + \
\n\n \n\n
\n + \
\n
\n\n
\n + \
\n \n
\n + \
\n
\n\n\n
\n \n Sign in\n \n + \
\n\n \n Sign + up\n \n \n
\n + \
\n
\n \n\n\n \n \n\n + \
\n\n\n\n\n\n\n\n\n + \
\n\n\n + \ \n\n\n + \ \n
\n\n\n + \ \n\n\n\n\n\n\n \n
\n
\n \n \n\n\n\n + \ \n \n\n \n\n\n\n\n\n\n \n
\n\n
\n\n + \
\n \n
\n + \ \n \n\n + \ \n \n + \ \n DataDog\n + \ \n /\n + \ \n dd-trace-py\n \n\n Public\n
\n\n\n + \
\n\n
\n \n\n + \
\n
\n\n
\n
\n\n\n \n\n + \
\n\n \n\n\n\n\n
\n \n\n\n\n \n \n
\n \n\n
\n \n \n \n\n
\n
\n
\n\n \n
\n \n \n New issue\n \n \n + \
\n
\n \n \n\n
\n\n
\n

\n Have a question + about this project? Sign up for a free GitHub account to open an + issue and contact its maintainers and the community.\n

\n\n \n\n

By + clicking “Sign up for GitHub”, you agree to our terms of service + and\n privacy statement. We\u2019ll occasionally send you + account related emails.

\n\n

\n + \ Already on GitHub?\n Sign + in\n to your account\n

\n
\n\n
\n
\n
\n + \ \n + \
\n\n

\n ci: + store fake DD_API_KEY as a secret\n #11690\n + \

\n
\n
\n\n
\n
\n \n + Open\n\n
\n\n\n\n\n
\n brettlangdon\n\n wants to merge\n 1\n + \ commit into\n\n\n main\n\n + \
\n
\n + \ \n base:\n + \ main\n \n + \ \n \n + \ \n
\n
\n + \
\n Choose + a base branch\n \n
\n\n + \ \n
\n + \ \n
\n\n \n \n\n
\n \n\n \n\n \n\n\n
\n
\n \n + \ \n \n \n Loading\n\n + \
\n
\n\n \n\n\n \n\n + \
\n\n \n
\n + \
\n
\n
\n\n \n + \
\n
\n\n
\n \n
\n\nfrom\n\nbrettlangdon-patch-3\n \n \n \n\n \n \n\n + \
\n
\n\n\n\n + \ \n \n\n\n\n\n\n\n\n\n \n \n + \
\n\n\n + \
\n\n
\n
\n\n\n \n\n\n\n
\n
\n + \
\n + \
\n + \
\n \n Open\n\n
\n\n\n\n\n + \
\n

\n \n + \ ci: store fake DD_API_KEY as a secret\n \n #11690\n

\n\n
\n brettlangdon\n\n + \ wants to merge\n 1\n + \ commit into\n\n\n main\n\nfrom\n\nbrettlangdon-patch-3\n \n \n \n\n \n \n\n + \
\n
\n\n\n\n\n + \
\n
\n
\n
\n + \
\n
\n
\n
\n\n\n\n \n
\n
\n \n \n +2\n \n \n \u22122\n \n \n + \ \n \n \n + \
\n\n \n
\n\n\n\n
\n + \
\n

Conversation

\n + \ \n \n\n\n \n\n
\n\n
\n \"brettlangdon\"\n + \ \n \n
\n + \
\n
\n
\n
\n \n \n \n\n \n\n\n \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n Member\n\n\n \n\n
\n\n

\n
\n \"@brettlangdon\"\n\n \n brettlangdon\n \n\n \n\n \n\n + \ commented\n\n\n Dec 12, 2024\n\n\n \n + \ \n\n
\n + \ \n
\n \n edited\n \n + \ \n \n \n\n
\n
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n
\n + \
\n \n \n + \
\n

Checklist

\n
    \n
  • PR author + has checked that all the criteria below are met
  • \n
  • The PR description + includes an overview of the change
  • \n
  • The PR description articulates + the motivation for the change
  • \n
  • The change includes tests OR the PR + description describes a testing strategy
  • \n
  • The PR description notes + risks associated with the change, if any
  • \n
  • Newly-added code is easy + to change
  • \n
  • The change follows the library release note guidelines
  • \n
  • The change + includes or references documentation updates if necessary
  • \n
  • Backport + labels are set (if applicable)
  • \n
\n

Reviewer Checklist

\n
    \n
  • Reviewer + has checked that all the criteria below are met
  • \n
  • Title is accurate
  • \n
  • All + changes are related to the pull request's stated goal
  • \n
  • Avoids breaking + API changes
  • \n
  • Testing strategy adequately addresses + listed risks
  • \n
  • Newly-added code is easy to change
  • \n
  • Release + note makes sense to a user of the library
  • \n
  • If necessary, author has + acknowledged and discussed the performance implications of this PR as reported + in the benchmarks PR comment
  • \n
  • Backport labels are set in a manner + that is consistent with the release branch maintenance policy
  • \n
\n
\n + \
\n \n
\n\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n\n + \
\n
\n
\n \n
\n
\n + \ \n
\n
\n
\n + \
\n\n
\n
\n
\n\n\n \n\n \n
\n\n\n
\n + \ \n
\n
\n
\n \n
\n \n
\n
\n
\n
\n \n
\n
\n \n \"@brettlangdon\"\n
\n
\n\n + \ \n\n + \
\n \n\n \n \n \n\n \n\n \n\n\n + \
\n\n
\n + \
\n + \ \n\n + \ \n \n \n\n \n\n
\n \n
\n\n
\n + \
\n
\n\n
\n \n + \ a6675d3\n \n
\n
\n + \
\n
\n\n
\n
\n
\n\n\n
\n\n
\n + \ \n
\n
\n \n + \ \n\n
\n + \
\n\n \n\n \"@brettlangdon\"\nbrettlangdon\n\n\n\n\n added\n the \n\n changelog/no-changelog\n\n A changelog + entry is not required for this PR.\n label\n\n\n Dec 12, 2024\n\n
\n
\n\n\n + \
\n \n
\n \n
\n + \
\"@brettlangdon\"\n brettlangdon\n\n\n requested review from\n + \ a team\n\n as code owners\n\n\n + \ December 12, 2024 13:39 \n + \ \n
\n
\n
\n \n
\n \n
\n + \
\"@brettlangdon\"\n brettlangdon\n\n\n requested review from\n + \ avara1986 + and \n erikayasuda\n\n\n\n + \ December 12, 2024 13:39 \n + \ \n
\n
\n\n\n
\n\n
\n \n \n
\n\n
\n + \ \"@github-actions\"\n\n \n + \ \"GitHub\n \n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n + \ Contributor\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n github-actions\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

CODEOWNERS have + been resolved as:

\n
.github/workflows/system-tests.yml
+        \                                     @DataDog/python-guild @DataDog/apm-core-python\n
\n\n + \
\n
\n\n\n
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n \n
\n \"romainkomorndatadog\"\n + \
\n \n
\n + \
\n + \ \n romainkomorndatadog\n + \ \n\n \n\n approved these changes\n\n\n \n \n + \ Dec + 12, 2024\n \n \n \n + \
\n\n \n
\n
\n\n
\n \n \n
\n
\n
\n
\n\n\n
\n\n + \
\n \n \n
\n\n
\n + \ \"@datadog-dd-trace-py-rkomorn\"\n\n
\n\n\n + \
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n datadog-dd-trace-py-rkomorn\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Datadog Report

\n

Branch report: + brettlangdon-patch-3
\nCommit + report: a6675d3
\nTest + service: dd-trace-py

\n

\u2705 + 0 Failed, 55 Passed, 1413 Skipped, 1m 29.81s Total duration (35m 20.17s time + saved)

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n \n
\n\n
\n + \ \"@brettlangdon\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n + \ \n Member\n\n\n \n\n Author\n\n\n + \
\n\n

\n
\n + \ \n\n \n brettlangdon\n + \ \n\n \n\n \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

/merge

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n \n
\n\n
\n + \ \"@dd-devflow\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n dd-devflow\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n \n\n
\n \n
\n + \ \n edited\n \n \n \n + \ \n\n
\n + \
\n \n \n \n + \ \n \n + \ \n Loading\n\n \n \n + \
\n
\n\n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \ \n

Devflow running: /merge

\n

View all feedbacks in Devflow UI.

\n
\n

2024-12-12 13:54:30 UTC \u2139\uFE0F MergeQueue: + waiting for PR to be ready

\n

This merge request is not + mergeable yet, because of pending checks/missing approvals. It will be added + to the queue as soon as checks pass and/or get approvals.
\nNote: + if you pushed new commits since the last approval, you may need additional + approval.
\nYou can remove it from the waiting list with /remove + command.

\n

Use /merge -c + to cancel this operation!

\n
\n

2024-12-12 + 14:26:14 UTC \u2139\uFE0F MergeQueue: merge request + added to the queue

\n

The median merge time in main + is 34m.

\n

Use /merge -c + to cancel this operation!

\n
\n

\u23F3 + command still in progress ...

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n
\n \n + \ \n\n
\n + \
\n\n \n\n \"@dd-devflow\"\ndd-devflow\nbot\n\n\n\n added\n the \n\n mergequeue-status: waiting\n\n label\n\n\n Dec 12, 2024\n\n
\n
\n\n\n\n\n
\n\n + \
\n \n \n
\n\n
\n + \ \"@pr-commenter\"\n\n
\n\n\n
\n\n
\n + \
\n + \
\n + \
\n + \ \n + \ \n \n\n \n\n\n + \ \n \n \n \n Copy + link\n\n
\n
\n + \
\n
\n\n
\n \n\n\n\n + \ \n\n
\n\n

\n + \
\n \n\n \n pr-commenter\n + \ bot\n\n \n\n + \ \n\n commented\n\n\n Dec 12, 2024\n\n\n + \ \n
\n\n

\n
\n\n\n
\n\n + \ \n\n \n \n + \ \n \n \n
\n + \

Benchmarks

\n

Benchmark execution + time: 2024-12-12 14:24:20

\n

Comparing candidate commit + a6675d3 + in PR branch brettlangdon-patch-3 with + baseline commit 385d8e0 + in branch main.

\n

Found + 0 performance improvements and 0 performance regressions! Performance is the + same for 394 metrics, 2 unstable metrics.

\n
\n
\n\n\n + \
\n\n \n\n
\n + \
\n
\n \n \n
\n
\n \n
\n
\n
\n
\n + \
\n
\n\n
\n \n + \

\n \n + \ \n \n \n\n

\n \n\n\n + \
\n
\n\n
\n\n\n
\n\n\n
\n + \ \n
\n
\n \n + \ \n\n
\n + \ \n
\n\n\n\n\n
\n\n\n\n\n\n + \ \n
\n
\n \n
+ \
\n\n\n\n
\n\n
\n + \
\n
\n \n Sign up for free\n to join + this conversation on GitHub.\n Already have an account?\n Sign + in to comment\n\n\n \n
\n\n
\n
\n
\n\n
\n + \
\n
\n\n\n \n
\n \n
\n \n
\n Reviewers\n
\n\n \n\n\n + \

\n \n\n \n \"@romainkomorndatadog\"\n \n romainkomorndatadog\n\n\n\n + \ \n + \ \n \n + \ \n\n \n \n romainkomorndatadog approved these changes\n\n + \

\n

\n \n\n \n \"@avara1986\"\n \n avara1986\n\n\n + \ Awaiting requested review from avara1986\n\n + \ avara1986 is a code owner automatically + assigned from DataDog/python-guild\n\n \n

\n

\n \n\n \n \"@erikayasuda\"\n \n erikayasuda\n\n\n + \ Awaiting requested review from erikayasuda\n\n + \ erikayasuda is a code owner automatically + assigned from DataDog/apm-core-python\n\n \n

\n\n + \ \n
\n\n
\n\n\n
\n
\n\n \n
\n Assignees\n + \
\n\n\n \n\n + \ No one assigned\n\n\n\n
\n\n\n \n\n \n\n\n
\n Labels\n
\n\n\n
\n \n\n changelog/no-changelog\n\n + \ A changelog entry is not required for + this PR.\n \n\n mergequeue-status: + in_progress\n\n\n
\n\n
\n\n\n \n\n
\n
\n
\n Projects\n + \
\n\n
\n
\n\n None yet\n\n\n\n
\n\n\n + \ \n
\n
\n \n
\n Milestone\n + \
\n\n No milestone\n\n
\n\n\n \n \n \n
\n
\n \n
\n \n
\n Development\n + \
\n\n\n \n\n

Successfully merging this pull request may + close these issues.

\n\n\n \n\n
+ \
\n
\n
\n\n \n \n\n + \ \n\n \n
\n + \
\n
\n 2 participants\n
\n \n
\n
\n\n\n\n + \ \n\n \n\n\n\n\n \n\n
\n\n\n
\n \n \n \n + \ \n\n\n + \ \n\n\n \n\n\n\n\n \n \n\n + \ \n\n
\n

Footer

\n\n \n\n\n
\n
\n \n \n \n\n\n + \ \n © 2024 GitHub, Inc.\n \n
\n\n + \ \n
\n
\n\n\n\n\n \n\n\n \n\n + \ \n\n
\n + \
\n
\n
\n\n \n\n\n\n\n\n \n\n
\n + \
\n \n\n\n" + headers: + Accept-Ranges: + - bytes + Cache-Control: + - no-cache + Content-Security-Policy: + - 'default-src ''none''; base-uri ''self''; child-src github.com/assets-cdn/worker/ + github.com/webpack/ github.com/assets/ gist.github.com/assets-cdn/worker/; + connect-src ''self'' uploads.github.com www.githubstatus.com collector.github.com + raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com + github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com + *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com + objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com + proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com + wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ + productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ + productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ + productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ + productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ + productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ + productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ + productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ + productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ + productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ + productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com + github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com + 
wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com + api.business.githubcopilot.com api.enterprise.githubcopilot.com; font-src + github.githubassets.com; form-action ''self'' github.com gist.github.com copilot-workspace.githubnext.com + objects-origin.githubusercontent.com; frame-ancestors ''none''; frame-src + viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src + ''self'' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com + identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com + github-cloud.s3.amazonaws.com objects.githubusercontent.com secured-user-images.githubusercontent.com/ + user-images.githubusercontent.com/ private-user-images.githubusercontent.com + opengraph.githubassets.com github-production-user-asset-6210df.s3.amazonaws.com + customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com + *.githubusercontent.com; manifest-src ''self''; media-src github.com user-images.githubusercontent.com/ + secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com + github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src + github.githubassets.com; style-src ''unsafe-inline'' github.githubassets.com; + upgrade-insecure-requests; worker-src github.com/assets-cdn/worker/ github.com/webpack/ + github.com/assets/ gist.github.com/assets-cdn/worker/' + Content-Type: + - text/html; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:38 GMT + Referrer-Policy: + - no-referrer-when-downgrade + Server: + - GitHub.com + Set-Cookie: + - 
_gh_sess=SRp9AZpG%2B9PjbOI2DwrEGHPVoSPO1RQxFghqLR7KL1Fy058969XVQivgCdTFTsevR18tNXoZ%2FKyRkxCnOi2HhrErMbXOcrBwL5FA5%2FuR4HL8V1NhpBn75oTzynU53VGcHD6m7%2BIlieWYdCDurncYFhjKC%2FyJMwrbWCv8a%2BqwOdUGnXDfrkHq9if6PsYS6W3SV3HjEy72OBGtOU%2FpHCZOngO5mPkK52xmJZd5cZuqoLImJBzkm8LUbVPQjcLWKerz3McWy5a71T9kSEnN3Z0www%3D%3D--5w9odkPS8zIP4Vqv--zkwZym8AQTQ3LMLwY5hRjw%3D%3D; + Path=/; HttpOnly; Secure; SameSite=Lax + - _octo=GH1.1.172292125.1734014562; Path=/; Domain=github.com; Expires=Fri, + 12 Dec 2025 14:42:42 GMT; Secure; SameSite=Lax + - logged_in=no; Path=/; Domain=github.com; Expires=Fri, 12 Dec 2025 14:42:42 + GMT; HttpOnly; Secure; SameSite=Lax + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Transfer-Encoding: + - chunked + Vary: + - X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, Accept-Encoding, Accept, + X-Requested-With + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Request-Id: + - ED4B:321365:23B1314:320365D:675AF662 + X-XSS-Protection: + - '0' + connection: + - close + server-timing: + - pull_request_layout-fragment;desc="pull_request_layout fragment";dur=412.175919,conversation_content-fragment;desc="conversation_content + fragment";dur=448.910543,conversation_sidebar-fragment;desc="conversation_sidebar + fragment";dur=302.334653,nginx;desc="NGINX";dur=1.331055,glb;desc="GLB";dur=3.067062 + x-voltron-version: + - 69a2227 + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/6388/files?page=1 + response: + body: + string: 
'[{"sha":"1325b0864ebc6d4c40970f698018ac2524fe4e33","filename":"ddtrace/debugging/_expressions.py","status":"modified","additions":2,"deletions":2,"changes":4,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/2eb060881fdd94f4f717ae19549b598317b74d30/ddtrace%2Fdebugging%2F_expressions.py","raw_url":"https://github.com/DataDog/dd-trace-py/raw/2eb060881fdd94f4f717ae19549b598317b74d30/ddtrace%2Fdebugging%2F_expressions.py","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/ddtrace%2Fdebugging%2F_expressions.py?ref=2eb060881fdd94f4f717ae19549b598317b74d30","patch":"@@ + -292,8 +292,8 @@ def _compile_operation(ast):\n \n def _compile_literal(ast):\n # + type: (DDASTType) -> Optional[List[Instr]]\n- # literal => | + true | false | \"string\"\n- if not isinstance(ast, (str, int, float, bool)):\n+ # + literal => | true | false | \"string\" | null\n+ if not (isinstance(ast, + (str, int, float, bool)) or ast is None):\n return None\n \n return + [Instr(\"LOAD_CONST\", ast)]"},{"sha":"b4517ad79f67a2b362360ae8e7e0b0b3fa2e4ea8","filename":"releasenotes/notes/fix-debugger-expressions-none-literal-30f3328d2e386f40.yaml","status":"added","additions":4,"deletions":0,"changes":4,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/2eb060881fdd94f4f717ae19549b598317b74d30/releasenotes%2Fnotes%2Ffix-debugger-expressions-none-literal-30f3328d2e386f40.yaml","raw_url":"https://github.com/DataDog/dd-trace-py/raw/2eb060881fdd94f4f717ae19549b598317b74d30/releasenotes%2Fnotes%2Ffix-debugger-expressions-none-literal-30f3328d2e386f40.yaml","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/releasenotes%2Fnotes%2Ffix-debugger-expressions-none-literal-30f3328d2e386f40.yaml?ref=2eb060881fdd94f4f717ae19549b598317b74d30","patch":"@@ + -0,0 +1,4 @@\n+---\n+fixes:\n+ - |\n+ dynamic instrumentation: handle + null literal in conditions and 
expressions."},{"sha":"3c4d96fe66b871238c02651af82d43a1ad8085c3","filename":"tests/debugging/test_expressions.py","status":"modified","additions":1,"deletions":0,"changes":1,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/2eb060881fdd94f4f717ae19549b598317b74d30/tests%2Fdebugging%2Ftest_expressions.py","raw_url":"https://github.com/DataDog/dd-trace-py/raw/2eb060881fdd94f4f717ae19549b598317b74d30/tests%2Fdebugging%2Ftest_expressions.py","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/tests%2Fdebugging%2Ftest_expressions.py?ref=2eb060881fdd94f4f717ae19549b598317b74d30","patch":"@@ + -72,6 +72,7 @@ def __getitem__(self, name):\n # Test argument predicates + and operations\n ({\"contains\": [{\"ref\": \"payload\"}, \"hello\"]}, + {\"payload\": \"hello world\"}, True),\n ({\"eq\": [{\"ref\": \"hits\"}, + True]}, {\"hits\": True}, True),\n+ ({\"eq\": [{\"ref\": \"hits\"}, + None]}, {\"hits\": None}, True),\n ({\"substring\": [{\"ref\": \"payload\"}, + 4, 7]}, {\"payload\": \"hello world\"}, \"hello world\"[4:7]),\n ({\"any\": + [{\"ref\": \"collection\"}, {\"isEmpty\": {\"ref\": \"@it\"}}]}, {\"collection\": + [\"foo\", \"bar\", \"\"]}, True),\n ({\"startsWith\": [{\"ref\": \"local_string\"}, + \"hello\"]}, {\"local_string\": \"hello world!\"}, True),"}]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '3264' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:43 GMT + ETag: + - '"85a10accfc7f3330efa4961171936a0e3ea39a94e59a1811461b17a9a610bdb4"' + 
Last-Modified: + - Sun, 08 Dec 2024 16:19:43 GMT + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED4D:111D81:1B766DC:3695A0A:675AF662 + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4894' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '106' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/6388/files?page=2 + response: + body: + string: '[]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '2' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:43 GMT + ETag: + - '"4acd3c336ca9625e24fba0a2ea9cad06cf4693ace7e76d92c8a9a05f03c7b0cd"' + Last-Modified: + - Sun, 08 Dec 2024 16:19:43 GMT + Link: + - ; rel="prev", 
+ ; rel="last", + ; rel="first" + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED4E:1C31C4:1AEFA31:3584AFC:675AF663 + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4893' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '107' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/11690/files?page=1 + response: + body: + string: '[{"sha":"ce795db4fe24584e0a3c105f6f130071b1292cbe","filename":".github/workflows/system-tests.yml","status":"modified","additions":2,"deletions":2,"changes":4,"blob_url":"https://github.com/DataDog/dd-trace-py/blob/a6675d3799af44382bd5b677c56a94843a6433aa/.github%2Fworkflows%2Fsystem-tests.yml","raw_url":"https://github.com/DataDog/dd-trace-py/raw/a6675d3799af44382bd5b677c56a94843a6433aa/.github%2Fworkflows%2Fsystem-tests.yml","contents_url":"https://api.github.com/repos/DataDog/dd-trace-py/contents/.github%2Fworkflows%2Fsystem-tests.yml?ref=a6675d3799af44382bd5b677c56a94843a6433aa","patch":"@@ + -54,7 +54,7 @@ jobs:\n # system-tests requires an API_KEY, but it does + not have to be a valid key, as long as we don''t run a scenario\n # + that make assertion on backend 
data. Using a fake key allow to run system + tests on PR originating from forks.\n # If ever it''s needed, a valid + key exists in the repo, using ${{ secrets.DD_API_KEY }}\n- DD_API_KEY: + 1234567890abcdef1234567890abcdef\n+ DD_API_KEY: ${{ secrets.FAKE_DD_API_KEY + }}\n CMAKE_BUILD_PARALLEL_LEVEL: 12\n SYSTEM_TESTS_AWS_ACCESS_KEY_ID: + ${{ secrets.IDM_AWS_ACCESS_KEY_ID }}\n SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: + ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }}\n@@ -106,7 +106,7 @@ jobs:\n # + system-tests requires an API_KEY, but it does not have to be a valid key, + as long as we don''t run a scenario\n # that make assertion on backend + data. Using a fake key allow to run system tests on PR originating from forks.\n # + If ever it''s needed, a valid key exists in the repo, using ${{ secrets.DD_API_KEY + }}\n- DD_API_KEY: 1234567890abcdef1234567890abcdef\n+ DD_API_KEY: + ${{ secrets.FAKE_DD_API_KEY }}\n CMAKE_BUILD_PARALLEL_LEVEL: 12\n SYSTEM_TESTS_AWS_ACCESS_KEY_ID: + ${{ secrets.IDM_AWS_ACCESS_KEY_ID }}\n SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: + ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }}"}]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '1930' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:43 GMT + ETag: + - '"e91026bdc9aa216ff163739444e03dfcf4e719131166fd717d6e5a7eafbd54fe"' + Last-Modified: + - Thu, 12 Dec 2024 14:26:20 GMT + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; 
includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED50:122321:1C06DEB:37B36A8:675AF663 + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4892' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '108' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/vnd.github+json + Connection: + - close + Host: + - api.github.com + method: GET + uri: https://api.github.com/repos/datadog/dd-trace-py/pulls/11690/files?page=2 + response: + body: + string: '[]' + headers: + Access-Control-Allow-Origin: + - '*' + Access-Control-Expose-Headers: + - ETag, Link, Location, Retry-After, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, + X-RateLimit-Used, X-RateLimit-Resource, X-RateLimit-Reset, X-OAuth-Scopes, + X-Accepted-OAuth-Scopes, X-Poll-Interval, X-GitHub-Media-Type, X-GitHub-SSO, + X-GitHub-Request-Id, Deprecation, Sunset + Cache-Control: + - private, max-age=60, s-maxage=60 + Content-Length: + - '2' + Content-Security-Policy: + - default-src 'none' + Content-Type: + - application/json; charset=utf-8 + Date: + - Thu, 12 Dec 2024 14:42:44 GMT + ETag: + - '"4acd3c336ca9625e24fba0a2ea9cad06cf4693ace7e76d92c8a9a05f03c7b0cd"' + Last-Modified: + - Thu, 12 Dec 2024 14:26:20 GMT + Link: + - ; rel="prev", + ; rel="last", + ; rel="first" + Referrer-Policy: + - origin-when-cross-origin, strict-origin-when-cross-origin + Server: + - github.com + Strict-Transport-Security: + - max-age=31536000; 
includeSubdomains; preload + Vary: + - Accept, Authorization, Cookie, X-GitHub-OTP,Accept-Encoding, Accept, X-Requested-With + X-Accepted-OAuth-Scopes: + - '' + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - deny + X-GitHub-Media-Type: + - github.v3; format=json + X-GitHub-Request-Id: + - ED52:23893A:1B6422E:3678EE8:675AF664 + X-OAuth-Scopes: + - delete:packages, gist, read:org, read:packages, repo, workflow + X-RateLimit-Limit: + - '5000' + X-RateLimit-Remaining: + - '4891' + X-RateLimit-Reset: + - '1734015073' + X-RateLimit-Resource: + - core + X-RateLimit-Used: + - '109' + X-XSS-Protection: + - '0' + connection: + - close + x-github-api-version-selected: + - '2022-11-28' + x-oauth-client-id: + - 178c6fc778ccc68e1d6a + status: + code: 200 + message: OK +version: 1 From 710ed97c4dba053bf75e9b651e339ae2551a892f Mon Sep 17 00:00:00 2001 From: Taegyun Kim Date: Thu, 12 Dec 2024 14:23:26 -0500 Subject: [PATCH 22/78] chore(profiling): ddup.upload() optionally gets tracer object (#11695) profiling tests that check for span ids are flaky. I wonder whether sharing tracer instance across tests causes that problem. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../internal/datadog/profiling/ddup/_ddup.pyi | 3 ++- .../internal/datadog/profiling/ddup/_ddup.pyx | 7 ++++--- ddtrace/profiling/profiler.py | 1 + ddtrace/profiling/scheduler.py | 6 +++++- tests/profiling_v2/collector/test_asyncio.py | 5 ++--- tests/profiling_v2/collector/test_stack.py | 21 +++++++++---------- .../profiling_v2/collector/test_threading.py | 17 +++++++-------- 7 files changed, 32 insertions(+), 28 deletions(-) diff --git a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi index 552e377df0b..78351e93b91 100644 --- 
a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi +++ b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi @@ -3,6 +3,7 @@ from typing import Optional from typing import Union from .._types import StringType from ddtrace._trace.span import Span +from ddtrace._trace.tracer import Tracer def config( env: StringType, @@ -16,7 +17,7 @@ def config( enable_code_provenance: Optional[bool], ) -> None: ... def start() -> None: ... -def upload() -> None: ... +def upload(tracer: Optional[Tracer]) -> None: ... class SampleHandle: def push_cputime(self, value: int, count: int) -> None: ... diff --git a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx index b3f9b264890..9c590c796d7 100644 --- a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx +++ b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx @@ -20,6 +20,7 @@ from ddtrace.internal.constants import DEFAULT_SERVICE_NAME from ddtrace.internal.packages import get_distributions from ddtrace.internal.runtime import get_runtime_id from ddtrace._trace.span import Span +from ddtrace._trace.tracer import Tracer ctypedef void (*func_ptr_t)(string_view) @@ -396,16 +397,16 @@ def _get_endpoint(tracer)-> str: return endpoint -def upload() -> None: +def upload(tracer: Optional[Tracer] = ddtrace.tracer) -> None: call_func_with_str(ddup_set_runtime_id, get_runtime_id()) - processor = ddtrace.tracer._endpoint_call_counter_span_processor + processor = tracer._endpoint_call_counter_span_processor endpoint_counts, endpoint_to_span_ids = processor.reset() call_ddup_profile_set_endpoints(endpoint_to_span_ids) call_ddup_profile_add_endpoint_counts(endpoint_counts) - endpoint = _get_endpoint(ddtrace.tracer) + endpoint = _get_endpoint(tracer) call_func_with_str(ddup_config_url, endpoint) with nogil: diff --git a/ddtrace/profiling/profiler.py b/ddtrace/profiling/profiler.py index fa4bdb79a3e..9903cc29108 100644 --- a/ddtrace/profiling/profiler.py +++ b/ddtrace/profiling/profiler.py @@ -311,6 
+311,7 @@ def start_collector(collector_class: Type) -> None: recorder=r, exporters=exporters, before_flush=self._collectors_snapshot, + tracer=self.tracer, ) def _collectors_snapshot(self): diff --git a/ddtrace/profiling/scheduler.py b/ddtrace/profiling/scheduler.py index 9f286f8688b..e8aafe7a63b 100644 --- a/ddtrace/profiling/scheduler.py +++ b/ddtrace/profiling/scheduler.py @@ -7,6 +7,8 @@ from typing import Optional from typing import Sequence # noqa F401 +import ddtrace +from ddtrace._trace.tracer import Tracer from ddtrace.internal import compat from ddtrace.internal import periodic from ddtrace.internal.datadog.profiling import ddup @@ -30,6 +32,7 @@ def __init__( recorder: Optional[Recorder] = None, exporters: Optional[List[Exporter]] = None, before_flush: Optional[Callable] = None, + tracer: Optional[Tracer] = ddtrace.tracer, interval: float = config.upload_interval, ): super(Scheduler, self).__init__(interval=interval) @@ -38,6 +41,7 @@ def __init__( self.before_flush: Optional[Callable] = before_flush self._configured_interval: float = self.interval self._last_export: int = 0 # Overridden in _start_service + self._tracer = tracer self._export_libdd_enabled: bool = config.export.libdd_enabled def _start_service(self): @@ -59,7 +63,7 @@ def flush(self): LOG.error("Scheduler before_flush hook failed", exc_info=True) if self._export_libdd_enabled: - ddup.upload() + ddup.upload(self._tracer) # These are only used by the Python uploader, but set them here to keep logs/etc # consistent for now diff --git a/tests/profiling_v2/collector/test_asyncio.py b/tests/profiling_v2/collector/test_asyncio.py index c29ff7fe92c..f0c9bb625d9 100644 --- a/tests/profiling_v2/collector/test_asyncio.py +++ b/tests/profiling_v2/collector/test_asyncio.py @@ -7,7 +7,6 @@ import pytest from ddtrace import ext -from ddtrace import tracer from ddtrace.internal.datadog.profiling import ddup from ddtrace.profiling.collector import asyncio as collector_asyncio from 
tests.profiling.collector import pprof_utils @@ -85,7 +84,7 @@ async def test_asyncio_lock_events(self): ], ) - async def test_asyncio_lock_events_tracer(self): + async def test_asyncio_lock_events_tracer(self, tracer): tracer._endpoint_call_counter_span_processor.enable() resource = str(uuid.uuid4()) span_type = ext.SpanTypes.WEB @@ -103,7 +102,7 @@ async def test_asyncio_lock_events_tracer(self): lock_ctx = asyncio.Lock() # !CREATE! test_asyncio_lock_events_tracer_3 async with lock_ctx: # !ACQUIRE! !RELEASE! test_asyncio_lock_events_tracer_3 pass - ddup.upload() + ddup.upload(tracer=tracer) linenos_1 = get_lock_linenos("test_asyncio_lock_events_tracer_1") linenos_2 = get_lock_linenos("test_asyncio_lock_events_tracer_2") diff --git a/tests/profiling_v2/collector/test_stack.py b/tests/profiling_v2/collector/test_stack.py index 5d9007248bc..af13a1ea237 100644 --- a/tests/profiling_v2/collector/test_stack.py +++ b/tests/profiling_v2/collector/test_stack.py @@ -9,7 +9,6 @@ import pytest from ddtrace import ext -from ddtrace import tracer from ddtrace.internal.datadog.profiling import ddup from ddtrace.profiling.collector import stack from ddtrace.settings.profiling import config @@ -82,7 +81,7 @@ def foo(): @pytest.mark.parametrize("stack_v2_enabled", [True, False]) -def test_push_span(stack_v2_enabled, tmp_path): +def test_push_span(stack_v2_enabled, tmp_path, tracer): if sys.version_info[:2] == (3, 7) and stack_v2_enabled: pytest.skip("stack_v2 is not supported on Python 3.7") @@ -111,7 +110,7 @@ def test_push_span(stack_v2_enabled, tmp_path): local_root_span_id = span._local_root.span_id for _ in range(10): time.sleep(0.1) - ddup.upload() + ddup.upload(tracer=tracer) profile = pprof_utils.parse_profile(output_filename) samples = pprof_utils.get_samples_with_label_key(profile, "span id") @@ -129,7 +128,7 @@ def test_push_span(stack_v2_enabled, tmp_path): ) -def test_push_span_unregister_thread(tmp_path, monkeypatch): +def test_push_span_unregister_thread(tmp_path, 
monkeypatch, tracer): if sys.version_info[:2] == (3, 7): pytest.skip("stack_v2 is not supported on Python 3.7") @@ -166,7 +165,7 @@ def target_fun(): t.start() t.join() thread_id = t.ident - ddup.upload() + ddup.upload(tracer=tracer) profile = pprof_utils.parse_profile(output_filename) samples = pprof_utils.get_samples_with_label_key(profile, "span id") @@ -187,7 +186,7 @@ def target_fun(): @pytest.mark.parametrize("stack_v2_enabled", [True, False]) -def test_push_non_web_span(stack_v2_enabled, tmp_path): +def test_push_non_web_span(stack_v2_enabled, tmp_path, tracer): if sys.version_info[:2] == (3, 7) and stack_v2_enabled: pytest.skip("stack_v2 is not supported on Python 3.7") @@ -216,7 +215,7 @@ def test_push_non_web_span(stack_v2_enabled, tmp_path): local_root_span_id = span._local_root.span_id for _ in range(10): time.sleep(0.1) - ddup.upload() + ddup.upload(tracer=tracer) profile = pprof_utils.parse_profile(output_filename) samples = pprof_utils.get_samples_with_label_key(profile, "span id") @@ -235,7 +234,7 @@ def test_push_non_web_span(stack_v2_enabled, tmp_path): @pytest.mark.parametrize("stack_v2_enabled", [True, False]) -def test_push_span_none_span_type(stack_v2_enabled, tmp_path): +def test_push_span_none_span_type(stack_v2_enabled, tmp_path, tracer): # Test for https://github.com/DataDog/dd-trace-py/issues/11141 if sys.version_info[:2] == (3, 7) and stack_v2_enabled: pytest.skip("stack_v2 is not supported on Python 3.7") @@ -266,7 +265,7 @@ def test_push_span_none_span_type(stack_v2_enabled, tmp_path): local_root_span_id = span._local_root.span_id for _ in range(10): time.sleep(0.1) - ddup.upload() + ddup.upload(tracer=tracer) profile = pprof_utils.parse_profile(output_filename) samples = pprof_utils.get_samples_with_label_key(profile, "span id") @@ -398,7 +397,7 @@ def target_fun(): @pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions are not supported") @pytest.mark.parametrize("stack_v2_enabled", [True, False]) -def 
test_exception_collection_trace(stack_v2_enabled, tmp_path): +def test_exception_collection_trace(stack_v2_enabled, tmp_path, tracer): if sys.version_info[:2] == (3, 7) and stack_v2_enabled: pytest.skip("stack_v2 is not supported on Python 3.7") @@ -419,7 +418,7 @@ def test_exception_collection_trace(stack_v2_enabled, tmp_path): except Exception: time.sleep(1) - ddup.upload() + ddup.upload(tracer=tracer) profile = pprof_utils.parse_profile(output_filename) samples = pprof_utils.get_samples_with_label_key(profile, "exception type") diff --git a/tests/profiling_v2/collector/test_threading.py b/tests/profiling_v2/collector/test_threading.py index bb55e67522f..12b84fc9970 100644 --- a/tests/profiling_v2/collector/test_threading.py +++ b/tests/profiling_v2/collector/test_threading.py @@ -8,7 +8,6 @@ import pytest from ddtrace import ext -from ddtrace import tracer from ddtrace.internal.datadog.profiling import ddup from ddtrace.profiling.collector import threading as collector_threading from tests.profiling.collector import pprof_utils @@ -356,7 +355,7 @@ def lockfunc(self): ], ) - def test_lock_events_tracer(self): + def test_lock_events_tracer(self, tracer): tracer._endpoint_call_counter_span_processor.enable() resource = str(uuid.uuid4()) span_type = ext.SpanTypes.WEB @@ -375,7 +374,7 @@ def test_lock_events_tracer(self): span_id = t.span_id lock2.release() # !RELEASE! test_lock_events_tracer_2 - ddup.upload() + ddup.upload(tracer=tracer) linenos1 = get_lock_linenos("test_lock_events_tracer_1") linenos2 = get_lock_linenos("test_lock_events_tracer_2") @@ -419,7 +418,7 @@ def test_lock_events_tracer(self): ], ) - def test_lock_events_tracer_non_web(self): + def test_lock_events_tracer_non_web(self, tracer): tracer._endpoint_call_counter_span_processor.enable() resource = str(uuid.uuid4()) span_type = ext.SpanTypes.SQL @@ -435,7 +434,7 @@ def test_lock_events_tracer_non_web(self): span_id = t.span_id lock2.release() # !RELEASE! 
test_lock_events_tracer_non_web - ddup.upload() + ddup.upload(tracer=tracer) linenos2 = get_lock_linenos("test_lock_events_tracer_non_web") @@ -463,7 +462,7 @@ def test_lock_events_tracer_non_web(self): ], ) - def test_lock_events_tracer_late_finish(self): + def test_lock_events_tracer_late_finish(self, tracer): tracer._endpoint_call_counter_span_processor.enable() resource = str(uuid.uuid4()) span_type = ext.SpanTypes.WEB @@ -482,7 +481,7 @@ def test_lock_events_tracer_late_finish(self): lock2.release() # !RELEASE! test_lock_events_tracer_late_finish_2 span.resource = resource span.finish() - ddup.upload() + ddup.upload(tracer=tracer) linenos1 = get_lock_linenos("test_lock_events_tracer_late_finish_1") linenos2 = get_lock_linenos("test_lock_events_tracer_late_finish_2") @@ -520,7 +519,7 @@ def test_lock_events_tracer_late_finish(self): ], ) - def test_resource_not_collected(self): + def test_resource_not_collected(self, tracer): tracer._endpoint_call_counter_span_processor.enable() resource = str(uuid.uuid4()) span_type = ext.SpanTypes.WEB @@ -539,7 +538,7 @@ def test_resource_not_collected(self): lock1.release() # !RELEASE! test_resource_not_collected_1 span_id = t.span_id lock2.release() # !RELEASE! 
test_resource_not_collected_2 - ddup.upload() + ddup.upload(tracer=tracer) linenos1 = get_lock_linenos("test_resource_not_collected_1") linenos2 = get_lock_linenos("test_resource_not_collected_2") From b87c4dd04831770911fc164c20b836fa73a079ef Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Thu, 12 Dec 2024 15:36:57 -0500 Subject: [PATCH 23/78] ci: store fake DD_API_KEY as a secret (#11690) --- .github/workflows/system-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml index 06604dc811c..ce795db4fe2 100644 --- a/.github/workflows/system-tests.yml +++ b/.github/workflows/system-tests.yml @@ -54,7 +54,7 @@ jobs: # system-tests requires an API_KEY, but it does not have to be a valid key, as long as we don't run a scenario # that make assertion on backend data. Using a fake key allow to run system tests on PR originating from forks. # If ever it's needed, a valid key exists in the repo, using ${{ secrets.DD_API_KEY }} - DD_API_KEY: 1234567890abcdef1234567890abcdef + DD_API_KEY: ${{ secrets.FAKE_DD_API_KEY }} CMAKE_BUILD_PARALLEL_LEVEL: 12 SYSTEM_TESTS_AWS_ACCESS_KEY_ID: ${{ secrets.IDM_AWS_ACCESS_KEY_ID }} SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }} @@ -106,7 +106,7 @@ jobs: # system-tests requires an API_KEY, but it does not have to be a valid key, as long as we don't run a scenario # that make assertion on backend data. Using a fake key allow to run system tests on PR originating from forks. 
# If ever it's needed, a valid key exists in the repo, using ${{ secrets.DD_API_KEY }} - DD_API_KEY: 1234567890abcdef1234567890abcdef + DD_API_KEY: ${{ secrets.FAKE_DD_API_KEY }} CMAKE_BUILD_PARALLEL_LEVEL: 12 SYSTEM_TESTS_AWS_ACCESS_KEY_ID: ${{ secrets.IDM_AWS_ACCESS_KEY_ID }} SYSTEM_TESTS_AWS_SECRET_ACCESS_KEY: ${{ secrets.IDM_AWS_SECRET_ACCESS_KEY }} From d364f1bb6e5b0287d2a5350cfb3329e4ba1e93c3 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Thu, 12 Dec 2024 15:37:16 -0500 Subject: [PATCH 24/78] ci: fix flaky aiohttp test failure (#11698) Co-authored-by: erikayasuda <153395705+erikayasuda@users.noreply.github.com> --- tests/contrib/aiohttp/test_request.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/contrib/aiohttp/test_request.py b/tests/contrib/aiohttp/test_request.py index d32da71a927..cde0f311521 100644 --- a/tests/contrib/aiohttp/test_request.py +++ b/tests/contrib/aiohttp/test_request.py @@ -4,8 +4,6 @@ from ddtrace import config from ddtrace.contrib.aiohttp.middlewares import trace_app -from ddtrace.contrib.aiohttp.patch import patch -from ddtrace.contrib.aiohttp.patch import unpatch from tests.utils import assert_is_measured from tests.utils import override_global_config @@ -76,9 +74,7 @@ async def test_user_specified_service(tracer, aiohttp_client, loop): When a service name is specified by the user The aiohttp integration should use it as the service name """ - unpatch() with override_global_config(dict(service="mysvc")): - patch() app = setup_app() trace_app(app, tracer) client = await aiohttp_client(app) From 68bff3aaf1b53e64677d61746adab41fd7529a01 Mon Sep 17 00:00:00 2001 From: Quinna Halim Date: Thu, 12 Dec 2024 15:44:44 -0500 Subject: [PATCH 25/78] chore(ci): enable quality gates (#11710) Enable Quality Gates to prevent new flaky tests from being merged into main or release branches. 
Example branches: https://github.com/DataDog/dd-trace-py/tree/erikayasuda/qg-simple-delayed-fix https://github.com/DataDog/dd-trace-py/tree/erikayasuda/qg-simple-quick-fix ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: erikayasuda <153395705+erikayasuda@users.noreply.github.com> --- .gitlab-ci.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b05126541d2..4105e2d5eb0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,6 +2,7 @@ stages: - package - tests-gen - tests-trigger + - quality-gate - shared-pipeline - benchmarks - macrobenchmarks @@ -87,3 
+88,17 @@ deploy_to_di_backend:manual: UPSTREAM_COMMIT_AUTHOR: $CI_COMMIT_AUTHOR UPSTREAM_TAG: $CI_COMMIT_TAG UPSTREAM_PACKAGE_JOB: build + +check_new_flaky_tests: + stage: quality-gate + extends: .testrunner + script: + - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" && chmod +x /usr/local/bin/datadog-ci + - export DD_SITE=datadoghq.com + - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name ci.${CI_PROJECT_NAME}.dd-api-key-qualitygate --with-decryption --query "Parameter.Value" --out text) + - export DD_APP_KEY=$(aws ssm get-parameter --region us-east-1 --name ci.${CI_PROJECT_NAME}.dd-app-key-qualitygate --with-decryption --query "Parameter.Value" --out text) + - datadog-ci gate evaluate + except: + - main + - '[0-9].[0-9]*' + - 'mq-working-branch**' \ No newline at end of file From ac24ade35253f04947c20f8996842dd0aa5983ff Mon Sep 17 00:00:00 2001 From: ncybul <124532568+ncybul@users.noreply.github.com> Date: Thu, 12 Dec 2024 16:21:23 -0500 Subject: [PATCH 26/78] chore(docs): add vertexai docs (#11713) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking 
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- docs/integrations.rst | 7 +++++++ docs/spelling_wordlist.txt | 1 + 2 files changed, 8 insertions(+) diff --git a/docs/integrations.rst b/docs/integrations.rst index d07fbe33e45..04a94007626 100644 --- a/docs/integrations.rst +++ b/docs/integrations.rst @@ -478,6 +478,13 @@ urllib3 .. automodule:: ddtrace.contrib.urllib3 +.. _vertexai: + +vertexai +^^^^^^^^^^^^^^^^^^^ +.. automodule:: ddtrace.contrib.vertexai + + .. _vertica: Vertica diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index d3c185a9360..6f0bb1afa71 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -266,6 +266,7 @@ username uvicorn vendored versioned +vertexai vertica w3c websocket From a37aa20ed22f489ce2d433f997927150b7a2a890 Mon Sep 17 00:00:00 2001 From: Yun Kim <35776586+Yun-Kim@users.noreply.github.com> Date: Thu, 12 Dec 2024 17:04:48 -0500 Subject: [PATCH 27/78] fix(llmobs): do not ignore global patch env vars (#11662) Fixes #11639. This PR fixes an issue where `LLMObs.enable()` ignored/overrode global patch env vars including `DD_TRACE_<INTEGRATION>_ENABLED` and `DD_PATCH_MODULES`.
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/llmobs/_llmobs.py | 17 +++++- ...global-patch-configs-a2adc4803f55b142.yaml | 5 ++ tests/llmobs/test_llmobs_service.py | 60 +++++++++++++++++++ 3 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/fix-llmobs-do-not-ignore-global-patch-configs-a2adc4803f55b142.yaml diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py index a3ac9501319..808cee89e0f 100644 --- a/ddtrace/llmobs/_llmobs.py +++ b/ddtrace/llmobs/_llmobs.py @@ -23,6 +23,7 @@ from ddtrace.internal.telemetry import telemetry_writer from ddtrace.internal.telemetry.constants 
import TELEMETRY_APM_PRODUCT from ddtrace.internal.utils.formats import asbool +from ddtrace.internal.utils.formats import parse_tags_str from ddtrace.llmobs._constants import ANNOTATIONS_CONTEXT_ID from ddtrace.llmobs._constants import INPUT_DOCUMENTS from ddtrace.llmobs._constants import INPUT_MESSAGES @@ -347,8 +348,20 @@ def flush(cls) -> None: @staticmethod def _patch_integrations() -> None: - """Patch LLM integrations.""" - patch(**{integration: True for integration in SUPPORTED_LLMOBS_INTEGRATIONS.values()}) # type: ignore[arg-type] + """ + Patch LLM integrations. Ensure that we do not ignore DD_TRACE__ENABLED or DD_PATCH_MODULES settings. + """ + integrations_to_patch = {integration: True for integration in SUPPORTED_LLMOBS_INTEGRATIONS.values()} + for module, _ in integrations_to_patch.items(): + env_var = "DD_TRACE_%s_ENABLED" % module.upper() + if env_var in os.environ: + integrations_to_patch[module] = asbool(os.environ[env_var]) + dd_patch_modules = os.getenv("DD_PATCH_MODULES") + dd_patch_modules_to_str = parse_tags_str(dd_patch_modules) + integrations_to_patch.update( + {k: asbool(v) for k, v in dd_patch_modules_to_str.items() if k in SUPPORTED_LLMOBS_INTEGRATIONS.values()} + ) + patch(**integrations_to_patch) # type: ignore[arg-type] log.debug("Patched LLM integrations: %s", list(SUPPORTED_LLMOBS_INTEGRATIONS.values())) @classmethod diff --git a/releasenotes/notes/fix-llmobs-do-not-ignore-global-patch-configs-a2adc4803f55b142.yaml b/releasenotes/notes/fix-llmobs-do-not-ignore-global-patch-configs-a2adc4803f55b142.yaml new file mode 100644 index 00000000000..b080742d74a --- /dev/null +++ b/releasenotes/notes/fix-llmobs-do-not-ignore-global-patch-configs-a2adc4803f55b142.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + LLM Observability: This fix resolves an issue where ``LLMObs.enable()`` ignored global patch configurations, specifically + the ``DD_TRACE__ENABLED`` and ``DD_PATCH_MODULES`` environment variables. 
diff --git a/tests/llmobs/test_llmobs_service.py b/tests/llmobs/test_llmobs_service.py index 160023f5df7..5808ed01513 100644 --- a/tests/llmobs/test_llmobs_service.py +++ b/tests/llmobs/test_llmobs_service.py @@ -31,6 +31,7 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._constants import SPAN_START_WHILE_DISABLED_WARNING from ddtrace.llmobs._constants import TAGS +from ddtrace.llmobs._llmobs import SUPPORTED_LLMOBS_INTEGRATIONS from ddtrace.llmobs._llmobs import LLMObsTraceProcessor from ddtrace.llmobs.utils import Prompt from tests.llmobs._utils import _expected_llmobs_eval_metric_event @@ -144,6 +145,65 @@ def test_service_enable_already_enabled(mock_logs): mock_logs.debug.assert_has_calls([mock.call("%s already enabled", "LLMObs")]) +@mock.patch("ddtrace.llmobs._llmobs.patch") +def test_service_enable_patches_llmobs_integrations(mock_tracer_patch): + with override_global_config(dict(_dd_api_key="", _llmobs_ml_app="")): + llmobs_service.enable() + mock_tracer_patch.assert_called_once() + kwargs = mock_tracer_patch.call_args[1] + for module in SUPPORTED_LLMOBS_INTEGRATIONS.values(): + assert kwargs[module] is True + llmobs_service.disable() + + +@mock.patch("ddtrace.llmobs._llmobs.patch") +def test_service_enable_does_not_override_global_patch_modules(mock_tracer_patch, monkeypatch): + monkeypatch.setenv("DD_PATCH_MODULES", "openai:false") + with override_global_config(dict(_dd_api_key="", _llmobs_ml_app="")): + llmobs_service.enable() + mock_tracer_patch.assert_called_once() + kwargs = mock_tracer_patch.call_args[1] + for module in SUPPORTED_LLMOBS_INTEGRATIONS.values(): + if module == "openai": + assert kwargs[module] is False + continue + assert kwargs[module] is True + llmobs_service.disable() + + +@mock.patch("ddtrace.llmobs._llmobs.patch") +def test_service_enable_does_not_override_integration_enabled_env_vars(mock_tracer_patch, monkeypatch): + monkeypatch.setenv("DD_TRACE_OPENAI_ENABLED", "false") + with 
override_global_config(dict(_dd_api_key="", _llmobs_ml_app="")): + llmobs_service.enable() + mock_tracer_patch.assert_called_once() + kwargs = mock_tracer_patch.call_args[1] + for module in SUPPORTED_LLMOBS_INTEGRATIONS.values(): + if module == "openai": + assert kwargs[module] is False + continue + assert kwargs[module] is True + llmobs_service.disable() + + +@mock.patch("ddtrace.llmobs._llmobs.patch") +def test_service_enable_does_not_override_global_patch_config(mock_tracer_patch, monkeypatch): + """Test that _patch_integrations() ensures `DD_PATCH_MODULES` overrides `DD_TRACE__ENABLED`.""" + monkeypatch.setenv("DD_TRACE_OPENAI_ENABLED", "true") + monkeypatch.setenv("DD_TRACE_ANTHROPIC_ENABLED", "false") + monkeypatch.setenv("DD_PATCH_MODULES", "openai:false") + with override_global_config(dict(_dd_api_key="", _llmobs_ml_app="")): + llmobs_service.enable() + mock_tracer_patch.assert_called_once() + kwargs = mock_tracer_patch.call_args[1] + for module in SUPPORTED_LLMOBS_INTEGRATIONS.values(): + if module in ("openai", "anthropic"): + assert kwargs[module] is False + continue + assert kwargs[module] is True + llmobs_service.disable() + + def test_start_span_while_disabled_logs_warning(LLMObs, mock_logs): LLMObs.disable() _ = LLMObs.llm(model_name="test_model", name="test_llm_call", model_provider="test_provider") From e4742671776a9e09a29846a976c5804b08d252b1 Mon Sep 17 00:00:00 2001 From: Yun Kim <35776586+Yun-Kim@users.noreply.github.com> Date: Thu, 12 Dec 2024 18:34:35 -0500 Subject: [PATCH 28/78] chore(llmobs): use span store instead of temporary tags (#11543) This PR performs some cleanup refactors on the LLM Obs SDK and associated integrations. Specifically regarding the data stored, which includes LLMObs span metadata/metrics/tags/IO: - Stop storing these as temporary span tags and instead use the span store field, which allows arbitrary key value pairs but is not submitted to Datadog. 
This removes the potential for temporary tags to not be extracted and still be submitted as an APM span tag. - Stop attempting `safe_json()` (i.e. `json.dumps()`) to store the above data, which is an expensive operation that adds up with the number of separate calls, and instead just store the raw values of the stored objects in the store field, and only call `safe_json()` "once" at payload encoding time. Things to look out for: - Previously we were calling `safe_json()` every time to store data as string span tags. One danger includes errors during span processing due to wrong types (expect string, likely receive a dictionary/object from the span store field) - By avoiding any jsonify processing before encode time, a small edge case appeared from the LLMObs SDK decorator function which auto-annotates non-LLM spans with input function argument maps. In Python 3.8, the `bind_partial().arguments` call used to extract the function arguments returns an OrderedDict (otherwise returns a regular Dict() in Python >= 3.9, which broke some tests as we were simply casting to a string when storing the input/output value). I added a fix to cast the `bind_partial().arguments` object to a dict to avoid this issue coming up. ## Next Steps This is a great first step, but there are still tons of performance improvements we can make to our encoding/writing. The most notable is that we call `json.dumps()` on span events more than once (to calculate the payload size before adding to the buffer).
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/llmobs/_integrations/anthropic.py | 27 +- ddtrace/llmobs/_integrations/bedrock.py | 31 +- ddtrace/llmobs/_integrations/gemini.py | 27 +- ddtrace/llmobs/_integrations/langchain.py | 132 +++++---- ddtrace/llmobs/_integrations/openai.py | 57 ++-- ddtrace/llmobs/_integrations/vertexai.py | 30 +- ddtrace/llmobs/_llmobs.py | 49 ++-- ddtrace/llmobs/_trace_processor.py | 74 ++--- ddtrace/llmobs/_utils.py | 12 +- ddtrace/llmobs/_writer.py | 14 +- ddtrace/llmobs/decorators.py | 12 +- .../anthropic/test_anthropic_llmobs.py | 32 --- tests/contrib/openai/test_openai_llmobs.py | 32 --- 
tests/llmobs/_utils.py | 52 +++- tests/llmobs/test_llmobs_decorators.py | 8 +- tests/llmobs/test_llmobs_service.py | 271 ++++++------------ .../test_llmobs_span_agentless_writer.py | 28 +- tests/llmobs/test_llmobs_span_encoder.py | 72 +++++ tests/llmobs/test_llmobs_trace_processor.py | 98 ++++--- 19 files changed, 495 insertions(+), 563 deletions(-) create mode 100644 tests/llmobs/test_llmobs_span_encoder.py diff --git a/ddtrace/llmobs/_integrations/anthropic.py b/ddtrace/llmobs/_integrations/anthropic.py index 0747d68e77b..dfb39c0f7e9 100644 --- a/ddtrace/llmobs/_integrations/anthropic.py +++ b/ddtrace/llmobs/_integrations/anthropic.py @@ -19,7 +19,6 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._utils import _get_attr -from ddtrace.llmobs._utils import safe_json log = get_logger(__name__) @@ -66,21 +65,21 @@ def _llmobs_set_tags( system_prompt = kwargs.get("system") input_messages = self._extract_input_message(messages, system_prompt) - span.set_tag_str(SPAN_KIND, "llm") - span.set_tag_str(MODEL_NAME, span.get_tag("anthropic.request.model") or "") - span.set_tag_str(INPUT_MESSAGES, safe_json(input_messages)) - span.set_tag_str(METADATA, safe_json(parameters)) - span.set_tag_str(MODEL_PROVIDER, "anthropic") - - if span.error or response is None: - span.set_tag_str(OUTPUT_MESSAGES, json.dumps([{"content": ""}])) - else: + output_messages = [{"content": ""}] + if not span.error and response is not None: output_messages = self._extract_output_message(response) - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages)) - usage = self._get_llmobs_metrics_tags(span) - if usage: - span.set_tag_str(METRICS, safe_json(usage)) + span._set_ctx_items( + { + SPAN_KIND: "llm", + MODEL_NAME: span.get_tag("anthropic.request.model") or "", + MODEL_PROVIDER: "anthropic", + INPUT_MESSAGES: input_messages, + METADATA: parameters, + OUTPUT_MESSAGES: output_messages, + METRICS: 
self._get_llmobs_metrics_tags(span), + } + ) def _extract_input_message(self, messages, system_prompt=None): """Extract input messages from the stored prompt. diff --git a/ddtrace/llmobs/_integrations/bedrock.py b/ddtrace/llmobs/_integrations/bedrock.py index 78798ae4f98..bf8b020ebea 100644 --- a/ddtrace/llmobs/_integrations/bedrock.py +++ b/ddtrace/llmobs/_integrations/bedrock.py @@ -19,7 +19,6 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations import BaseLLMIntegration from ddtrace.llmobs._utils import _get_llmobs_parent_id -from ddtrace.llmobs._utils import safe_json log = get_logger(__name__) @@ -37,9 +36,9 @@ def _llmobs_set_tags( operation: str = "", ) -> None: """Extract prompt/response tags from a completion and set them as temporary "_ml_obs.*" tags.""" - if span.get_tag(PROPAGATED_PARENT_ID_KEY) is None: + if span._get_ctx_item(PROPAGATED_PARENT_ID_KEY) is None: parent_id = _get_llmobs_parent_id(span) or "undefined" - span.set_tag(PARENT_ID_KEY, parent_id) + span._set_ctx_item(PARENT_ID_KEY, parent_id) parameters = {} if span.get_tag("bedrock.request.temperature"): parameters["temperature"] = float(span.get_tag("bedrock.request.temperature") or 0.0) @@ -48,20 +47,20 @@ def _llmobs_set_tags( prompt = kwargs.get("prompt", "") input_messages = self._extract_input_message(prompt) - - span.set_tag_str(SPAN_KIND, "llm") - span.set_tag_str(MODEL_NAME, span.get_tag("bedrock.request.model") or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag("bedrock.request.model_provider") or "") - - span.set_tag_str(INPUT_MESSAGES, safe_json(input_messages)) - span.set_tag_str(METADATA, safe_json(parameters)) - if span.error or response is None: - span.set_tag_str(OUTPUT_MESSAGES, safe_json([{"content": ""}])) - else: + output_messages = [{"content": ""}] + if not span.error and response is not None: output_messages = self._extract_output_message(response) - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages)) - 
metrics = self._llmobs_metrics(span, response) - span.set_tag_str(METRICS, safe_json(metrics)) + span._set_ctx_items( + { + SPAN_KIND: "llm", + MODEL_NAME: span.get_tag("bedrock.request.model") or "", + MODEL_PROVIDER: span.get_tag("bedrock.request.model_provider") or "", + INPUT_MESSAGES: input_messages, + METADATA: parameters, + METRICS: self._llmobs_metrics(span, response), + OUTPUT_MESSAGES: output_messages, + } + ) @staticmethod def _llmobs_metrics(span: Span, response: Optional[Dict[str, Any]]) -> Dict[str, Any]: diff --git a/ddtrace/llmobs/_integrations/gemini.py b/ddtrace/llmobs/_integrations/gemini.py index f1a4730812f..491187475f0 100644 --- a/ddtrace/llmobs/_integrations/gemini.py +++ b/ddtrace/llmobs/_integrations/gemini.py @@ -19,7 +19,6 @@ from ddtrace.llmobs._integrations.utils import get_system_instructions_from_google_model from ddtrace.llmobs._integrations.utils import llmobs_get_metadata_google from ddtrace.llmobs._utils import _get_attr -from ddtrace.llmobs._utils import safe_json class GeminiIntegration(BaseLLMIntegration): @@ -41,28 +40,28 @@ def _llmobs_set_tags( response: Optional[Any] = None, operation: str = "", ) -> None: - span.set_tag_str(SPAN_KIND, "llm") - span.set_tag_str(MODEL_NAME, span.get_tag("google_generativeai.request.model") or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag("google_generativeai.request.provider") or "") - instance = kwargs.get("instance", None) metadata = llmobs_get_metadata_google(kwargs, instance) - span.set_tag_str(METADATA, safe_json(metadata)) system_instruction = get_system_instructions_from_google_model(instance) input_contents = get_argument_value(args, kwargs, 0, "contents") input_messages = self._extract_input_message(input_contents, system_instruction) - span.set_tag_str(INPUT_MESSAGES, safe_json(input_messages)) - if span.error or response is None: - span.set_tag_str(OUTPUT_MESSAGES, safe_json([{"content": ""}])) - else: + output_messages = [{"content": ""}] + if not span.error and response 
is not None: output_messages = self._extract_output_message(response) - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages)) - usage = get_llmobs_metrics_tags_google("google_generativeai", span) - if usage: - span.set_tag_str(METRICS, safe_json(usage)) + span._set_ctx_items( + { + SPAN_KIND: "llm", + MODEL_NAME: span.get_tag("google_generativeai.request.model") or "", + MODEL_PROVIDER: span.get_tag("google_generativeai.request.provider") or "", + METADATA: metadata, + INPUT_MESSAGES: input_messages, + OUTPUT_MESSAGES: output_messages, + METRICS: get_llmobs_metrics_tags_google("google_generativeai", span), + } + ) def _extract_input_message(self, contents, system_instruction=None): messages = [] diff --git a/ddtrace/llmobs/_integrations/langchain.py b/ddtrace/llmobs/_integrations/langchain.py index 2128458253d..1fce3d11804 100644 --- a/ddtrace/llmobs/_integrations/langchain.py +++ b/ddtrace/llmobs/_integrations/langchain.py @@ -28,7 +28,6 @@ from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration -from ddtrace.llmobs._utils import safe_json from ddtrace.llmobs.utils import Document @@ -130,15 +129,11 @@ def _llmobs_set_metadata(self, span: Span, model_provider: Optional[str] = None) if max_tokens is not None and max_tokens != "None": metadata["max_tokens"] = int(max_tokens) if metadata: - span.set_tag_str(METADATA, safe_json(metadata)) + span._set_ctx_item(METADATA, metadata) def _llmobs_set_tags_from_llm( self, span: Span, args: List[Any], kwargs: Dict[str, Any], completions: Any, is_workflow: bool = False ) -> None: - span.set_tag_str(SPAN_KIND, "workflow" if is_workflow else "llm") - span.set_tag_str(MODEL_NAME, span.get_tag(MODEL) or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag(PROVIDER) or "") - input_tag_key = INPUT_VALUE if is_workflow else INPUT_MESSAGES output_tag_key = OUTPUT_VALUE if is_workflow else OUTPUT_MESSAGES stream 
= span.get_tag("langchain.request.stream") @@ -146,21 +141,28 @@ def _llmobs_set_tags_from_llm( prompts = get_argument_value(args, kwargs, 0, "input" if stream else "prompts") if isinstance(prompts, str) or not isinstance(prompts, list): prompts = [prompts] - if stream: # chat and llm take the same input types for streamed calls - span.set_tag_str(input_tag_key, safe_json(self._handle_stream_input_messages(prompts))) + input_messages = self._handle_stream_input_messages(prompts) else: - span.set_tag_str(input_tag_key, safe_json([{"content": str(prompt)} for prompt in prompts])) + input_messages = [{"content": str(prompt)} for prompt in prompts] + + span._set_ctx_items( + { + SPAN_KIND: "workflow" if is_workflow else "llm", + MODEL_NAME: span.get_tag(MODEL) or "", + MODEL_PROVIDER: span.get_tag(PROVIDER) or "", + input_tag_key: input_messages, + } + ) if span.error: - span.set_tag_str(output_tag_key, safe_json([{"content": ""}])) + span._set_ctx_item(output_tag_key, [{"content": ""}]) return if stream: message_content = [{"content": completions}] # single completion for streams else: message_content = [{"content": completion[0].text} for completion in completions.generations] - if not is_workflow: input_tokens, output_tokens, total_tokens = self.check_token_usage_chat_or_llm_result(completions) if total_tokens > 0: @@ -169,8 +171,8 @@ def _llmobs_set_tags_from_llm( OUTPUT_TOKENS_METRIC_KEY: output_tokens, TOTAL_TOKENS_METRIC_KEY: total_tokens, } - span.set_tag_str(METRICS, safe_json(metrics)) - span.set_tag_str(output_tag_key, safe_json(message_content)) + span._set_ctx_item(METRICS, metrics) + span._set_ctx_item(output_tag_key, message_content) def _llmobs_set_tags_from_chat_model( self, @@ -180,10 +182,13 @@ def _llmobs_set_tags_from_chat_model( chat_completions: Any, is_workflow: bool = False, ) -> None: - span.set_tag_str(SPAN_KIND, "workflow" if is_workflow else "llm") - span.set_tag_str(MODEL_NAME, span.get_tag(MODEL) or "") - span.set_tag_str(MODEL_PROVIDER, 
span.get_tag(PROVIDER) or "") - + span._set_ctx_items( + { + SPAN_KIND: "workflow" if is_workflow else "llm", + MODEL_NAME: span.get_tag(MODEL) or "", + MODEL_PROVIDER: span.get_tag(PROVIDER) or "", + } + ) input_tag_key = INPUT_VALUE if is_workflow else INPUT_MESSAGES output_tag_key = OUTPUT_VALUE if is_workflow else OUTPUT_MESSAGES stream = span.get_tag("langchain.request.stream") @@ -203,17 +208,17 @@ def _llmobs_set_tags_from_chat_model( ) role = getattr(message, "role", ROLE_MAPPING.get(message.type, "")) input_messages.append({"content": str(content), "role": str(role)}) - span.set_tag_str(input_tag_key, safe_json(input_messages)) + span._set_ctx_item(input_tag_key, input_messages) if span.error: - span.set_tag_str(output_tag_key, json.dumps([{"content": ""}])) + span._set_ctx_item(output_tag_key, [{"content": ""}]) return output_messages = [] if stream: content = chat_completions.content role = chat_completions.__class__.__name__.replace("MessageChunk", "").lower() # AIMessageChunk --> ai - span.set_tag_str(output_tag_key, safe_json([{"content": content, "role": ROLE_MAPPING.get(role, "")}])) + span._set_ctx_item(output_tag_key, [{"content": content, "role": ROLE_MAPPING.get(role, "")}]) return input_tokens, output_tokens, total_tokens = 0, 0, 0 @@ -249,7 +254,7 @@ def _llmobs_set_tags_from_chat_model( output_tokens = sum(v["output_tokens"] for v in tokens_per_choice_run_id.values()) total_tokens = sum(v["total_tokens"] for v in tokens_per_choice_run_id.values()) - span.set_tag_str(output_tag_key, safe_json(output_messages)) + span._set_ctx_item(output_tag_key, output_messages) if not is_workflow and total_tokens > 0: metrics = { @@ -257,7 +262,7 @@ def _llmobs_set_tags_from_chat_model( OUTPUT_TOKENS_METRIC_KEY: output_tokens, TOTAL_TOKENS_METRIC_KEY: total_tokens, } - span.set_tag_str(METRICS, safe_json(metrics)) + span._set_ctx_item(METRICS, metrics) def _extract_tool_calls(self, chat_completion_msg: Any) -> List[Dict[str, Any]]: """Extracts tool calls 
from a langchain chat completion.""" @@ -301,20 +306,17 @@ def _handle_stream_input_messages(self, inputs): return input_messages def _llmobs_set_meta_tags_from_chain(self, span: Span, args, kwargs, outputs: Any) -> None: - span.set_tag_str(SPAN_KIND, "workflow") - stream = span.get_tag("langchain.request.stream") - if stream: + if span.get_tag("langchain.request.stream"): inputs = get_argument_value(args, kwargs, 0, "input") else: inputs = kwargs + formatted_inputs = "" if inputs is not None: formatted_inputs = self.format_io(inputs) - span.set_tag_str(INPUT_VALUE, safe_json(formatted_inputs)) - if span.error or outputs is None: - span.set_tag_str(OUTPUT_VALUE, "") - return - formatted_outputs = self.format_io(outputs) - span.set_tag_str(OUTPUT_VALUE, safe_json(formatted_outputs)) + formatted_outputs = "" + if not span.error and outputs is not None: + formatted_outputs = self.format_io(outputs) + span._set_ctx_items({SPAN_KIND: "workflow", INPUT_VALUE: formatted_inputs, OUTPUT_VALUE: formatted_outputs}) def _llmobs_set_meta_tags_from_embedding( self, @@ -324,13 +326,15 @@ def _llmobs_set_meta_tags_from_embedding( output_embedding: Union[List[float], List[List[float]], None], is_workflow: bool = False, ) -> None: - span.set_tag_str(SPAN_KIND, "workflow" if is_workflow else "embedding") - span.set_tag_str(MODEL_NAME, span.get_tag(MODEL) or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag(PROVIDER) or "") - + span._set_ctx_items( + { + SPAN_KIND: "workflow" if is_workflow else "embedding", + MODEL_NAME: span.get_tag(MODEL) or "", + MODEL_PROVIDER: span.get_tag(PROVIDER) or "", + } + ) input_tag_key = INPUT_VALUE if is_workflow else INPUT_DOCUMENTS output_tag_key = OUTPUT_VALUE - output_values: Any try: @@ -343,16 +347,16 @@ def _llmobs_set_meta_tags_from_embedding( ): if is_workflow: formatted_inputs = self.format_io(input_texts) - span.set_tag_str(input_tag_key, safe_json(formatted_inputs)) + span._set_ctx_item(input_tag_key, formatted_inputs) else: if 
isinstance(input_texts, str): input_texts = [input_texts] input_documents = [Document(text=str(doc)) for doc in input_texts] - span.set_tag_str(input_tag_key, safe_json(input_documents)) + span._set_ctx_item(input_tag_key, input_documents) except TypeError: log.warning("Failed to serialize embedding input data to JSON") if span.error or output_embedding is None: - span.set_tag_str(output_tag_key, "") + span._set_ctx_item(output_tag_key, "") return try: if isinstance(output_embedding[0], float): @@ -364,7 +368,7 @@ def _llmobs_set_meta_tags_from_embedding( output_values = output_embedding embeddings_count = len(output_embedding) embedding_dim = len(output_values[0]) - span.set_tag_str( + span._set_ctx_item( output_tag_key, "[{} embedding(s) returned with size {}]".format(embeddings_count, embedding_dim), ) @@ -379,19 +383,22 @@ def _llmobs_set_meta_tags_from_similarity_search( output_documents: Union[List[Any], None], is_workflow: bool = False, ) -> None: - span.set_tag_str(SPAN_KIND, "workflow" if is_workflow else "retrieval") - span.set_tag_str(MODEL_NAME, span.get_tag(MODEL) or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag(PROVIDER) or "") - + span._set_ctx_items( + { + SPAN_KIND: "workflow" if is_workflow else "retrieval", + MODEL_NAME: span.get_tag(MODEL) or "", + MODEL_PROVIDER: span.get_tag(PROVIDER) or "", + } + ) input_query = get_argument_value(args, kwargs, 0, "query") if input_query is not None: formatted_inputs = self.format_io(input_query) - span.set_tag_str(INPUT_VALUE, safe_json(formatted_inputs)) + span._set_ctx_item(INPUT_VALUE, formatted_inputs) if span.error or not output_documents or not isinstance(output_documents, list): - span.set_tag_str(OUTPUT_VALUE, "") + span._set_ctx_item(OUTPUT_VALUE, "") return if is_workflow: - span.set_tag_str(OUTPUT_VALUE, "[{} document(s) retrieved]".format(len(output_documents))) + span._set_ctx_item(OUTPUT_VALUE, "[{} document(s) retrieved]".format(len(output_documents))) return documents = [] for d in 
output_documents: @@ -400,32 +407,31 @@ def _llmobs_set_meta_tags_from_similarity_search( metadata = getattr(d, "metadata", {}) doc["name"] = metadata.get("name", doc["id"]) documents.append(doc) - span.set_tag_str(OUTPUT_DOCUMENTS, safe_json(self.format_io(documents))) + span._set_ctx_item(OUTPUT_DOCUMENTS, self.format_io(documents)) # we set the value as well to ensure that the UI would display it in case the span was the root - span.set_tag_str(OUTPUT_VALUE, "[{} document(s) retrieved]".format(len(documents))) + span._set_ctx_item(OUTPUT_VALUE, "[{} document(s) retrieved]".format(len(documents))) def _llmobs_set_meta_tags_from_tool(self, span: Span, tool_inputs: Dict[str, Any], tool_output: object) -> None: - if span.get_tag(METADATA): - metadata = json.loads(str(span.get_tag(METADATA))) - else: - metadata = {} - - span.set_tag_str(SPAN_KIND, "tool") + metadata = json.loads(str(span.get_tag(METADATA))) if span.get_tag(METADATA) else {} + formatted_input = "" if tool_inputs is not None: tool_input = tool_inputs.get("input") if tool_inputs.get("config"): metadata["tool_config"] = tool_inputs.get("config") if tool_inputs.get("info"): metadata["tool_info"] = tool_inputs.get("info") - if metadata: - span.set_tag_str(METADATA, safe_json(metadata)) formatted_input = self.format_io(tool_input) - span.set_tag_str(INPUT_VALUE, safe_json(formatted_input)) - if span.error or tool_output is None: - span.set_tag_str(OUTPUT_VALUE, "") - return - formatted_outputs = self.format_io(tool_output) - span.set_tag_str(OUTPUT_VALUE, safe_json(formatted_outputs)) + formatted_outputs = "" + if not span.error and tool_output is not None: + formatted_outputs = self.format_io(tool_output) + span._set_ctx_items( + { + SPAN_KIND: "tool", + METADATA: metadata, + INPUT_VALUE: formatted_input, + OUTPUT_VALUE: formatted_outputs, + } + ) def _set_base_span_tags( # type: ignore[override] self, diff --git a/ddtrace/llmobs/_integrations/openai.py b/ddtrace/llmobs/_integrations/openai.py index 
5c9e73eaca7..bd727b1a5a2 100644 --- a/ddtrace/llmobs/_integrations/openai.py +++ b/ddtrace/llmobs/_integrations/openai.py @@ -23,7 +23,6 @@ from ddtrace.llmobs._constants import TOTAL_TOKENS_METRIC_KEY from ddtrace.llmobs._integrations.base import BaseLLMIntegration from ddtrace.llmobs._utils import _get_attr -from ddtrace.llmobs._utils import safe_json from ddtrace.llmobs.utils import Document from ddtrace.pin import Pin @@ -148,19 +147,18 @@ def _llmobs_set_tags( ) -> None: """Sets meta tags and metrics for span events to be sent to LLMObs.""" span_kind = "embedding" if operation == "embedding" else "llm" - span.set_tag_str(SPAN_KIND, span_kind) model_name = span.get_tag("openai.response.model") or span.get_tag("openai.request.model") - span.set_tag_str(MODEL_NAME, model_name or "") model_provider = "azure_openai" if self._is_azure_openai(span) else "openai" - span.set_tag_str(MODEL_PROVIDER, model_provider) if operation == "completion": self._llmobs_set_meta_tags_from_completion(span, kwargs, response) elif operation == "chat": self._llmobs_set_meta_tags_from_chat(span, kwargs, response) elif operation == "embedding": self._llmobs_set_meta_tags_from_embedding(span, kwargs, response) - metrics = self._set_llmobs_metrics_tags(span, response) - span.set_tag_str(METRICS, safe_json(metrics)) + metrics = self._extract_llmobs_metrics_tags(span, response) + span._set_ctx_items( + {SPAN_KIND: span_kind, MODEL_NAME: model_name or "", MODEL_PROVIDER: model_provider, METRICS: metrics} + ) @staticmethod def _llmobs_set_meta_tags_from_completion(span: Span, kwargs: Dict[str, Any], completions: Any) -> None: @@ -168,20 +166,18 @@ def _llmobs_set_meta_tags_from_completion(span: Span, kwargs: Dict[str, Any], co prompt = kwargs.get("prompt", "") if isinstance(prompt, str): prompt = [prompt] - span.set_tag_str(INPUT_MESSAGES, safe_json([{"content": str(p)} for p in prompt])) - parameters = {k: v for k, v in kwargs.items() if k not in ("model", "prompt")} - 
span.set_tag_str(METADATA, safe_json(parameters)) - - if span.error or not completions: - span.set_tag_str(OUTPUT_MESSAGES, safe_json([{"content": ""}])) - return - if hasattr(completions, "choices"): # non-streaming response - choices = completions.choices - else: # streamed response - choices = completions - messages = [{"content": _get_attr(choice, "text", "")} for choice in choices] - span.set_tag_str(OUTPUT_MESSAGES, safe_json(messages)) + output_messages = [{"content": ""}] + if not span.error and completions: + choices = getattr(completions, "choices", completions) + output_messages = [{"content": _get_attr(choice, "text", "")} for choice in choices] + span._set_ctx_items( + { + INPUT_MESSAGES: [{"content": str(p)} for p in prompt], + METADATA: parameters, + OUTPUT_MESSAGES: output_messages, + } + ) @staticmethod def _llmobs_set_meta_tags_from_chat(span: Span, kwargs: Dict[str, Any], messages: Optional[Any]) -> None: @@ -189,16 +185,14 @@ def _llmobs_set_meta_tags_from_chat(span: Span, kwargs: Dict[str, Any], messages input_messages = [] for m in kwargs.get("messages", []): input_messages.append({"content": str(_get_attr(m, "content", "")), "role": str(_get_attr(m, "role", ""))}) - span.set_tag_str(INPUT_MESSAGES, safe_json(input_messages)) - parameters = {k: v for k, v in kwargs.items() if k not in ("model", "messages", "tools", "functions")} - span.set_tag_str(METADATA, safe_json(parameters)) + span._set_ctx_items({INPUT_MESSAGES: input_messages, METADATA: parameters}) if span.error or not messages: - span.set_tag_str(OUTPUT_MESSAGES, safe_json([{"content": ""}])) + span._set_ctx_item(OUTPUT_MESSAGES, [{"content": ""}]) return - output_messages = [] if isinstance(messages, list): # streamed response + output_messages = [] for streamed_message in messages: message = {"content": streamed_message["content"], "role": streamed_message["role"]} tool_calls = streamed_message.get("tool_calls", []) @@ -213,9 +207,10 @@ def _llmobs_set_meta_tags_from_chat(span: 
Span, kwargs: Dict[str, Any], messages for tool_call in tool_calls ] output_messages.append(message) - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages)) + span._set_ctx_item(OUTPUT_MESSAGES, output_messages) return choices = _get_attr(messages, "choices", []) + output_messages = [] for idx, choice in enumerate(choices): tool_calls_info = [] choice_message = _get_attr(choice, "message", {}) @@ -241,7 +236,7 @@ def _llmobs_set_meta_tags_from_chat(span: Span, kwargs: Dict[str, Any], messages output_messages.append({"content": content, "role": role, "tool_calls": tool_calls_info}) continue output_messages.append({"content": content, "role": role}) - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages)) + span._set_ctx_item(OUTPUT_MESSAGES, output_messages) @staticmethod def _llmobs_set_meta_tags_from_embedding(span: Span, kwargs: Dict[str, Any], resp: Any) -> None: @@ -250,7 +245,6 @@ def _llmobs_set_meta_tags_from_embedding(span: Span, kwargs: Dict[str, Any], res metadata = {"encoding_format": encoding_format} if kwargs.get("dimensions"): metadata["dimensions"] = kwargs.get("dimensions") - span.set_tag_str(METADATA, safe_json(metadata)) embedding_inputs = kwargs.get("input", "") if isinstance(embedding_inputs, str) or isinstance(embedding_inputs[0], int): @@ -258,20 +252,19 @@ def _llmobs_set_meta_tags_from_embedding(span: Span, kwargs: Dict[str, Any], res input_documents = [] for doc in embedding_inputs: input_documents.append(Document(text=str(doc))) - span.set_tag_str(INPUT_DOCUMENTS, safe_json(input_documents)) - + span._set_ctx_items({METADATA: metadata, INPUT_DOCUMENTS: input_documents}) if span.error: return if encoding_format == "float": embedding_dim = len(resp.data[0].embedding) - span.set_tag_str( + span._set_ctx_item( OUTPUT_VALUE, "[{} embedding(s) returned with size {}]".format(len(resp.data), embedding_dim) ) return - span.set_tag_str(OUTPUT_VALUE, "[{} embedding(s) returned]".format(len(resp.data))) + 
span._set_ctx_item(OUTPUT_VALUE, "[{} embedding(s) returned]".format(len(resp.data))) @staticmethod - def _set_llmobs_metrics_tags(span: Span, resp: Any) -> Dict[str, Any]: + def _extract_llmobs_metrics_tags(span: Span, resp: Any) -> Dict[str, Any]: """Extract metrics from a chat/completion and set them as a temporary "_ml_obs.metrics" tag.""" token_usage = _get_attr(resp, "usage", None) if token_usage is not None: diff --git a/ddtrace/llmobs/_integrations/vertexai.py b/ddtrace/llmobs/_integrations/vertexai.py index 69fdc7eb665..4019268e0c4 100644 --- a/ddtrace/llmobs/_integrations/vertexai.py +++ b/ddtrace/llmobs/_integrations/vertexai.py @@ -19,7 +19,6 @@ from ddtrace.llmobs._integrations.utils import get_system_instructions_from_google_model from ddtrace.llmobs._integrations.utils import llmobs_get_metadata_google from ddtrace.llmobs._utils import _get_attr -from ddtrace.llmobs._utils import safe_json class VertexAIIntegration(BaseLLMIntegration): @@ -41,30 +40,29 @@ def _llmobs_set_tags( response: Optional[Any] = None, operation: str = "", ) -> None: - span.set_tag_str(SPAN_KIND, "llm") - span.set_tag_str(MODEL_NAME, span.get_tag("vertexai.request.model") or "") - span.set_tag_str(MODEL_PROVIDER, span.get_tag("vertexai.request.provider") or "") - instance = kwargs.get("instance", None) history = kwargs.get("history", []) metadata = llmobs_get_metadata_google(kwargs, instance) - span.set_tag_str(METADATA, safe_json(metadata)) system_instruction = get_system_instructions_from_google_model(instance) input_contents = get_argument_value(args, kwargs, 0, "contents") input_messages = self._extract_input_message(input_contents, history, system_instruction) - span.set_tag_str(INPUT_MESSAGES, safe_json(input_messages)) - - if span.error or response is None: - span.set_tag_str(OUTPUT_MESSAGES, safe_json([{"content": ""}])) - return - output_messages = self._extract_output_message(response) - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages)) + output_messages = 
[{"content": ""}] + if not span.error and response is not None: + output_messages = self._extract_output_message(response) - usage = get_llmobs_metrics_tags_google("vertexai", span) - if usage: - span.set_tag_str(METRICS, safe_json(usage)) + span._set_ctx_items( + { + SPAN_KIND: "llm", + MODEL_NAME: span.get_tag("vertexai.request.model") or "", + MODEL_PROVIDER: span.get_tag("vertexai.request.provider") or "", + METADATA: metadata, + INPUT_MESSAGES: input_messages, + OUTPUT_MESSAGES: output_messages, + METRICS: get_llmobs_metrics_tags_google("vertexai", span), + } + ) def _extract_input_message(self, contents, history, system_instruction=None): from vertexai.generative_models._generative_models import Part diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py index 808cee89e0f..867edbdca4f 100644 --- a/ddtrace/llmobs/_llmobs.py +++ b/ddtrace/llmobs/_llmobs.py @@ -399,23 +399,23 @@ def _start_span( if name is None: name = operation_kind span = self.tracer.trace(name, resource=operation_kind, span_type=SpanTypes.LLM) - span.set_tag_str(SPAN_KIND, operation_kind) + span._set_ctx_item(SPAN_KIND, operation_kind) if model_name is not None: - span.set_tag_str(MODEL_NAME, model_name) + span._set_ctx_item(MODEL_NAME, model_name) if model_provider is not None: - span.set_tag_str(MODEL_PROVIDER, model_provider) + span._set_ctx_item(MODEL_PROVIDER, model_provider) session_id = session_id if session_id is not None else _get_session_id(span) if session_id is not None: - span.set_tag_str(SESSION_ID, session_id) + span._set_ctx_item(SESSION_ID, session_id) if ml_app is None: ml_app = _get_ml_app(span) - span.set_tag_str(ML_APP, ml_app) - if span.get_tag(PROPAGATED_PARENT_ID_KEY) is None: + span._set_ctx_item(ML_APP, ml_app) + if span._get_ctx_item(PROPAGATED_PARENT_ID_KEY) is None: # For non-distributed traces or spans in the first service of a distributed trace, # The LLMObs parent ID tag is not set at span start time. 
We need to manually set the parent ID tag now # in these cases to avoid conflicting with the later propagated tags. parent_id = _get_llmobs_parent_id(span) or "undefined" - span.set_tag_str(PARENT_ID_KEY, str(parent_id)) + span._set_ctx_item(PARENT_ID_KEY, str(parent_id)) return span @classmethod @@ -638,7 +638,7 @@ def annotate( cls._tag_metrics(span, metrics) if tags is not None: cls._tag_span_tags(span, tags) - span_kind = span.get_tag(SPAN_KIND) + span_kind = span._get_ctx_item(SPAN_KIND) if parameters is not None: log.warning("Setting parameters is deprecated, please set parameters and other metadata as tags instead.") cls._tag_params(span, parameters) @@ -664,7 +664,7 @@ def _tag_prompt(span, prompt: dict) -> None: """Tags a given LLMObs span with a prompt""" try: validated_prompt = validate_prompt(prompt) - span.set_tag_str(INPUT_PROMPT, safe_json(validated_prompt)) + span._set_ctx_item(INPUT_PROMPT, validated_prompt) except TypeError: log.warning("Failed to validate prompt with error: ", exc_info=True) return @@ -677,7 +677,7 @@ def _tag_params(span: Span, params: Dict[str, Any]) -> None: if not isinstance(params, dict): log.warning("parameters must be a dictionary of key-value pairs.") return - span.set_tag_str(INPUT_PARAMETERS, safe_json(params)) + span._set_ctx_item(INPUT_PARAMETERS, params) @classmethod def _tag_llm_io(cls, span, input_messages=None, output_messages=None): @@ -689,7 +689,7 @@ def _tag_llm_io(cls, span, input_messages=None, output_messages=None): if not isinstance(input_messages, Messages): input_messages = Messages(input_messages) if input_messages.messages: - span.set_tag_str(INPUT_MESSAGES, safe_json(input_messages.messages)) + span._set_ctx_item(INPUT_MESSAGES, input_messages.messages) except TypeError: log.warning("Failed to parse input messages.", exc_info=True) if output_messages is None: @@ -699,7 +699,7 @@ def _tag_llm_io(cls, span, input_messages=None, output_messages=None): output_messages = Messages(output_messages) if not 
output_messages.messages: return - span.set_tag_str(OUTPUT_MESSAGES, safe_json(output_messages.messages)) + span._set_ctx_item(OUTPUT_MESSAGES, output_messages.messages) except TypeError: log.warning("Failed to parse output messages.", exc_info=True) @@ -713,12 +713,12 @@ def _tag_embedding_io(cls, span, input_documents=None, output_text=None): if not isinstance(input_documents, Documents): input_documents = Documents(input_documents) if input_documents.documents: - span.set_tag_str(INPUT_DOCUMENTS, safe_json(input_documents.documents)) + span._set_ctx_item(INPUT_DOCUMENTS, input_documents.documents) except TypeError: log.warning("Failed to parse input documents.", exc_info=True) if output_text is None: return - span.set_tag_str(OUTPUT_VALUE, safe_json(output_text)) + span._set_ctx_item(OUTPUT_VALUE, str(output_text)) @classmethod def _tag_retrieval_io(cls, span, input_text=None, output_documents=None): @@ -726,7 +726,7 @@ def _tag_retrieval_io(cls, span, input_text=None, output_documents=None): Will be mapped to span's `meta.{input,output}.text` fields. """ if input_text is not None: - span.set_tag_str(INPUT_VALUE, safe_json(input_text)) + span._set_ctx_item(INPUT_VALUE, str(input_text)) if output_documents is None: return try: @@ -734,7 +734,7 @@ def _tag_retrieval_io(cls, span, input_text=None, output_documents=None): output_documents = Documents(output_documents) if not output_documents.documents: return - span.set_tag_str(OUTPUT_DOCUMENTS, safe_json(output_documents.documents)) + span._set_ctx_item(OUTPUT_DOCUMENTS, output_documents.documents) except TypeError: log.warning("Failed to parse output documents.", exc_info=True) @@ -744,9 +744,9 @@ def _tag_text_io(cls, span, input_value=None, output_value=None): Will be mapped to span's `meta.{input,output}.values` fields. 
""" if input_value is not None: - span.set_tag_str(INPUT_VALUE, safe_json(input_value)) + span._set_ctx_item(INPUT_VALUE, str(input_value)) if output_value is not None: - span.set_tag_str(OUTPUT_VALUE, safe_json(output_value)) + span._set_ctx_item(OUTPUT_VALUE, str(output_value)) @staticmethod def _tag_span_tags(span: Span, span_tags: Dict[str, Any]) -> None: @@ -759,12 +759,9 @@ def _tag_span_tags(span: Span, span_tags: Dict[str, Any]) -> None: log.warning("span_tags must be a dictionary of string key - primitive value pairs.") return try: - current_tags_str = span.get_tag(TAGS) - if current_tags_str: - current_tags = json.loads(current_tags_str) - current_tags.update(span_tags) - span_tags = current_tags - span.set_tag_str(TAGS, safe_json(span_tags)) + existing_tags = span._get_ctx_item(TAGS) or {} + existing_tags.update(span_tags) + span._set_ctx_item(TAGS, existing_tags) except Exception: log.warning("Failed to parse tags.", exc_info=True) @@ -776,7 +773,7 @@ def _tag_metadata(span: Span, metadata: Dict[str, Any]) -> None: if not isinstance(metadata, dict): log.warning("metadata must be a dictionary of string key-value pairs.") return - span.set_tag_str(METADATA, safe_json(metadata)) + span._set_ctx_item(METADATA, metadata) @staticmethod def _tag_metrics(span: Span, metrics: Dict[str, Any]) -> None: @@ -786,7 +783,7 @@ def _tag_metrics(span: Span, metrics: Dict[str, Any]) -> None: if not isinstance(metrics, dict): log.warning("metrics must be a dictionary of string key - numeric value pairs.") return - span.set_tag_str(METRICS, safe_json(metrics)) + span._set_ctx_item(METRICS, metrics) @classmethod def submit_evaluation( diff --git a/ddtrace/llmobs/_trace_processor.py b/ddtrace/llmobs/_trace_processor.py index b4af0c5ffd1..231d53d7626 100644 --- a/ddtrace/llmobs/_trace_processor.py +++ b/ddtrace/llmobs/_trace_processor.py @@ -1,4 +1,3 @@ -import json from typing import Any from typing import Dict from typing import List @@ -27,7 +26,6 @@ from 
ddtrace.llmobs._constants import OUTPUT_DOCUMENTS from ddtrace.llmobs._constants import OUTPUT_MESSAGES from ddtrace.llmobs._constants import OUTPUT_VALUE -from ddtrace.llmobs._constants import PARENT_ID_KEY from ddtrace.llmobs._constants import RAGAS_ML_APP_PREFIX from ddtrace.llmobs._constants import RUNNER_IS_INTEGRATION_SPAN_TAG from ddtrace.llmobs._constants import SESSION_ID @@ -37,6 +35,7 @@ from ddtrace.llmobs._utils import _get_ml_app from ddtrace.llmobs._utils import _get_session_id from ddtrace.llmobs._utils import _get_span_name +from ddtrace.llmobs._utils import safe_json log = get_logger(__name__) @@ -62,7 +61,7 @@ def process_trace(self, trace: List[Span]) -> Optional[List[Span]]: def submit_llmobs_span(self, span: Span) -> None: """Generate and submit an LLMObs span event to be sent to LLMObs.""" span_event = None - is_llm_span = span.get_tag(SPAN_KIND) == "llm" + is_llm_span = span._get_ctx_item(SPAN_KIND) == "llm" is_ragas_integration_span = False try: span_event, is_ragas_integration_span = self._llmobs_span_event(span) @@ -77,44 +76,49 @@ def submit_llmobs_span(self, span: Span) -> None: def _llmobs_span_event(self, span: Span) -> Tuple[Dict[str, Any], bool]: """Span event object structure.""" - span_kind = span._meta.pop(SPAN_KIND) + span_kind = span._get_ctx_item(SPAN_KIND) + if not span_kind: + raise KeyError("Span kind not found in span context") meta: Dict[str, Any] = {"span.kind": span_kind, "input": {}, "output": {}} - if span_kind in ("llm", "embedding") and span.get_tag(MODEL_NAME) is not None: - meta["model_name"] = span._meta.pop(MODEL_NAME) - meta["model_provider"] = span._meta.pop(MODEL_PROVIDER, "custom").lower() - if span.get_tag(METADATA) is not None: - meta["metadata"] = json.loads(span._meta.pop(METADATA)) - if span.get_tag(INPUT_PARAMETERS): - meta["input"]["parameters"] = json.loads(span._meta.pop(INPUT_PARAMETERS)) - if span_kind == "llm" and span.get_tag(INPUT_MESSAGES) is not None: - meta["input"]["messages"] = 
json.loads(span._meta.pop(INPUT_MESSAGES)) - if span.get_tag(INPUT_VALUE) is not None: - meta["input"]["value"] = span._meta.pop(INPUT_VALUE) - if span_kind == "llm" and span.get_tag(OUTPUT_MESSAGES) is not None: - meta["output"]["messages"] = json.loads(span._meta.pop(OUTPUT_MESSAGES)) - if span_kind == "embedding" and span.get_tag(INPUT_DOCUMENTS) is not None: - meta["input"]["documents"] = json.loads(span._meta.pop(INPUT_DOCUMENTS)) - if span.get_tag(OUTPUT_VALUE) is not None: - meta["output"]["value"] = span._meta.pop(OUTPUT_VALUE) - if span_kind == "retrieval" and span.get_tag(OUTPUT_DOCUMENTS) is not None: - meta["output"]["documents"] = json.loads(span._meta.pop(OUTPUT_DOCUMENTS)) - if span.get_tag(INPUT_PROMPT) is not None: - prompt_json_str = span._meta.pop(INPUT_PROMPT) + if span_kind in ("llm", "embedding") and span._get_ctx_item(MODEL_NAME) is not None: + meta["model_name"] = span._get_ctx_item(MODEL_NAME) + meta["model_provider"] = (span._get_ctx_item(MODEL_PROVIDER) or "custom").lower() + meta["metadata"] = span._get_ctx_item(METADATA) or {} + if span._get_ctx_item(INPUT_PARAMETERS): + meta["input"]["parameters"] = span._get_ctx_item(INPUT_PARAMETERS) + if span_kind == "llm" and span._get_ctx_item(INPUT_MESSAGES) is not None: + meta["input"]["messages"] = span._get_ctx_item(INPUT_MESSAGES) + if span._get_ctx_item(INPUT_VALUE) is not None: + meta["input"]["value"] = safe_json(span._get_ctx_item(INPUT_VALUE)) + if span_kind == "llm" and span._get_ctx_item(OUTPUT_MESSAGES) is not None: + meta["output"]["messages"] = span._get_ctx_item(OUTPUT_MESSAGES) + if span_kind == "embedding" and span._get_ctx_item(INPUT_DOCUMENTS) is not None: + meta["input"]["documents"] = span._get_ctx_item(INPUT_DOCUMENTS) + if span._get_ctx_item(OUTPUT_VALUE) is not None: + meta["output"]["value"] = safe_json(span._get_ctx_item(OUTPUT_VALUE)) + if span_kind == "retrieval" and span._get_ctx_item(OUTPUT_DOCUMENTS) is not None: + meta["output"]["documents"] = 
span._get_ctx_item(OUTPUT_DOCUMENTS) + if span._get_ctx_item(INPUT_PROMPT) is not None: + prompt_json_str = span._get_ctx_item(INPUT_PROMPT) if span_kind != "llm": log.warning( "Dropping prompt on non-LLM span kind, annotating prompts is only supported for LLM span kinds." ) else: - meta["input"]["prompt"] = json.loads(prompt_json_str) + meta["input"]["prompt"] = prompt_json_str if span.error: - meta[ERROR_MSG] = span.get_tag(ERROR_MSG) - meta[ERROR_STACK] = span.get_tag(ERROR_STACK) - meta[ERROR_TYPE] = span.get_tag(ERROR_TYPE) + meta.update( + { + ERROR_MSG: span.get_tag(ERROR_MSG), + ERROR_STACK: span.get_tag(ERROR_STACK), + ERROR_TYPE: span.get_tag(ERROR_TYPE), + } + ) if not meta["input"]: meta.pop("input") if not meta["output"]: meta.pop("output") - metrics = json.loads(span._meta.pop(METRICS, "{}")) + metrics = span._get_ctx_item(METRICS) or {} ml_app = _get_ml_app(span) is_ragas_integration_span = False @@ -122,10 +126,8 @@ def _llmobs_span_event(self, span: Span) -> Tuple[Dict[str, Any], bool]: if ml_app.startswith(RAGAS_ML_APP_PREFIX): is_ragas_integration_span = True - span.set_tag_str(ML_APP, ml_app) - + span._set_ctx_item(ML_APP, ml_app) parent_id = str(_get_llmobs_parent_id(span) or "undefined") - span._meta.pop(PARENT_ID_KEY, None) llmobs_span_event = { "trace_id": "{:x}".format(span.trace_id), @@ -140,7 +142,7 @@ def _llmobs_span_event(self, span: Span) -> Tuple[Dict[str, Any], bool]: } session_id = _get_session_id(span) if session_id is not None: - span.set_tag_str(SESSION_ID, session_id) + span._set_ctx_item(SESSION_ID, session_id) llmobs_span_event["session_id"] = session_id llmobs_span_event["tags"] = self._llmobs_tags( @@ -169,7 +171,7 @@ def _llmobs_tags( tags["session_id"] = session_id if is_ragas_integration_span: tags[RUNNER_IS_INTEGRATION_SPAN_TAG] = "ragas" - existing_tags = span._meta.pop(TAGS, None) + existing_tags = span._get_ctx_item(TAGS) if existing_tags is not None: - tags.update(json.loads(existing_tags)) + 
tags.update(existing_tags) return ["{}:{}".format(k, v) for k, v in tags.items()] diff --git a/ddtrace/llmobs/_utils.py b/ddtrace/llmobs/_utils.py index 8813788f0a3..c1b1c4a776c 100644 --- a/ddtrace/llmobs/_utils.py +++ b/ddtrace/llmobs/_utils.py @@ -110,8 +110,8 @@ def _get_llmobs_parent_id(span: Span) -> Optional[str]: """Return the span ID of the nearest LLMObs-type span in the span's ancestor tree. In priority order: manually set parent ID tag, nearest LLMObs ancestor, local root's propagated parent ID tag. """ - if span.get_tag(PARENT_ID_KEY): - return span.get_tag(PARENT_ID_KEY) + if span._get_ctx_item(PARENT_ID_KEY): + return span._get_ctx_item(PARENT_ID_KEY) nearest_llmobs_ancestor = _get_nearest_llmobs_ancestor(span) if nearest_llmobs_ancestor: return str(nearest_llmobs_ancestor.span_id) @@ -132,12 +132,12 @@ def _get_ml_app(span: Span) -> str: Return the ML app name for a given span, by checking the span's nearest LLMObs span ancestor. Default to the global config LLMObs ML app name otherwise. """ - ml_app = span.get_tag(ML_APP) + ml_app = span._get_ctx_item(ML_APP) if ml_app: return ml_app nearest_llmobs_ancestor = _get_nearest_llmobs_ancestor(span) if nearest_llmobs_ancestor: - ml_app = nearest_llmobs_ancestor.get_tag(ML_APP) + ml_app = nearest_llmobs_ancestor._get_ctx_item(ML_APP) return ml_app or config._llmobs_ml_app or "unknown-ml-app" @@ -146,12 +146,12 @@ def _get_session_id(span: Span) -> Optional[str]: Return the session ID for a given span, by checking the span's nearest LLMObs span ancestor. Default to the span's trace ID. 
""" - session_id = span.get_tag(SESSION_ID) + session_id = span._get_ctx_item(SESSION_ID) if session_id: return session_id nearest_llmobs_ancestor = _get_nearest_llmobs_ancestor(span) if nearest_llmobs_ancestor: - session_id = nearest_llmobs_ancestor.get_tag(SESSION_ID) + session_id = nearest_llmobs_ancestor._get_ctx_item(SESSION_ID) return session_id diff --git a/ddtrace/llmobs/_writer.py b/ddtrace/llmobs/_writer.py index 6496de96cfe..5a293f05c4e 100644 --- a/ddtrace/llmobs/_writer.py +++ b/ddtrace/llmobs/_writer.py @@ -1,5 +1,4 @@ import atexit -import json from typing import Any from typing import Dict from typing import List @@ -32,6 +31,7 @@ from ddtrace.llmobs._constants import EVP_PROXY_AGENT_ENDPOINT from ddtrace.llmobs._constants import EVP_SUBDOMAIN_HEADER_NAME from ddtrace.llmobs._constants import EVP_SUBDOMAIN_HEADER_VALUE +from ddtrace.llmobs._utils import safe_json logger = get_logger(__name__) @@ -108,11 +108,7 @@ def periodic(self) -> None: self._buffer = [] data = self._data(events) - try: - enc_llm_events = json.dumps(data) - except TypeError: - logger.error("failed to encode %d LLMObs %s events", len(events), self._event_type, exc_info=True) - return + enc_llm_events = safe_json(data) conn = httplib.HTTPSConnection(self._intake, 443, timeout=self._timeout) try: conn.request("POST", self._endpoint, enc_llm_events, self._headers) @@ -197,7 +193,7 @@ def put(self, events: List[LLMObsSpanEvent]): ) return self._buffer.extend(events) - self.buffer_size += len(json.dumps(events)) + self.buffer_size += len(safe_json(events)) def encode(self): with self._lock: @@ -207,7 +203,7 @@ def encode(self): self._init_buffer() data = {"_dd.stage": "raw", "_dd.tracer_version": ddtrace.__version__, "event_type": "span", "spans": events} try: - enc_llm_events = json.dumps(data) + enc_llm_events = safe_json(data) logger.debug("encode %d LLMObs span events to be sent", len(events)) except TypeError: logger.error("failed to encode %d LLMObs span events", len(events), 
exc_info=True) @@ -277,7 +273,7 @@ def stop(self, timeout=None): super(LLMObsSpanWriter, self).stop(timeout=timeout) def enqueue(self, event: LLMObsSpanEvent) -> None: - event_size = len(json.dumps(event)) + event_size = len(safe_json(event)) if event_size >= EVP_EVENT_SIZE_LIMIT: logger.warning( diff --git a/ddtrace/llmobs/decorators.py b/ddtrace/llmobs/decorators.py index 93f329f2889..7e61f9b4e18 100644 --- a/ddtrace/llmobs/decorators.py +++ b/ddtrace/llmobs/decorators.py @@ -172,7 +172,7 @@ def generator_wrapper(*args, **kwargs): func_signature = signature(func) bound_args = func_signature.bind_partial(*args, **kwargs) if _automatic_io_annotation and bound_args.arguments: - LLMObs.annotate(span=span, input_data=bound_args.arguments) + LLMObs.annotate(span=span, input_data=dict(bound_args.arguments)) return yield_from_async_gen(func, span, args, kwargs) @wraps(func) @@ -186,13 +186,13 @@ async def wrapper(*args, **kwargs): func_signature = signature(func) bound_args = func_signature.bind_partial(*args, **kwargs) if _automatic_io_annotation and bound_args.arguments: - LLMObs.annotate(span=span, input_data=bound_args.arguments) + LLMObs.annotate(span=span, input_data=dict(bound_args.arguments)) resp = await func(*args, **kwargs) if ( _automatic_io_annotation and resp and operation_kind != "retrieval" - and span.get_tag(OUTPUT_VALUE) is None + and span._get_ctx_item(OUTPUT_VALUE) is None ): LLMObs.annotate(span=span, output_data=resp) return resp @@ -211,7 +211,7 @@ def generator_wrapper(*args, **kwargs): func_signature = signature(func) bound_args = func_signature.bind_partial(*args, **kwargs) if _automatic_io_annotation and bound_args.arguments: - LLMObs.annotate(span=span, input_data=bound_args.arguments) + LLMObs.annotate(span=span, input_data=dict(bound_args.arguments)) try: yield from func(*args, **kwargs) except (StopIteration, GeneratorExit): @@ -234,13 +234,13 @@ def wrapper(*args, **kwargs): func_signature = signature(func) bound_args = 
func_signature.bind_partial(*args, **kwargs) if _automatic_io_annotation and bound_args.arguments: - LLMObs.annotate(span=span, input_data=bound_args.arguments) + LLMObs.annotate(span=span, input_data=dict(bound_args.arguments)) resp = func(*args, **kwargs) if ( _automatic_io_annotation and resp and operation_kind != "retrieval" - and span.get_tag(OUTPUT_VALUE) is None + and span._get_ctx_item(OUTPUT_VALUE) is None ): LLMObs.annotate(span=span, output_data=resp) return resp diff --git a/tests/contrib/anthropic/test_anthropic_llmobs.py b/tests/contrib/anthropic/test_anthropic_llmobs.py index f286a890209..e2850a4157f 100644 --- a/tests/contrib/anthropic/test_anthropic_llmobs.py +++ b/tests/contrib/anthropic/test_anthropic_llmobs.py @@ -1,6 +1,5 @@ from pathlib import Path -import mock import pytest from tests.llmobs._utils import _expected_llmobs_llm_span_event @@ -117,37 +116,6 @@ def test_error(self, anthropic, ddtrace_global_config, mock_llmobs_writer, mock_ ) ) - def test_error_unserializable_arg( - self, anthropic, ddtrace_global_config, mock_llmobs_writer, mock_tracer, request_vcr - ): - """Ensure we handle unserializable arguments correctly and still emit llmobs records.""" - llm = anthropic.Anthropic() - with pytest.raises(Exception): - llm.messages.create( - model="claude-3-opus-20240229", - max_tokens=object(), - temperature=0.8, - messages=[{"role": "user", "content": "Hello World!"}], - ) - - span = mock_tracer.pop_traces()[0][0] - assert mock_llmobs_writer.enqueue.call_count == 1 - expected_span = _expected_llmobs_llm_span_event( - span, - model_name="claude-3-opus-20240229", - model_provider="anthropic", - input_messages=[{"content": "Hello World!", "role": "user"}], - output_messages=[{"content": ""}], - error=span.get_tag("error.type"), - error_message=span.get_tag("error.message"), - error_stack=span.get_tag("error.stack"), - metadata={"temperature": 0.8, "max_tokens": mock.ANY}, - tags={"ml_app": "", "service": "tests.contrib.anthropic"}, - ) - 
mock_llmobs_writer.enqueue.assert_called_with(expected_span) - actual_span = mock_llmobs_writer.enqueue.call_args[0][0] - assert "[Unserializable object: ", "service": "tests.contrib.openai"}, - ) - mock_llmobs_writer.enqueue.assert_called_with(expected_span) - actual_span = mock_llmobs_writer.enqueue.call_args[0][0] - assert "[Unserializable object: Date: Fri, 13 Dec 2024 14:28:59 +0100 Subject: [PATCH 29/78] chore(iast): move env var to config (#11720) Moving `DD_IAST_VULNERABILITIES_PER_REQUEST` and `DD_IAST_MAX_CONCURRENT_REQUESTS` to `asm_config` ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/appsec/_constants.py | 4 
++++ ddtrace/appsec/_iast/__init__.py | 2 +- ddtrace/appsec/_iast/_overhead_control_engine.py | 14 +++++--------- ddtrace/appsec/_iast/taint_sinks/_base.py | 8 +++++--- ddtrace/settings/asm.py | 12 ++++++++++++ tests/appsec/iast/test_overhead_control_engine.py | 12 +++++------- tests/telemetry/test_writer.py | 2 ++ 7 files changed, 34 insertions(+), 20 deletions(-) diff --git a/ddtrace/appsec/_constants.py b/ddtrace/appsec/_constants.py index a127ebb6615..fbb1264f6f9 100644 --- a/ddtrace/appsec/_constants.py +++ b/ddtrace/appsec/_constants.py @@ -129,6 +129,10 @@ class IAST(metaclass=Constant_Class): ENV_DEBUG: Literal["DD_IAST_DEBUG"] = "DD_IAST_DEBUG" ENV_PROPAGATION_DEBUG: Literal["DD_IAST_PROPAGATION_DEBUG"] = "DD_IAST_PROPAGATION_DEBUG" ENV_REQUEST_SAMPLING: Literal["DD_IAST_REQUEST_SAMPLING"] = "DD_IAST_REQUEST_SAMPLING" + DD_IAST_VULNERABILITIES_PER_REQUEST: Literal[ + "DD_IAST_VULNERABILITIES_PER_REQUEST" + ] = "DD_IAST_VULNERABILITIES_PER_REQUEST" + DD_IAST_MAX_CONCURRENT_REQUESTS: Literal["DD_IAST_MAX_CONCURRENT_REQUESTS"] = "DD_IAST_MAX_CONCURRENT_REQUESTS" ENV_TELEMETRY_REPORT_LVL: Literal["DD_IAST_TELEMETRY_VERBOSITY"] = "DD_IAST_TELEMETRY_VERBOSITY" LAZY_TAINT: Literal["_DD_IAST_LAZY_TAINT"] = "_DD_IAST_LAZY_TAINT" JSON: Literal["_dd.iast.json"] = "_dd.iast.json" diff --git a/ddtrace/appsec/_iast/__init__.py b/ddtrace/appsec/_iast/__init__.py index 5aab86cf783..3e4b04a0b6a 100644 --- a/ddtrace/appsec/_iast/__init__.py +++ b/ddtrace/appsec/_iast/__init__.py @@ -1,4 +1,4 @@ -"""IAST (interactive application security testing) analyzes code for security vulnerabilities. +"""IAST (Interactive Application Security Testing) analyzes code for security vulnerabilities. 
To add new vulnerabilities analyzers (Taint sink) we should update `IAST_PATCH` in `ddtrace/appsec/iast/_patch_modules.py` diff --git a/ddtrace/appsec/_iast/_overhead_control_engine.py b/ddtrace/appsec/_iast/_overhead_control_engine.py index 036e4d3cbfd..b1f490b14ef 100644 --- a/ddtrace/appsec/_iast/_overhead_control_engine.py +++ b/ddtrace/appsec/_iast/_overhead_control_engine.py @@ -3,7 +3,6 @@ limit. It will measure operations being executed in a request and it will deactivate detection (and therefore reduce the overhead to nearly 0) if a certain threshold is reached. """ -import os from typing import Set from typing import Text from typing import Tuple @@ -25,22 +24,18 @@ def get_request_sampling_value() -> float: return float(asm_config._iast_request_sampling) -MAX_REQUESTS = int(os.environ.get("DD_IAST_MAX_CONCURRENT_REQUESTS", 2)) -MAX_VULNERABILITIES_PER_REQUEST = int(os.environ.get("DD_IAST_VULNERABILITIES_PER_REQUEST", 2)) - - class Operation(object): """Common operation related to Overhead Control Engine (OCE). Every vulnerabilities/taint_sinks should inherit from this class. OCE instance calls these methods to control the overhead produced in each request. 
""" _lock = threading.Lock() - _vulnerability_quota = MAX_VULNERABILITIES_PER_REQUEST + _vulnerability_quota = asm_config._iast_max_vulnerabilities_per_requests _reported_vulnerabilities: Set[Tuple[str, int]] = set() @classmethod def reset(cls): - cls._vulnerability_quota = MAX_VULNERABILITIES_PER_REQUEST + cls._vulnerability_quota = asm_config._iast_max_vulnerabilities_per_requests cls._reported_vulnerabilities = set() @classmethod @@ -57,7 +52,7 @@ def acquire_quota(cls) -> bool: def increment_quota(cls) -> bool: cls._lock.acquire() result = False - if cls._vulnerability_quota < MAX_VULNERABILITIES_PER_REQUEST: + if cls._vulnerability_quota < asm_config._iast_max_vulnerabilities_per_requests: cls._vulnerability_quota += 1 result = True cls._lock.release() @@ -86,12 +81,13 @@ class OverheadControl(object): """ _lock = threading.Lock() - _request_quota = MAX_REQUESTS + _request_quota = asm_config._iast_max_concurrent_requests _vulnerabilities: Set[Type[Operation]] = set() _sampler = RateSampler(sample_rate=get_request_sampling_value() / 100.0) def reconfigure(self): self._sampler = RateSampler(sample_rate=get_request_sampling_value() / 100.0) + self._request_quota = asm_config._iast_max_concurrent_requests def acquire_request(self, span: Span) -> bool: """Decide whether if IAST analysis will be done for this request. diff --git a/ddtrace/appsec/_iast/taint_sinks/_base.py b/ddtrace/appsec/_iast/taint_sinks/_base.py index 7db79d33fd8..16eaac2452c 100644 --- a/ddtrace/appsec/_iast/taint_sinks/_base.py +++ b/ddtrace/appsec/_iast/taint_sinks/_base.py @@ -61,9 +61,11 @@ def wrapper(wrapped: Callable, instance: Any, args: Any, kwargs: Any) -> Any: vulnerability and update the context with the report information. """ if not is_iast_request_enabled(): - log.debug( - "[IAST] VulnerabilityBase.wrapper. No request quota or this vulnerability is outside the context" - ) + if _is_iast_debug_enabled(): + log.debug( + "[IAST] VulnerabilityBase.wrapper. 
No request quota or this vulnerability " + "is outside the context" + ) return wrapped(*args, **kwargs) elif cls.has_quota(): return func(wrapped, instance, args, kwargs) diff --git a/ddtrace/settings/asm.py b/ddtrace/settings/asm.py index 3ec15ae67ef..cf20ea08f1a 100644 --- a/ddtrace/settings/asm.py +++ b/ddtrace/settings/asm.py @@ -156,6 +156,16 @@ class ASMConfig(Env): + r"ey[I-L][\w=-]+\.ey[I-L][\w=-]+(\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY" + r"[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}", ) + _iast_max_concurrent_requests = Env.var( + int, + IAST.DD_IAST_MAX_CONCURRENT_REQUESTS, + default=2, + ) + _iast_max_vulnerabilities_per_requests = Env.var( + int, + IAST.DD_IAST_VULNERABILITIES_PER_REQUEST, + default=2, + ) _iast_lazy_taint = Env.var(bool, IAST.LAZY_TAINT, default=False) _deduplication_enabled = Env.var(bool, "_DD_APPSEC_DEDUPLICATION_ENABLED", default=True) @@ -213,6 +223,8 @@ class ASMConfig(Env): "_iast_redaction_enabled", "_iast_redaction_name_pattern", "_iast_redaction_value_pattern", + "_iast_max_concurrent_requests", + "_iast_max_vulnerabilities_per_requests", "_iast_lazy_taint", "_ep_stack_trace_enabled", "_ep_max_stack_traces", diff --git a/tests/appsec/iast/test_overhead_control_engine.py b/tests/appsec/iast/test_overhead_control_engine.py index 318f1a2104f..1d1d4d11b90 100644 --- a/tests/appsec/iast/test_overhead_control_engine.py +++ b/tests/appsec/iast/test_overhead_control_engine.py @@ -5,8 +5,7 @@ from ddtrace.appsec._iast import oce from ddtrace.appsec._iast._iast_request_context import get_iast_reporter -from ddtrace.appsec._iast._overhead_control_engine import MAX_REQUESTS -from ddtrace.appsec._iast._overhead_control_engine import MAX_VULNERABILITIES_PER_REQUEST +from ddtrace.settings.asm import config as asm_config from tests.utils import override_global_config @@ -55,7 +54,7 @@ def test_oce_max_vulnerabilities_per_request(iast_context_defaults): m.digest() span_report = get_iast_reporter() - 
assert len(span_report.vulnerabilities) == MAX_VULNERABILITIES_PER_REQUEST + assert len(span_report.vulnerabilities) == asm_config._iast_max_vulnerabilities_per_requests @pytest.mark.skip_iast_check_logs @@ -72,7 +71,7 @@ def test_oce_reset_vulnerabilities_report(iast_context_defaults): span_report = get_iast_reporter() - assert len(span_report.vulnerabilities) == MAX_VULNERABILITIES_PER_REQUEST + 1 + assert len(span_report.vulnerabilities) == asm_config._iast_max_vulnerabilities_per_requests + 1 @pytest.mark.skip_iast_check_logs @@ -82,7 +81,7 @@ def test_oce_no_race_conditions_in_span(iast_span_defaults): oc = OverheadControl() oc.reconfigure() - assert oc._request_quota == MAX_REQUESTS + assert oc._request_quota == asm_config._iast_max_concurrent_requests # Request 1 tries to acquire the lock assert oc.acquire_request(iast_span_defaults) is True @@ -148,7 +147,6 @@ def test_oce_concurrent_requests_in_spans(iast_span_defaults): """ import threading - from ddtrace.appsec._iast._overhead_control_engine import MAX_REQUESTS from ddtrace.appsec._iast._overhead_control_engine import OverheadControl oc = OverheadControl() @@ -167,7 +165,7 @@ def test_oce_concurrent_requests_in_spans(iast_span_defaults): results.append(thread.join()) # Ensures quota is always within bounds after multithreading scenario - assert 0 <= oc._request_quota <= MAX_REQUESTS + assert 0 <= oc._request_quota <= asm_config._iast_max_concurrent_requests @pytest.mark.skip_iast_check_logs diff --git a/tests/telemetry/test_writer.py b/tests/telemetry/test_writer.py index 7718710ff60..bcc3be9e38c 100644 --- a/tests/telemetry/test_writer.py +++ b/tests/telemetry/test_writer.py @@ -356,6 +356,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_EXPERIMENTAL_APPSEC_STANDALONE_ENABLED", "origin": "default", "value": False}, {"name": "DD_HTTP_CLIENT_TAG_QUERY_STRING", "origin": "default", "value": None}, {"name": "DD_IAST_ENABLED", "origin": "default", "value": 
False}, + {"name": "DD_IAST_MAX_CONCURRENT_REQUESTS", "origin": "default", "value": 2}, {"name": "DD_IAST_REDACTION_ENABLED", "origin": "default", "value": True}, { "name": "DD_IAST_REDACTION_NAME_PATTERN", @@ -380,6 +381,7 @@ def test_app_started_event_configuration_override(test_agent_session, run_python {"name": "DD_IAST_REQUEST_SAMPLING", "origin": "default", "value": 30.0}, {"name": "DD_IAST_STACK_TRACE_ENABLED", "origin": "default", "value": True}, {"name": "DD_IAST_TELEMETRY_VERBOSITY", "origin": "default", "value": "INFORMATION"}, + {"name": "DD_IAST_VULNERABILITIES_PER_REQUEST", "origin": "default", "value": 2}, {"name": "DD_INJECT_FORCE", "origin": "env_var", "value": True}, {"name": "DD_INSTRUMENTATION_INSTALL_ID", "origin": "default", "value": None}, {"name": "DD_INSTRUMENTATION_INSTALL_TYPE", "origin": "default", "value": None}, From d1bce2eb9422a1f0c62f92a67d6ac778c2c1e815 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Fri, 13 Dec 2024 11:57:43 -0500 Subject: [PATCH 30/78] ci: fix needs_testrun tests. do not record all requests (#11711) --- scripts/needs_testrun.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/needs_testrun.py b/scripts/needs_testrun.py index bbbd73b33db..c28ded72812 100755 --- a/scripts/needs_testrun.py +++ b/scripts/needs_testrun.py @@ -36,7 +36,7 @@ def get_base_branch(pr_number: int) -> str: >>> with vcr.use_cassette( ... "scripts/vcr/needs_testrun.yaml", ... filter_headers=["authorization", "user-agent"], - ... record_mode="all"): + ... record_mode="none"): ... get_base_branch(6412) ... get_base_branch(11534) ... get_base_branch(11690) @@ -135,7 +135,7 @@ def get_changed_files(pr_number: int, sha: t.Optional[str] = None) -> t.Set[str] >>> with vcr.use_cassette( ... "scripts/vcr/needs_testrun.yaml", ... filter_headers=["authorization", "user-agent"], - ... record_mode="all"): + ... record_mode="none"): ... 
sorted(get_changed_files(6388)) # doctest: +NORMALIZE_WHITESPACE ['ddtrace/debugging/_expressions.py', 'releasenotes/notes/fix-debugger-expressions-none-literal-30f3328d2e386f40.yaml', @@ -165,7 +165,7 @@ def needs_testrun(suite: str, pr_number: int, sha: t.Optional[str] = None) -> bo >>> with vcr.use_cassette( ... "scripts/vcr/needs_testrun.yaml", ... filter_headers=["authorization", "user-agent"], - ... record_mode="all"): + ... record_mode="none"): ... needs_testrun("debugger", 6485) ... needs_testrun("debugger", 6388) ... needs_testrun("foobar", 6412) From 509d39067dfbcd9c1832051b3b60fa00db29dc8e Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Fri, 13 Dec 2024 11:58:20 -0500 Subject: [PATCH 31/78] ci: use hatch to execute needs_testrun.py (#11709) --- hatch.toml | 3 +++ scripts/needs_testrun.py | 10 +++++++++- scripts/run-test-suite | 2 +- scripts/run-test-suite-hatch | 2 +- 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/hatch.toml b/hatch.toml index f22870e01f1..21610b6c776 100644 --- a/hatch.toml +++ b/hatch.toml @@ -143,6 +143,9 @@ extra-dependencies = [ test = [ "python -m doctest {args} scripts/get-target-milestone.py scripts/needs_testrun.py tests/suitespec.py", ] +needs_testrun = [ + "scripts/needs_testrun.py {args}", +] [envs.meta-testing] python = "3.10" diff --git a/scripts/needs_testrun.py b/scripts/needs_testrun.py index c28ded72812..01ba87299bd 100755 --- a/scripts/needs_testrun.py +++ b/scripts/needs_testrun.py @@ -270,8 +270,13 @@ def extract_git_commit_selections(git_commit_message: str) -> t.Set[str]: def main() -> bool: argp = ArgumentParser() + try: + default_pr_number = _get_pr_number() + except RuntimeError: + default_pr_number = None + argp.add_argument("suite", help="The suite to use", type=str) - argp.add_argument("--pr", help="The PR number", type=int, default=_get_pr_number()) + argp.add_argument("--pr", help="The PR number", type=int, default=default_pr_number) argp.add_argument( "--sha", help="Commit hash to use as 
diff base (defaults to PR merge root)", type=lambda v: v or None ) @@ -282,6 +287,9 @@ def main() -> bool: if args.verbose: LOGGER.setLevel(logging.INFO) + if not args.pr: + raise RuntimeError("Could not determine PR number") + return needs_testrun(args.suite, args.pr, sha=args.sha) diff --git a/scripts/run-test-suite b/scripts/run-test-suite index cca6bb5262e..8664decde25 100755 --- a/scripts/run-test-suite +++ b/scripts/run-test-suite @@ -47,7 +47,7 @@ set -e if ! [[ -v CIRCLECI && $CIRCLE_BRANCH =~ main ]]; then if [[ -f "$CHECKPOINT_FILENAME" ]]; then latest_success_commit=$(cat $CHECKPOINT_FILENAME) - if ! ./scripts/needs_testrun.py $CIRCLE_JOB --sha $latest_success_commit; then + if ! hatch run scripts:needs_testrun $CIRCLE_JOB --sha $latest_success_commit; then echo "The $CIRCLE_JOB job succeeded at commit $latest_success_commit." echo "None of the changes on this branch since that commit affect the $CIRCLE_JOB job." echo "Skipping this job." diff --git a/scripts/run-test-suite-hatch b/scripts/run-test-suite-hatch index 0a8d1c1d765..38754de9ac9 100755 --- a/scripts/run-test-suite-hatch +++ b/scripts/run-test-suite-hatch @@ -33,7 +33,7 @@ set -e if ! [[ -v CIRCLECI && $CIRCLE_BRANCH =~ main ]]; then if [[ -f "$CHECKPOINT_FILENAME" ]]; then latest_success_commit=$(cat $CHECKPOINT_FILENAME) - if ! ./scripts/needs_testrun.py $CIRCLE_JOB --sha $latest_success_commit; then + if ! hatch run scripts:needs_testrun $CIRCLE_JOB --sha $latest_success_commit; then echo "The $CIRCLE_JOB job succeeded at commit $latest_success_commit." echo "None of the changes on this branch since that commit affect the $CIRCLE_JOB job." echo "Skipping this job." 
From 0581d4d33237163b16e94c58e87d162aa45a5135 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Fri, 13 Dec 2024 13:17:21 -0500 Subject: [PATCH 32/78] ci: wait for es server to be up before running tests (#11707) --- .../contrib/elasticsearch/test_elasticsearch.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/contrib/elasticsearch/test_elasticsearch.py b/tests/contrib/elasticsearch/test_elasticsearch.py index ecb09100387..92ce195c92f 100644 --- a/tests/contrib/elasticsearch/test_elasticsearch.py +++ b/tests/contrib/elasticsearch/test_elasticsearch.py @@ -1,5 +1,7 @@ import datetime +from http.client import HTTPConnection from importlib import import_module +import time import pytest @@ -38,6 +40,18 @@ raise ImportError("could not import any of {0!r}".format(module_names)) +def wait_for_es(host: str, port: int): + for _ in range(20): + try: + conn = HTTPConnection(f"{host}:{port}") + conn.request("GET", "/") + conn.getresponse() + return + except Exception: + time.sleep(1) + raise Exception(f"Could not connect to ES at {host}:{port}") + + class ElasticsearchPatchTest(TracerTestCase): """ Elasticsearch integration test suite. @@ -67,6 +81,8 @@ def setUp(self): super(ElasticsearchPatchTest, self).setUp() es = self._get_es() + config = self._get_es_config() + wait_for_es(config["host"], config["port"]) tags = { # `component` is a reserved tag. Setting it via `Pin` should have no effect. 
"component": "foo", From 1dd528c0ef5d04e2b095f61d8a15e8fc15cbb00a Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Fri, 13 Dec 2024 14:45:27 -0500 Subject: [PATCH 33/78] fix(lib-injection): fix SSI abort telemetry metrics (#11627) --- lib-injection/sources/sitecustomize.py | 156 ++++++++++++------ ...-injection-telemetry-4fbea5e41ee1ff3e.yaml | 4 + 2 files changed, 106 insertions(+), 54 deletions(-) create mode 100644 releasenotes/notes/fix-lib-injection-telemetry-4fbea5e41ee1ff3e.yaml diff --git a/lib-injection/sources/sitecustomize.py b/lib-injection/sources/sitecustomize.py index dbc68f65ebe..7d28a3c4d4a 100644 --- a/lib-injection/sources/sitecustomize.py +++ b/lib-injection/sources/sitecustomize.py @@ -31,6 +31,7 @@ def parse_version(version): return Version((0, 0), "") +TELEMETRY_DATA = [] SCRIPT_DIR = os.path.dirname(__file__) RUNTIMES_ALLOW_LIST = { "cpython": { @@ -68,49 +69,58 @@ def get_oci_ddtrace_version(): def build_installed_pkgs(): installed_packages = {} if sys.version_info >= (3, 8): - from importlib import metadata as importlib_metadata + try: + from importlib import metadata as importlib_metadata - installed_packages = {pkg.metadata["Name"]: pkg.version for pkg in importlib_metadata.distributions()} + installed_packages = {pkg.metadata["Name"]: pkg.version for pkg in importlib_metadata.distributions()} + except Exception as e: + _log("Failed to build installed packages list: %s" % e, level="debug") else: try: import pkg_resources installed_packages = {pkg.key: pkg.version for pkg in pkg_resources.working_set} - except ImportError: + except Exception: try: import importlib_metadata installed_packages = {pkg.metadata["Name"]: pkg.version for pkg in importlib_metadata.distributions()} - except ImportError: - pass + except Exception as e: + _log("Failed to build installed packages list: %s" % e, level="debug") return {key.lower(): value for key, value in installed_packages.items()} def build_min_pkgs(): min_pkgs = dict() - for location in 
VERSION_COMPAT_FILE_LOCATIONS: - if os.path.exists(location): - with open(location, "r") as csvfile: - csv_reader = csv.reader(csvfile, delimiter=",") - for idx, row in enumerate(csv_reader): - if idx < 2: - continue - min_pkgs[row[0].lower()] = parse_version(row[1]) - break + try: + for location in VERSION_COMPAT_FILE_LOCATIONS: + if os.path.exists(location): + with open(location, "r") as csvfile: + csv_reader = csv.reader(csvfile, delimiter=",") + for idx, row in enumerate(csv_reader): + if idx < 2: + continue + min_pkgs[row[0].lower()] = parse_version(row[1]) + break + except Exception as e: + _log("Failed to build min-pkgs list: %s" % e, level="debug") return min_pkgs def build_denied_executables(): denied_executables = set() _log("Checking denied-executables list", level="debug") - if os.path.exists(EXECUTABLE_DENY_LOCATION): - with open(EXECUTABLE_DENY_LOCATION, "r") as denyfile: - _log("Found deny-list file", level="debug") - for line in denyfile.readlines(): - cleaned = line.strip("\n") - denied_executables.add(cleaned) - denied_executables.add(os.path.basename(cleaned)) - _log("Built denied-executables list of %s entries" % (len(denied_executables),), level="debug") + try: + if os.path.exists(EXECUTABLE_DENY_LOCATION): + with open(EXECUTABLE_DENY_LOCATION, "r") as denyfile: + _log("Found deny-list file", level="debug") + for line in denyfile.readlines(): + cleaned = line.strip("\n") + denied_executables.add(cleaned) + denied_executables.add(os.path.basename(cleaned)) + _log("Built denied-executables list of %s entries" % (len(denied_executables),), level="debug") + except Exception as e: + _log("Failed to build denied-executables list: %s" % e, level="debug") return denied_executables @@ -228,13 +238,14 @@ def _inject(): global PYTHON_RUNTIME global PKGS_ALLOW_LIST global EXECUTABLES_DENY_LIST + global TELEMETRY_DATA + # Try to get the version of the Python runtime first so we have it for telemetry + PYTHON_VERSION = platform.python_version() + 
PYTHON_RUNTIME = platform.python_implementation().lower() DDTRACE_VERSION = get_oci_ddtrace_version() INSTALLED_PACKAGES = build_installed_pkgs() - PYTHON_RUNTIME = platform.python_implementation().lower() - PYTHON_VERSION = platform.python_version() PKGS_ALLOW_LIST = build_min_pkgs() EXECUTABLES_DENY_LIST = build_denied_executables() - telemetry_data = [] integration_incomp = False runtime_incomp = False os.environ["_DD_INJECT_WAS_ATTEMPTED"] = "true" @@ -260,12 +271,14 @@ def _inject(): _log("ddtrace_pkgs path is %r" % pkgs_path, level="debug") _log("ddtrace_pkgs contents: %r" % os.listdir(pkgs_path), level="debug") + abort = False incompatible_sysarg = get_first_incompatible_sysarg() if incompatible_sysarg is not None: _log("Found incompatible executable: %s." % incompatible_sysarg, level="debug") if not FORCE_INJECT: _log("Aborting dd-trace-py instrumentation.", level="debug") - telemetry_data.append( + abort = True + TELEMETRY_DATA.append( create_count_metric( "library_entrypoint.abort.integration", ) @@ -287,9 +300,10 @@ def _inject(): integration_incomp = True if not FORCE_INJECT: _log("Aborting dd-trace-py instrumentation.", level="debug") + abort = True for key, value in incompatible_packages.items(): - telemetry_data.append( + TELEMETRY_DATA.append( create_count_metric( "library_entrypoint.abort.integration", [ @@ -313,15 +327,16 @@ def _inject(): runtime_incomp = True if not FORCE_INJECT: _log("Aborting dd-trace-py instrumentation.", level="debug") + abort = True - telemetry_data.append(create_count_metric("library_entrypoint.abort.runtime")) + TELEMETRY_DATA.append(create_count_metric("library_entrypoint.abort.runtime")) else: _log( "DD_INJECT_FORCE set to True, allowing unsupported runtimes and continuing.", level="debug", ) - if telemetry_data: - telemetry_data.append( + if abort: + TELEMETRY_DATA.append( create_count_metric( "library_entrypoint.abort", [ @@ -329,8 +344,6 @@ def _inject(): ], ) ) - telemetry_event = 
gen_telemetry_payload(telemetry_data, DDTRACE_VERSION) - send_telemetry(telemetry_event) return site_pkgs_path = os.path.join( @@ -339,6 +352,12 @@ def _inject(): _log("site-packages path is %r" % site_pkgs_path, level="debug") if not os.path.exists(site_pkgs_path): _log("ddtrace site-packages not found in %r, aborting" % site_pkgs_path, level="error") + TELEMETRY_DATA.append( + gen_telemetry_payload( + [create_count_metric("library_entrypoint.abort", ["reason:missing_" + site_pkgs_path])], + DDTRACE_VERSION, + ) + ) return # Add the custom site-packages directory to the Python path to load the ddtrace package. @@ -349,6 +368,17 @@ def _inject(): except BaseException as e: _log("failed to load ddtrace module: %s" % e, level="error") + TELEMETRY_DATA.append( + gen_telemetry_payload( + [ + create_count_metric( + "library_entrypoint.error", ["error_type:import_ddtrace_" + type(e).__name__.lower()] + ) + ], + DDTRACE_VERSION, + ) + ) + return else: try: @@ -377,38 +407,56 @@ def _inject(): os.environ["PYTHONPATH"] = python_path _log("successfully configured ddtrace package, python path is %r" % os.environ["PYTHONPATH"]) - event = gen_telemetry_payload( - [ - create_count_metric( - "library_entrypoint.complete", - [ - "injection_forced:" + str(runtime_incomp or integration_incomp).lower(), - ], - ) - ], - DDTRACE_VERSION, + TELEMETRY_DATA.append( + gen_telemetry_payload( + [ + create_count_metric( + "library_entrypoint.complete", + [ + "injection_forced:" + str(runtime_incomp or integration_incomp).lower(), + ], + ) + ], + DDTRACE_VERSION, + ) ) - send_telemetry(event) except Exception as e: - event = gen_telemetry_payload( - [create_count_metric("library_entrypoint.error", ["error_type:" + type(e).__name__.lower()])], - DDTRACE_VERSION, + TELEMETRY_DATA.append( + gen_telemetry_payload( + [ + create_count_metric( + "library_entrypoint.error", ["error_type:init_ddtrace_" + type(e).__name__.lower()] + ) + ], + DDTRACE_VERSION, + ) ) - send_telemetry(event) _log("failed to 
load ddtrace.bootstrap.sitecustomize: %s" % e, level="error") return else: module_origin = spec.origin if spec else None _log("user-installed ddtrace found: %s, aborting site-packages injection" % module_origin, level="warning") + TELEMETRY_DATA.append( + create_count_metric( + "library_entrypoint.abort", + [ + "reason:ddtrace_already_present", + ], + ) + ) try: - _inject() -except Exception as e: try: - event = gen_telemetry_payload( - [create_count_metric("library_entrypoint.error", ["error_type:" + type(e).__name__.lower()])] + _inject() + except Exception as e: + TELEMETRY_DATA.append( + gen_telemetry_payload( + [create_count_metric("library_entrypoint.error", ["error_type:main_" + type(e).__name__.lower()])] + ) ) - send_telemetry(event) - except Exception: - pass # absolutely never allow exceptions to propagate to the app + finally: + if TELEMETRY_DATA: + send_telemetry(TELEMETRY_DATA) +except Exception: + pass # absolutely never allow exceptions to propagate to the app diff --git a/releasenotes/notes/fix-lib-injection-telemetry-4fbea5e41ee1ff3e.yaml b/releasenotes/notes/fix-lib-injection-telemetry-4fbea5e41ee1ff3e.yaml new file mode 100644 index 00000000000..cf80f366f5b --- /dev/null +++ b/releasenotes/notes/fix-lib-injection-telemetry-4fbea5e41ee1ff3e.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + lib-injection: Fix missing lib-injection telemetry for common abort scenarios. 
From 00ec1f7d6473e21690ab1b7cac734e4f2d7ac5e7 Mon Sep 17 00:00:00 2001 From: David Sanchez <838104+sanchda@users.noreply.github.com> Date: Fri, 13 Dec 2024 16:40:18 -0600 Subject: [PATCH 34/78] feat(profiling): add support for pytorch profiling (#9154) PR does - Patches `torch.profiler.profile` class by adding our own `on_trace_ready` handler - Adds GPU time/flops/memory samples via libdatadog interface in `on_trace_ready` event handler - Ensures that libdd exporter is enabled if pytorch is enabled - Hides functionality behind a FF set to False by default - changelog entry - Is there a minimum python version? - the biggest requirement is that the current pytorch profiler API which we instrument was introduced in torch version 1.8.1 (https://pytorch.org/blog/introducing-pytorch-profiler-the-new-and-improved-performance-tool/), do we just want to document or we could disable the instrumentation if we detect an outdated version with `torch.__version__` - Some documentation on needed user configuration, conflicting features, gotchas ~~Probably should make experimental/beta collectors not part of the ALL template (Is this blocking since we haven't done in the past??)~~ ## Testing Done - Tested by running on ec2 GPU instance - Tested by running `prof-pytorch` service in staging - I'm not entirely sure if we need unit tests for this feature, or where they would live. Would we want the unit test suite to depend on torch? 
Maybe this is solved for tracing integrations, though ## Checklist - [x] Change(s) are motivated and described in the PR description - [x] Testing strategy is described if automated tests are not included in the PR - [x] Risks are described (performance impact, potential for breakage, maintainability) - [x] Change is maintainable (easy to change, telemetry, documentation) - [x] [Library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) are followed or label `changelog/no-changelog` is set - [x] Documentation is included (in-code, generated user docs, [public corp docs](https://github.com/DataDog/documentation/)) - [x] Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) - [x] If this PR changes the public interface, I've notified `@DataDog/apm-tees`. ## Reviewer Checklist - [x] Title is accurate - [x] All changes are related to the pull request's stated goal - [x] Description motivates each change - [x] Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - [x] Testing strategy adequately addresses listed risks - [x] Change is maintainable (easy to change, telemetry, documentation) - [x] Release note makes sense to a user of the library - [x] Author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - [x] Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: sanchda Co-authored-by: Peter Griggs Co-authored-by: Daniel Schwartz-Narbonne Co-authored-by: Emmett Butler <723615+emmettbutler@users.noreply.github.com> Co-authored-by: Daniel Schwartz-Narbonne Co-authored-by: Taegyun Kim Co-authored-by: Daniel Schwartz-Narbonne --- .github/workflows/pytorch_gpu_tests.yml | 43 ++++ .../dd_wrapper/include/ddup_interface.hpp | 5 + 
.../dd_wrapper/include/libdatadog_helpers.hpp | 3 +- .../profiling/dd_wrapper/include/sample.hpp | 7 + .../profiling/dd_wrapper/include/types.hpp | 11 +- .../dd_wrapper/src/ddup_interface.cpp | 30 +++ .../profiling/dd_wrapper/src/profile.cpp | 17 ++ .../profiling/dd_wrapper/src/sample.cpp | 58 +++++ .../internal/datadog/profiling/ddup/_ddup.pyi | 25 ++- .../internal/datadog/profiling/ddup/_ddup.pyx | 37 +++ ddtrace/profiling/collector/pytorch.py | 211 ++++++++++++++++++ ddtrace/profiling/profiler.py | 36 +++ ddtrace/settings/profiling.py | 31 ++- docs/advanced_usage.rst | 82 +++++++ docs/pytorch_metric.png | Bin 0 -> 124186 bytes docs/spelling_wordlist.txt | 120 +++++----- hatch.toml | 28 +++ ...-pytorch-integration-0683123b7bb83f99.yaml | 8 + .../simple_program_pytorch_gpu.py | 42 ++++ tests/profiling_v2/test_pytorch.py | 44 ++++ 20 files changed, 770 insertions(+), 68 deletions(-) create mode 100644 .github/workflows/pytorch_gpu_tests.yml create mode 100644 ddtrace/profiling/collector/pytorch.py create mode 100644 docs/pytorch_metric.png create mode 100644 releasenotes/notes/profiling-add-pytorch-integration-0683123b7bb83f99.yaml create mode 100644 tests/profiling_v2/simple_program_pytorch_gpu.py create mode 100644 tests/profiling_v2/test_pytorch.py diff --git a/.github/workflows/pytorch_gpu_tests.yml b/.github/workflows/pytorch_gpu_tests.yml new file mode 100644 index 00000000000..1db504ae61d --- /dev/null +++ b/.github/workflows/pytorch_gpu_tests.yml @@ -0,0 +1,43 @@ +name: Pytorch Unit Tests (with GPU) + +on: + push: + branches: + - 'main' + - 'mq-working-branch**' + paths: + - 'ddtrace/profiling/collector/pytorch.py' + pull_request: + paths: + - 'ddtrace/profiling/collector/pytorch.py' + workflow_dispatch: + +jobs: + unit-tests: + runs-on: APM-4-CORE-GPU-LINUX + steps: + - uses: actions/checkout@v4 + # Include all history and tags + with: + persist-credentials: false + fetch-depth: 0 + + - uses: actions/setup-python@v5 + name: Install Python + with: + 
python-version: '3.12' + + - uses: actions-rust-lang/setup-rust-toolchain@v1 + - name: Install latest stable toolchain and rustfmt + run: rustup update stable && rustup default stable && rustup component add rustfmt clippy + + - name: Install hatch + uses: pypa/hatch@install + with: + version: "1.12.0" + + - name: Install PyTorch + run: pip install torch + + - name: Run tests + run: hatch run profiling_pytorch:test diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/ddup_interface.hpp b/ddtrace/internal/datadog/profiling/dd_wrapper/include/ddup_interface.hpp index cd18ead1966..0eec6ad87bc 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/ddup_interface.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/ddup_interface.hpp @@ -44,6 +44,9 @@ extern "C" void ddup_push_release(Datadog::Sample* sample, int64_t release_time, int64_t count); void ddup_push_alloc(Datadog::Sample* sample, int64_t size, int64_t count); void ddup_push_heap(Datadog::Sample* sample, int64_t size); + void ddup_push_gpu_gputime(Datadog::Sample* sample, int64_t time, int64_t count); + void ddup_push_gpu_memory(Datadog::Sample* sample, int64_t mem, int64_t count); + void ddup_push_gpu_flops(Datadog::Sample* sample, int64_t flops, int64_t count); void ddup_push_lock_name(Datadog::Sample* sample, std::string_view lock_name); void ddup_push_threadinfo(Datadog::Sample* sample, int64_t thread_id, @@ -56,11 +59,13 @@ extern "C" void ddup_push_trace_type(Datadog::Sample* sample, std::string_view trace_type); void ddup_push_exceptioninfo(Datadog::Sample* sample, std::string_view exception_type, int64_t count); void ddup_push_class_name(Datadog::Sample* sample, std::string_view class_name); + void ddup_push_gpu_device_name(Datadog::Sample*, std::string_view device_name); void ddup_push_frame(Datadog::Sample* sample, std::string_view _name, std::string_view _filename, uint64_t address, int64_t line); + void ddup_push_absolute_ns(Datadog::Sample* sample, int64_t 
timestamp_ns); void ddup_push_monotonic_ns(Datadog::Sample* sample, int64_t monotonic_ns); void ddup_flush_sample(Datadog::Sample* sample); // Stack v2 specific flush, which reverses the locations diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/libdatadog_helpers.hpp b/ddtrace/internal/datadog/profiling/dd_wrapper/include/libdatadog_helpers.hpp index 9952eab8e3f..03a302eb533 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/libdatadog_helpers.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/libdatadog_helpers.hpp @@ -45,7 +45,8 @@ namespace Datadog { X(local_root_span_id, "local root span id") \ X(trace_type, "trace type") \ X(class_name, "class name") \ - X(lock_name, "lock name") + X(lock_name, "lock name") \ + X(gpu_device_name, "gpu device name") #define X_ENUM(a, b) a, #define X_STR(a, b) b, diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample.hpp b/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample.hpp index 8ddf412bf89..38baf59aa97 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample.hpp @@ -100,6 +100,9 @@ class Sample bool push_release(int64_t lock_time, int64_t count); bool push_alloc(int64_t size, int64_t count); bool push_heap(int64_t size); + bool push_gpu_gputime(int64_t time, int64_t count); + bool push_gpu_memory(int64_t size, int64_t count); + bool push_gpu_flops(int64_t flops, int64_t count); // Adds metadata to sample bool push_lock_name(std::string_view lock_name); @@ -112,11 +115,15 @@ class Sample bool push_exceptioninfo(std::string_view exception_type, int64_t count); bool push_class_name(std::string_view class_name); bool push_monotonic_ns(int64_t monotonic_ns); + bool push_absolute_ns(int64_t timestamp_ns); // Interacts with static Sample state bool is_timeline_enabled() const; static void set_timeline(bool enabled); + // Pytorch GPU metadata + bool 
push_gpu_device_name(std::string_view device_name); + // Assumes frames are pushed in leaf-order void push_frame(std::string_view name, // for ddog_prof_Function std::string_view filename, // for ddog_prof_Function diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/types.hpp b/ddtrace/internal/datadog/profiling/dd_wrapper/include/types.hpp index 51785be2c93..3c62fa5d62f 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/types.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/types.hpp @@ -11,7 +11,10 @@ enum SampleType : unsigned int LockRelease = 1 << 4, Allocation = 1 << 5, Heap = 1 << 6, - All = CPU | Wall | Exception | LockAcquire | LockRelease | Allocation | Heap + GPUTime = 1 << 7, + GPUMemory = 1 << 8, + GPUFlops = 1 << 9, + All = CPU | Wall | Exception | LockAcquire | LockRelease | Allocation | Heap | GPUTime | GPUMemory | GPUFlops }; // Every Sample object has a corresponding `values` vector, since libdatadog expects contiguous values per sample. 
@@ -30,6 +33,12 @@ struct ValueIndex unsigned short alloc_space; unsigned short alloc_count; unsigned short heap_space; + unsigned short gpu_time; + unsigned short gpu_count; + unsigned short gpu_alloc_space; + unsigned short gpu_alloc_count; + unsigned short gpu_flops; + unsigned short gpu_flops_samples; // Should be "count," but flops is already a count }; } // namespace Datadog diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp index baee51a7eda..5d3ef356c2a 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp @@ -193,6 +193,24 @@ ddup_push_heap(Datadog::Sample* sample, int64_t size) // cppcheck-suppress unuse sample->push_heap(size); } +void +ddup_push_gpu_gputime(Datadog::Sample* sample, int64_t time, int64_t count) // cppcheck-suppress unusedFunction +{ + sample->push_gpu_gputime(time, count); +} + +void +ddup_push_gpu_memory(Datadog::Sample* sample, int64_t size, int64_t count) // cppcheck-suppress unusedFunction +{ + sample->push_gpu_memory(size, count); +} + +void +ddup_push_gpu_flops(Datadog::Sample* sample, int64_t flops, int64_t count) // cppcheck-suppress unusedFunction +{ + sample->push_gpu_flops(flops, count); +} + void ddup_push_lock_name(Datadog::Sample* sample, std::string_view lock_name) // cppcheck-suppress unusedFunction { @@ -252,6 +270,12 @@ ddup_push_class_name(Datadog::Sample* sample, std::string_view class_name) // cp sample->push_class_name(class_name); } +void +ddup_push_gpu_device_name(Datadog::Sample* sample, std::string_view gpu_device_name) // cppcheck-suppress unusedFunction +{ + sample->push_gpu_device_name(gpu_device_name); +} + void ddup_push_frame(Datadog::Sample* sample, // cppcheck-suppress unusedFunction std::string_view _name, @@ -262,6 +286,12 @@ ddup_push_frame(Datadog::Sample* sample, // cppcheck-suppress unusedFunction 
sample->push_frame(_name, _filename, address, line); } +void +ddup_push_absolute_ns(Datadog::Sample* sample, int64_t timestamp_ns) // cppcheck-suppress unusedFunction +{ + sample->push_absolute_ns(timestamp_ns); +} + void ddup_push_monotonic_ns(Datadog::Sample* sample, int64_t monotonic_ns) // cppcheck-suppress unusedFunction { diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp index f9f7a3e9585..083ad1a655d 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp @@ -89,6 +89,23 @@ Datadog::Profile::setup_samplers() if (0U != (type_mask & SampleType::Heap)) { val_idx.heap_space = get_value_idx("heap-space", "bytes"); } + if (0U != (type_mask & SampleType::GPUTime)) { + val_idx.gpu_time = get_value_idx("gpu-time", "nanoseconds"); + val_idx.gpu_count = get_value_idx("gpu-samples", "count"); + } + if (0U != (type_mask & SampleType::GPUMemory)) { + // In the backend the unit is called 'gpu-space', but maybe for consistency + // it should be gpu-alloc-space + // gpu-alloc-samples may be unused, but it's passed along for scaling purposes + val_idx.gpu_alloc_space = get_value_idx("gpu-space", "bytes"); + val_idx.gpu_alloc_count = get_value_idx("gpu-alloc-samples", "count"); + } + if (0U != (type_mask & SampleType::GPUFlops)) { + // Technically "FLOPS" is a unit, but we call it a 'count' because no + // other profiler uses it as a unit. + val_idx.gpu_flops = get_value_idx("gpu-flops", "count"); + val_idx.gpu_flops_samples = get_value_idx("gpu-flops-samples", "count"); + } // Whatever the first sampler happens to be is the default "period" for the profile // The value of 1 is a pointless default. 
diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp index bc0a316bcc3..1e7ca1b0217 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp @@ -262,6 +262,42 @@ Datadog::Sample::push_heap(int64_t size) return false; } +bool +Datadog::Sample::push_gpu_gputime(int64_t time, int64_t count) +{ + if (0U != (type_mask & SampleType::GPUTime)) { + values[profile_state.val().gpu_time] += time * count; + values[profile_state.val().gpu_count] += count; + return true; + } + std::cout << "bad push gpu" << std::endl; + return false; +} + +bool +Datadog::Sample::push_gpu_memory(int64_t size, int64_t count) +{ + if (0U != (type_mask & SampleType::GPUMemory)) { + values[profile_state.val().gpu_alloc_space] += size * count; + values[profile_state.val().gpu_alloc_count] += count; + return true; + } + std::cout << "bad push gpu memory" << std::endl; + return false; +} + +bool +Datadog::Sample::push_gpu_flops(int64_t size, int64_t count) +{ + if (0U != (type_mask & SampleType::GPUFlops)) { + values[profile_state.val().gpu_flops] += size * count; + values[profile_state.val().gpu_flops_samples] += count; + return true; + } + std::cout << "bad push gpu flops" << std::endl; + return false; +} + bool Datadog::Sample::push_lock_name(std::string_view lock_name) { @@ -351,6 +387,28 @@ Datadog::Sample::push_class_name(std::string_view class_name) return true; } +bool +Datadog::Sample::push_gpu_device_name(std::string_view device_name) +{ + if (!push_label(ExportLabelKey::gpu_device_name, device_name)) { + std::cout << "bad push" << std::endl; + return false; + } + return true; +} + +bool +Datadog::Sample::push_absolute_ns(int64_t _timestamp_ns) +{ + // If timeline is not enabled, then this is a no-op + if (is_timeline_enabled()) { + endtime_ns = _timestamp_ns; + } + + return true; +} + + bool 
Datadog::Sample::push_monotonic_ns(int64_t _monotonic_ns) { diff --git a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi index 78351e93b91..2f466b62af3 100644 --- a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi +++ b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyi @@ -20,19 +20,24 @@ def start() -> None: ... def upload(tracer: Optional[Tracer]) -> None: ... class SampleHandle: - def push_cputime(self, value: int, count: int) -> None: ... - def push_walltime(self, value: int, count: int) -> None: ... + def flush_sample(self) -> None: ... + def push_absolute_ns(self, timestamp_ns: int) -> None: ... def push_acquire(self, value: int, count: int) -> None: ... - def push_release(self, value: int, count: int) -> None: ... def push_alloc(self, value: int, count: int) -> None: ... + def push_class_name(self, class_name: StringType) -> None: ... + def push_cputime(self, value: int, count: int) -> None: ... + def push_exceptioninfo(self, exc_type: Union[None, bytes, str, type], count: int) -> None: ... + def push_frame(self, name: StringType, filename: StringType, address: int, line: int) -> None: ... + def push_gpu_device_name(self, device_name: StringType) -> None: ... + def push_gpu_flops(self, value: int, count: int) -> None: ... + def push_gpu_gputime(self, value: int, count: int) -> None: ... + def push_gpu_memory(self, value: int, count: int) -> None: ... def push_heap(self, value: int) -> None: ... def push_lock_name(self, lock_name: StringType) -> None: ... - def push_frame(self, name: StringType, filename: StringType, address: int, line: int) -> None: ... - def push_threadinfo(self, thread_id: int, thread_native_id: int, thread_name: StringType) -> None: ... + def push_monotonic_ns(self, monotonic_ns: int) -> None: ... + def push_release(self, value: int, count: int) -> None: ... + def push_span(self, span: Optional[Span]) -> None: ... def push_task_id(self, task_id: Optional[int]) -> None: ... 
def push_task_name(self, task_name: StringType) -> None: ... - def push_exceptioninfo(self, exc_type: Union[None, bytes, str, type], count: int) -> None: ... - def push_class_name(self, class_name: StringType) -> None: ... - def push_span(self, span: Optional[Span]) -> None: ... - def push_monotonic_ns(self, monotonic_ns: int) -> None: ... - def flush_sample(self) -> None: ... + def push_threadinfo(self, thread_id: int, thread_native_id: int, thread_name: StringType) -> None: ... + def push_walltime(self, value: int, count: int) -> None: ... diff --git a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx index 9c590c796d7..5b8b6add921 100644 --- a/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx +++ b/ddtrace/internal/datadog/profiling/ddup/_ddup.pyx @@ -68,6 +68,9 @@ cdef extern from "ddup_interface.hpp": void ddup_push_release(Sample *sample, int64_t release_time, int64_t count) void ddup_push_alloc(Sample *sample, int64_t size, int64_t count) void ddup_push_heap(Sample *sample, int64_t size) + void ddup_push_gpu_gputime(Sample *sample, int64_t gputime, int64_t count) + void ddup_push_gpu_memory(Sample *sample, int64_t size, int64_t count) + void ddup_push_gpu_flops(Sample *sample, int64_t flops, int64_t count) void ddup_push_lock_name(Sample *sample, string_view lock_name) void ddup_push_threadinfo(Sample *sample, int64_t thread_id, int64_t thread_native_id, string_view thread_name) void ddup_push_task_id(Sample *sample, int64_t task_id) @@ -77,8 +80,10 @@ cdef extern from "ddup_interface.hpp": void ddup_push_trace_type(Sample *sample, string_view trace_type) void ddup_push_exceptioninfo(Sample *sample, string_view exception_type, int64_t count) void ddup_push_class_name(Sample *sample, string_view class_name) + void ddup_push_gpu_device_name(Sample *sample, string_view device_name) void ddup_push_frame(Sample *sample, string_view _name, string_view _filename, uint64_t address, int64_t line) void 
ddup_push_monotonic_ns(Sample *sample, int64_t monotonic_ns) + void ddup_push_absolute_ns(Sample *sample, int64_t monotonic_ns) void ddup_flush_sample(Sample *sample) void ddup_drop_sample(Sample *sample) @@ -302,6 +307,18 @@ cdef call_ddup_push_class_name(Sample* sample, class_name: StringType): if utf8_data != NULL: ddup_push_class_name(sample, string_view(utf8_data, utf8_size)) +cdef call_ddup_push_gpu_device_name(Sample* sample, device_name: StringType): + if not device_name: + return + if isinstance(device_name, bytes): + ddup_push_gpu_device_name(sample, string_view(device_name, len(device_name))) + return + cdef const char* utf8_data + cdef Py_ssize_t utf8_size + utf8_data = PyUnicode_AsUTF8AndSize(device_name, &utf8_size) + if utf8_data != NULL: + ddup_push_gpu_device_name(sample, string_view(utf8_data, utf8_size)) + cdef call_ddup_push_trace_type(Sample* sample, trace_type: StringType): if not trace_type: return @@ -448,6 +465,18 @@ cdef class SampleHandle: if self.ptr is not NULL: ddup_push_heap(self.ptr, clamp_to_int64_unsigned(value)) + def push_gpu_gputime(self, value: int, count: int) -> None: + if self.ptr is not NULL: + ddup_push_gpu_gputime(self.ptr, clamp_to_int64_unsigned(value), clamp_to_int64_unsigned(count)) + + def push_gpu_memory(self, value: int, count: int) -> None: + if self.ptr is not NULL: + ddup_push_gpu_memory(self.ptr, clamp_to_int64_unsigned(value), clamp_to_int64_unsigned(count)) + + def push_gpu_flops(self, value: int, count: int) -> None: + if self.ptr is not NULL: + ddup_push_gpu_flops(self.ptr, clamp_to_int64_unsigned(value), clamp_to_int64_unsigned(count)) + def push_lock_name(self, lock_name: StringType) -> None: if self.ptr is not NULL: call_ddup_push_lock_name(self.ptr, lock_name) @@ -494,6 +523,10 @@ cdef class SampleHandle: if self.ptr is not NULL: call_ddup_push_class_name(self.ptr, class_name) + def push_gpu_device_name(self, device_name: StringType) -> None: + if self.ptr is not NULL: + 
call_ddup_push_gpu_device_name(self.ptr, device_name) + def push_span(self, span: Optional[Span]) -> None: if self.ptr is NULL: return @@ -512,6 +545,10 @@ cdef class SampleHandle: if self.ptr is not NULL: ddup_push_monotonic_ns(self.ptr, monotonic_ns) + def push_absolute_ns(self, timestamp_ns: int) -> None: + if self.ptr is not NULL: + ddup_push_absolute_ns(self.ptr, timestamp_ns) + def flush_sample(self) -> None: # Flushing the sample consumes it. The user will no longer be able to use # this handle after flushing it. diff --git a/ddtrace/profiling/collector/pytorch.py b/ddtrace/profiling/collector/pytorch.py new file mode 100644 index 00000000000..3d9e636871d --- /dev/null +++ b/ddtrace/profiling/collector/pytorch.py @@ -0,0 +1,211 @@ +from __future__ import absolute_import + +import abc +import logging +import random +import typing + +import wrapt + +from ddtrace._trace.tracer import Tracer +from ddtrace.internal.datadog.profiling import ddup +from ddtrace.profiling import _threading +from ddtrace.profiling import collector +from ddtrace.profiling.recorder import Recorder +from ddtrace.settings.profiling import config + + +LOG = logging.getLogger(__name__) + + +class _WrappedTorchProfiler(wrapt.ObjectProxy): + def __init__( + self, + wrapped: typing.Any, + recorder: Recorder, + tracer: typing.Optional[Tracer], + ) -> None: + wrapt.ObjectProxy.__init__(self, wrapped) + self.on_trace_ready = handle_torch_trace + self._self_recorder = recorder + self._self_tracer = tracer + + +class MLProfilerCollector(collector.CaptureSamplerCollector): + """Record ML framework (i.e. pytorch) profiler usage.""" + + def __init__(self, recorder=None): + super().__init__(recorder) + self.tracer = None + # Holds the pytorch profiler object which is wrapped by this class + self._original: typing.Any = None + + @abc.abstractmethod + def _get_patch_target(self): + # type: (...) 
-> typing.Any + pass + + @abc.abstractmethod + def _set_patch_target( + self, + value, # type: typing.Any + ): + # type: (...) -> None + pass + + def _start_service(self): + # type: (...) -> None + """Start collecting framework profiler usage.""" + try: + import torch + except ImportError as e: + raise collector.CollectorUnavailable(e) + self._torch_module = torch + self.patch() + super()._start_service() + + def _stop_service(self): + # type: (...) -> None + """Stop collecting framework profiler usage.""" + super()._stop_service() + self.unpatch() + + def patch(self): + # type: (...) -> None + """Patch the module for tracking profiling data.""" + # We only patch the profile call from the `torch.profiler` module. + self._original = self._get_patch_target() + + def profiler_init(wrapped, instance, args, kwargs): + profiler = wrapped(*args, **kwargs) + return self.PROFILED_TORCH_CLASS( + profiler, + self.recorder, + self.tracer, + ) + + self._set_patch_target(wrapt.FunctionWrapper(self._original, profiler_init)) + + def unpatch(self): + # type: (...) -> None + """Unpatch the torch.profiler module for tracking profiling data.""" + self._set_patch_target(self._original) + + +class TorchProfilerCollector(MLProfilerCollector): + """Monkey patch torch.profiler.profile usage.""" + + PROFILED_TORCH_CLASS = _WrappedTorchProfiler + + def __init__(self, recorder=None): + super().__init__(recorder) + + def _get_patch_target(self): + # type: (...) -> typing.Any + return self._torch_module.profiler.profile + + def _set_patch_target( + self, value # type: typing.Any + ): + # type: (...) -> None + self._torch_module.profiler.profile = value + + +def handle_torch_trace(prof): + NANOS_PER_MICROSECOND = 1e3 + LOG.debug("handle_torch_trace called") + events = prof.events() + if len(events) == 0: + return + + # need an upper bound of events collected, can be adjusted based on profile size. + # Sadly, there is no way AFAICT to tell the PyTorch profiler itself to limit the num of samples. 
+ # We truncate to keep the uploaded profile to a reasonable size. + # For now, experiment with a default of 1_000_000 if nothing is set. + # TODO, better values here. + collection_fraction = 1.0 + num_events_to_report = min(len(events), config.pytorch.events_limit or 1_000_000) + if num_events_to_report < len(events): + LOG.debug("Dropped events. num_events_to_report %d. len(events): %d", num_events_to_report, len(events)) + collection_fraction = num_events_to_report / len(events) + + empty_events_count = 0 + + # earlier versions use microsecond, later versions use nanosecond + kineto_results = prof.profiler.kineto_results + if hasattr(kineto_results, "trace_start_ns"): + trace_start_ns = kineto_results.trace_start_ns() + elif hasattr(kineto_results, "trace_start_us"): + trace_start_ns = kineto_results.trace_start_us() * NANOS_PER_MICROSECOND + else: + raise AttributeError("Neither trace_start_ns nor trace_start_us exists") + + for e in events: + if collection_fraction < random.random(): # nosec: used for sampling, not security + continue + + handle = ddup.SampleHandle() + data_added = False + + # cpu time sample + if e.cpu_time > 0: + data_added = True + handle.push_cputime(int(e.cpu_time * NANOS_PER_MICROSECOND), e.count) + + # gpu time sample - both device_time and cuda_time are in microseconds + if hasattr(e, "device_time") and e.device_time > 0: + data_added = True + time_elapsed = int(e.device_time * NANOS_PER_MICROSECOND) + handle.push_gpu_gputime(time_elapsed, e.count) + elif hasattr(e, "cuda_time") and e.cuda_time > 0: + data_added = True + time_elapsed = int(e.cuda_time * NANOS_PER_MICROSECOND) + handle.push_gpu_gputime(time_elapsed, e.count) + + # gpu flops sample + if e.flops is not None and e.flops > 0: + data_added = True + handle.push_gpu_flops(e.flops, e.count) + + # GPU memory usage + # earlier versions of torch use cuda_memory_usage, recent versions use device_memory_usage + if hasattr(e, "device_memory_usage") and e.device_memory_usage is not 
None and e.device_memory_usage > 0: + data_added = True + handle.push_gpu_memory(e.device_memory_usage, e.count) + elif hasattr(e, "cuda_memory_usage") and e.cuda_memory_usage is not None and e.cuda_memory_usage > 0: + data_added = True + handle.push_gpu_memory(e.cuda_memory_usage, e.count) + + # If there is data, flush it to the profile. + # Otherwise, do nothing and the sample object will be dropped when it goes out of scope + if data_added: + handle.push_frame(e.name, "unknown-file", 0, 0) + # Pushing pseudoframes for the device name ("device.CPU" or "device.CUDA") + # onto the stack allows differentiation of pytorch frames from other profiling frames + # in the flame graph. + # Note that stacks go root last, so this goes at the end + handle.push_frame("PYTORCH_" + str(e.device_type), "unknown-file", 0, 0) + + handle.push_gpu_device_name("cuda " + str(e.device_index)) + + if str(e.device_type).startswith("DeviceType.CPU"): + # There is a known issue with getting thread ids and names from pytorch. + # If we can't get one, just use a default name. 
+ handle.push_threadinfo( + e.thread, + _threading.get_thread_native_id(e.thread), + _threading.get_thread_name(e.thread) or "PYTORCH-CPU-THREAD-" + str(e.thread), + ) + elif str(e.device_type).startswith("DeviceType.CUDA"): + handle.push_threadinfo( + e.thread, _threading.get_thread_native_id(e.thread), "PYTORCH-CUDA-" + str(e.device_index) + ) + else: + raise AttributeError(f"Unexpected device_type {e.device_type}") + + handle.push_absolute_ns(int(trace_start_ns + e.time_range.end * NANOS_PER_MICROSECOND)) + handle.flush_sample() + else: + if empty_events_count % 1000 == 0: + LOG.debug("%d events with no data to record: %s", empty_events_count, e) + empty_events_count += 1 diff --git a/ddtrace/profiling/profiler.py b/ddtrace/profiling/profiler.py index 9903cc29108..111c1624fd2 100644 --- a/ddtrace/profiling/profiler.py +++ b/ddtrace/profiling/profiler.py @@ -24,6 +24,7 @@ from ddtrace.profiling import scheduler from ddtrace.profiling.collector import asyncio from ddtrace.profiling.collector import memalloc +from ddtrace.profiling.collector import pytorch from ddtrace.profiling.collector import stack from ddtrace.profiling.collector import stack_event from ddtrace.profiling.collector import threading @@ -120,6 +121,7 @@ def __init__( _stack_collector_enabled: bool = profiling_config.stack.enabled, _stack_v2_enabled: bool = profiling_config.stack.v2_enabled, _lock_collector_enabled: bool = profiling_config.lock.enabled, + _pytorch_collector_enabled: bool = profiling_config.pytorch.enabled, enable_code_provenance: bool = profiling_config.code_provenance, endpoint_collection_enabled: bool = profiling_config.endpoint_collection, ): @@ -135,6 +137,7 @@ def __init__( self._stack_collector_enabled: bool = _stack_collector_enabled self._stack_v2_enabled: bool = _stack_v2_enabled self._lock_collector_enabled: bool = _lock_collector_enabled + self._pytorch_collector_enabled: bool = _pytorch_collector_enabled self.enable_code_provenance: bool = enable_code_provenance 
self.endpoint_collection_enabled: bool = endpoint_collection_enabled @@ -219,6 +222,12 @@ def _build_default_exporters(self): LOG.error("Profiling failures occurred in an injected instance of ddtrace, disabling profiling") return [] + # pytorch collector relies on libdd exporter + if self._pytorch_collector_enabled: + LOG.error("Disabling pytorch profiler as libdd collector failed to initialize") + config.pytorch.enabled = False + self._pytorch_collector_enabled = False + # DEV: Import this only if needed to avoid importing protobuf # unnecessarily from ddtrace.profiling.exporter import http @@ -297,6 +306,33 @@ def start_collector(collector_class: Type) -> None: for module, hook in self._collectors_on_import: ModuleWatchdog.register_module_hook(module, hook) + if self._pytorch_collector_enabled: + + def start_collector(collector_class: Type) -> None: + with self._service_lock: + col = collector_class(r) + + if self.status == service.ServiceStatus.RUNNING: + # The profiler is already running so we need to start the collector + try: + col.start() + LOG.debug("Started pytorch collector %r", col) + except collector.CollectorUnavailable: + LOG.debug("Collector %r pytorch is unavailable, disabling", col) + return + except Exception: + LOG.error("Failed to start collector %r pytorch, disabling.", col, exc_info=True) + return + + self._collectors.append(col) + + self._collectors_on_import = [ + ("torch", lambda _: start_collector(pytorch.TorchProfilerCollector)), + ] + + for module, hook in self._collectors_on_import: + ModuleWatchdog.register_module_hook(module, hook) + if self._memory_collector_enabled: self._collectors.append(memalloc.MemoryCollector(r)) diff --git a/ddtrace/settings/profiling.py b/ddtrace/settings/profiling.py index ad8d8794d69..94d71f1778b 100644 --- a/ddtrace/settings/profiling.py +++ b/ddtrace/settings/profiling.py @@ -92,7 +92,13 @@ def _is_libdd_required(config): # libdd... 
requires libdd # injected environments _cannot_ deploy protobuf, so they must use libdd # timeline requires libdd - return config.stack.v2_enabled or config.export._libdd_enabled or config._injected or config.timeline_enabled + return ( + config.stack.v2_enabled + or config.export._libdd_enabled + or config._injected + or config.timeline_enabled + or config.pytorch.enabled + ) # This value indicates whether or not profiling is _loaded_ in an injected environment. It does not by itself @@ -399,6 +405,26 @@ class ProfilingConfigHeap(En): sample_size = En.d(int, _derive_default_heap_sample_size) +class ProfilingConfigPytorch(En): + __item__ = __prefix__ = "pytorch" + + enabled = En.v( + bool, + "enabled", + default=False, + help_type="Boolean", + help="Whether to enable the PyTorch profiler", + ) + + events_limit = En.v( + int, + "events_limit", + default=1_000_000, + help_type="Integer", + help="How many events the PyTorch profiler records each collection", + ) + + class ProfilingConfigExport(En): __item__ = __prefix__ = "export" @@ -416,6 +442,7 @@ class ProfilingConfigExport(En): ProfilingConfig.include(ProfilingConfigLock, namespace="lock") ProfilingConfig.include(ProfilingConfigMemory, namespace="memory") ProfilingConfig.include(ProfilingConfigHeap, namespace="heap") +ProfilingConfig.include(ProfilingConfigPytorch, namespace="pytorch") ProfilingConfig.include(ProfilingConfigExport, namespace="export") config = ProfilingConfig() @@ -466,6 +493,8 @@ def config_str(config): configured_features.append("mem") if config.heap.sample_size > 0: configured_features.append("heap") + if config.pytorch.enabled: + configured_features.append("pytorch") if config.export.libdd_enabled: configured_features.append("exp_dd") else: diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst index fd556ef8770..309b6178c56 100644 --- a/docs/advanced_usage.rst +++ b/docs/advanced_usage.rst @@ -737,3 +737,85 @@ To avoid such duplicate log entries from ``ddtrace``, you can remove the 
automat ddtrace_logger = logging.getLogger("ddtrace") for handler in ddtrace_logger.handlers: ddtrace_logger.removeHandler(handler) + +PyTorch Profiling +----------------- + +The PyTorch profiler can be used to trace CPU and GPU events that occur when running inference or training on a PyTorch model. +The PyTorch profiler, as it's `typically used `__, will output a trace JSON file to +local disk that can be loaded in a visualization tool like TensorBoard or Perfetto. With the dd-trace-py PyTorch profiler integration, we instrument the `profiler API `__ +to automatically export this data to Datadog for visualization without having to manually copy files between servers. + +The requirements for using this feature are: + +- must be using the `torch.profiler` module which was introduced in PyTorch version `1.8.1`. +- must set the environment variable `DD_PROFILING_PYTORCH_ENABLED=true`. + +It is important to note that we offer no performance guarantees beyond those of the PyTorch profiler itself, which is not recommended to run in production continuously due to memory and CPU overhead. This +is an experimental feature which should be run with caution as it can add significant overhead. Additionally, please note that running this feature in certain +configurations can conflict with other features. For instance, running the NSight Systems or NSight Compute profiler alongside the PyTorch profiler on the same machine at the same time will likely lead to +errors as CUPTI generally does not support multiple concurrent readers. + + +Below is an example program using the well-known `CIFAR-10 `__ dataset for image classification. +This can be run through the command line (assuming that a Datadog agent is running in the same environment) with: + +.. code-block:: bash + + DD_SERVICE=test-pytorch-service DD_PROFILING_PYTORCH_ENABLED=true DD_PROFILING_ENABLED=true ddtrace-run python cifar10.py + +..
code-block:: python + + import torch + import torch.nn + import torch.optim + import torch.utils.data + import torchvision.datasets + import torchvision.models + import torchvision.transforms as T + from torchvision.models import resnet18, ResNet18_Weights + + from torch.profiler import ProfilerActivity + + + def cifar(): + transform = T.Compose( + [T.Resize(224), T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + train_set = torchvision.datasets.CIFAR10( + root="./data", train=True, download=True, transform=transform + ) + train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True) + device = torch.device("cuda") + model = resnet18(weights=ResNet18_Weights.DEFAULT).cuda() + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + model.train() + + def train(data): + inputs, labels = data[0].to(device=device), data[1].to(device=device) + outputs = model(inputs) + loss = criterion(outputs, labels) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.profiler.profile( + activities=[ProfilerActivity.CUDA], + ): + for step, batch_data in enumerate(train_loader): + print("step #%d" % step) + if step >= (1 + 1 + 3) * 2: + break + train(batch_data) + + + if __name__ == "__main__": + cifar() + +The profiling data is then visible under the Timeseries tab in the profiling page. For instance, the GPU Time by Kernel Name metric is shown below +for an application serving inference with an LLM through PyTorch: + +.. 
image:: pytorch_metric.png + :width: 600 + :alt: Alternative text \ No newline at end of file diff --git a/docs/pytorch_metric.png b/docs/pytorch_metric.png new file mode 100644 index 0000000000000000000000000000000000000000..f3b26cd57d566232c89320b06c09440067cce8b0 GIT binary patch literal 124186 zcmd?Rgbk%fqbxlvzwgxF}Bf_^5QDbdqsvSYT7dxNCJXhsqZmJeKsS& z=lw!pwg|DbDqK!u%@W!bDM$=T%yUUX+L_JDXxPO<36S*wX}+%IPwKik{WRLJ^CF|G z@L5bXTt6YYP$TcoLI|MIhILnrqH(Rgq6lA#4HuOYaiwyZ@v5R9xF6Hfxid!SL6KIc z4d9~CQZawruKOzVmC-6(<2L7|6MV(>Kn%QutS~j!?u#(bmwd~suLnYrlJ@zEC$MiK z52iV+V(%qg=ylGW&~_Z}7Us|$Zo%rt%)Pv_6rF+u*xQa&(yDBMs0t>iD&U__{N61r z0s1H}#AOI9O^qrfqQgWn8O}JKjjskKml&lS#a%su`tlM5kvvWA2f@?d8|E$;#=WPe zrCsV(VP5qG78Q%k)vA~DFsDx%$)N7?vyaRj)8%Ff5f$I8aR%E7@5J%icN z&Cbaf$ZY3G^CywN=}4M6nmAb6J6QtkD1Xs4{s3@x5~8O5#pwS&e_p2<(DFZ=>>U5< z7PNz`ziL?7S=dznlJ7)$dI;9L*dg0Jcz`PQw2gn!g(VcjaFV1zCS}{l7% z8#Li(rwf-*brFS;lN3`2!XBg|dp_?W>@&oL%e(>?pMyOBa0H@=9%52VA=K-J6;zCj zKSaCzUimp8#4~FAe9JoN0mwv@y&D6tcpT`L?HttytZYcw0bI>pE}J}D?~<>!ZMCvE zGGky+qyJF?n0!y74~Ye&;b=wwQ8G}Kn<3(qu(-b$Uu=5OQ}T8CkY$>GR>Fq48o>Vx zHSBeSS73>Igi4QpsT4gOwe~@f{YMc`f$8xGNvCv)`j^T{9N0g)`3Y~v&w0M%E+FUdRrb(Vus{hwmqj4~RZXLF+Uq0?6tsbVT)H^zQcEGDbz-a3HGS3=?- z6PmQ!3mBlq?FliU)+;6jS`&5mfv-Zty=`BWSNi!g-;({sawWz9P(hF65Jjz#8$}S86^Q6^Bg2b& zA&~CSaKJ|1O+#)@?RX+*;z@FbMN;Jb8dkS+mX&6C*Imv{b0&(uG8LM58ybx2t(*lz~b)x2e4ZVRaR8bhsn`vjN8KP{9L>il|FgGxWJjczK zqued_2rtWrif1sdObQRD41r5jOP>h_f2x7rQ~xnZP{`nje7IW2P9F?fRB@701-#C( zE~k-o>YENE8JD%HzK|R!w~dA9*wcgIA9VK*-GckbI)xnb&|x#9I$*MG4~j(%#&33<5SIR+K=*ACrah5c^fUNYlZB>);{m zvU28`PCz5`09+)B(fG#nSNTZp`4Y5QKxzICS?1YEJ@NvFnL>Hq2g$qU z(SF?at44Z01(v)RzN5M`Nnk8}`wXB|+~*zf>BG?bCnOJ($37C|9Yj(u>=J>Ne!uG|fg++bByL zo#F%*YZIvet#)eXn}t)8TfQqdU)3H*w-myHnVU6hp9r$A&Ya73-eBc5iIE9 zP0`&WpR{>V7u5f71 zB7q`_o2Qe<tuFQ*Z&|yf<+t{C(A3wsMeuSinUSqWHK*y5}-d3#aEMkB;oOMYh?i{f$MV zl7`aSCmZbha>M+@8kH5-Ks5X}fXjW3QHo)M=Nm+ym>#D;9`Q$t!Ji|~eFoLkRoc#h zTZ#-j8X)4 zUbnYNq<2H*m_2s^g;N(U79fltOgmI%Ez7sXk?5>sWRuVQaJmpsAG 
z$;?!BMsJx{AB@vw+m8{-3YE8E0WI{LUc{sh>!9g66SJvVTgGQEQK)yC}z>P0e#`2m({rKcM+cqI3=W(n;zfCx!NbZ*`<=JfTf;a6eTb^unmQVIzy^G{yY_jWm*#Q$V|Gq?z zxrH1Qvc8N?hmeFpb#X9MJ|7|^L3#}#e|k^R`bpvP!TvkAHKp#DeLUm|BOx1yx;5jo zm^@&SI|&OPHp+_^Qf6Wgn?Pnf3h-_ILVpV7}DUl6Pr*xSGY1)3yXKc zfO|X(4myF86tI(K6Mwq9mzvC86dG_}Y?h;GsB@HQX)Qi0*$5^V2?+xB1wGpu+)LIA z$J}_?&IY`s3V5x_R4&LU5HUmb)TkZkxILDnndWt1Y;CCf#@0sv(hN50ox@cKqK&J| zgh_f@%~}b7p>V%Mv5ek#T9lG*&a318W^2xp)3)0qoMbpzc;B^yTP88~fzN7eqA|Nq ziC4$Ay3D=Jte6Iff>_S{5Hc<1sZJ&#;X1)1EatsDPqg99Je2rW6Sx9~o z$xM;lXY&zSg$GCynX-X7%GK#gWPBb5>8?4eVZ(YQ8{|6$_V@#G3)J#FzpURopZAts znOm9m-~XJ*Qdc-vY5V$cgt<5i(Wo55OS897%vGXU0^N_yrYg+6Bja@Q(~4CTzhIrn z6Tj5wD=Q7gq3QJ2IdLL0_Hn~(Icu>u`jB3X%0UaXX$yp_m#o5f@y)lLj*WnjjF5q+4aC=_h6_;W}!5aKwVYqhB6zl`+ zlU-{z8b0#}c0<>$m^5D=asxLMpaS0atI_1U<9&y)ZUJeD7sN*Vi`xaB8MC%aRT?hG z%k=53Pv5&=9nX!$at8?)&((J^hpO_L{rE7i)OFn^JbJiC^xvUsnHlT2z>8Jl;JKt5%i5yJCtv z0!7k+$_;w_Y%sU@V^g}9%GYn@`zjDukz4gAn7Q{8m~6ni4t#RarOmC8I9+$A7MbPy z_tUmx_n@+8$xDn$Ec&xMdDcnq8qxQtYp#@>O@cd6Y-D!5`?yO>cuZc!C8G0K$wvD6 z5?1R+gii3-&sUpqme1l)@s1rKY1lBHDk`Ij=6GIB0UsVFbRu%Fh~JEpz}*idWt(oN zG~Du+>t{Dcl8gU%fj;F6CjMdA`e>ZePE%{YZF*SPX@NLpMA?t?)&sC3aG_XVZ4IO5 zEA4gXzq(y-ditU9Fz|EI&sVUXgx@gM8YP6uGm$0OGv*Z=lCDX3o>vsFKoHD4E@OZnz^}Inf@T|%{ zRBAQ*p^|+zoAJD+4_~B-S^doURQ?O&%2ymon6dsS|`co0F+?Qn}`lZ zhgG|q%f9pr3tSbCDPHR1DcJd%EoNc>9pHqYkgqN)&nVd17Ut&d`&w#lW*?q5Ip#)7vYV z^s3Mk`}eat43@B0*dfssPwzU-(S0yfaqwPGXP_Y&VJlvnKPBPk!@1j8WsH3W8Quun zBM9R3cAy-(%wk#W!%J!=EiC*|B^X4^wLh+CUO-oUHit|-&e6-S$1UdWf9yfT;!Z-AwS&XG7@sBLZ zr4k%&Iy-aP5H(9~S@Xm)Vthjj$S}qdJPn_=o)?peU6oECa7Rcixz_a{On1A-`6^-( z`|icf$rPxBEKrnOXx&7+8dw%D_w0s?`-9Qh&m@DHPOAxkT8?_?x;u>ZeNN$?`DN@q z|0Sv0#!Tzk{G3T_U^rud}n_1)Js$`3mP zJq%m?dOv{&=Rj60JOS=4`~%1wM>cZ?Vb2*$BrT;FbaFx(Eh}jUPc^;SWg~p!Wzs=n zt6~*|#n?yHGc`px6s@lfv7Tf-vP6hZ1HnS0oMl*^PwyU2b$>o=X5V@ciLb;crf^Ge z7n;haW4h_0e)@dvq(16JhQQanax{tX!5*fK!gf01HPhRzG|RlbH6r8S8JKeRSAkd3 zo%upm8K^{DR!~Q1-LE0t$Q|g=2;sOV(+H4JY+;N%46cKdc&Ay`^~`_ 
zymYa$g#BgR;GqRukd6YUyie1E%5e=9<8V>UN4i~Ke7pZjPkJaCP5I490A*x7~#C)Jf4zO4sQMO zglx4ExiI7RB5UbR4E znq0V1CC`V82+^;qbTjQ@mO^!yLzWZKg2UI;K1SkI)I_71#$T#Y{dvV@@zGUip(G}T z?(uXG?D62o4Bfc0NXzo#S9=MvUdYuLolt5p8NN%mrj795StPpdyrd^Tq7rIaE(W}( zreVfm*Ls@WFPEKpi(i|Mg~KU3yEQzmnf!SF@a9t(!9KP?y~{!1>AlovXFLh;i-N_V zKC+wFd1K%4ZC469kPh&uu24uNcwOkX8UpJG(`|ombtPgl3m4w^LdSV}edZ&P3hBxB zozbsq9)>s!z8}%6F+6Xi4iS5%i_IfQb`c7;54D8tcPrj0^Pf#&IY|aRxOg`|-QH1X z1U)kEx%PAHBC@_gdJ#>D8`Pms8^<*aM5ltM5I-+hQefb43P>at=Ltq@5{@m7PC zgz|*iuWf`cS0DIqVc>~_jA2O9HEjBPNRV)&5eQYRPCB0&oB~y*xAFZ^E}uTF7z0qu z`cjNT=B*-hP^jm_I|!9Vlbaq*ynh_;v$jzZi4LXnl(&ksJNYG8Gq*8Zf;ifqjIIg! z;g06%$)PKcA-XX91PW=dm%zXV#$q(uhmLcZitgt!GC_`E9`Llxk}KB50Rgi07Tkto zpBpe(JfvaTtm|2Qk(VAtnsa_6;=)GpU0h;vpQG=G-(03$#ODgqt@iI9TIw4Pt?95| zqRFteUL8DT+h&KC-<{TwJ4I+O))xl{IQzcUNg7=q1`r0!%dCXFmY2JZD~#mVyf& zjRczCsacFpKoIgr(z086wAz3-hSHVlrzC~q= zEz+!Y7m1McK}qPlunPfI?+kqOFRx2$kJN9Pwm7W7?BjCxKaLyPt9GH{f9uim_#Uo4 zWRS@^1yFs33%3lmkFy><2p`LT*colb%!-Rvu!$h~sGE<3!WLpA!nLvabIRR*WEXj% zKqoirou_3@@Ue{J?5(~PCETpxh#WV1-;O112-T|hok^OCep{Xk)IWOtX+B$!c$W3c5Sn8vGG|wS|#q2jbdB z{>uG`oWF8+azlc~YdUZZYgd;$Evs zVDp<*MIq+BTyV=$8V~e&?rEDr6T53n=%T(--tF-xea?mc^444-=M1I$nJi8=B?4yv zcb_^43?@ZM2ZBK4~k(=yD2I1V{nzY~VcB{+JDKSS;TZ>mC zc#>Indi2tV((ruxreo@*(X>|N;$KarEpeyuSZU(&8(;TNyk8?wSi>9>Mxr}tIS66F zc#l#?7U>f>Vj{x(@=Hq@TJJf+0U`f{#v8;ky6mV|6t6f})C3lTPY+xnm!bh9$db+3 zQ{eCbpxP{Y_skjVAZI9_=NIpYrwTvBGfTitR_@R_Mxe@5HCC_|-YoHcO3Mdza$|=l zwY7q{yoZZ#tThwr{ts-1NkU=4*` zr*=rvEIRm^sXw^Ex^AX8z@)o*=*4{XelzA52!aG~3xKDAQvJ;$oStD-J{Y>TIsvkT zWEr-SC6qXRp>H}$kEr?p2HZBG&o`bH5LFX9>W}Pm6egPjF#Jft9Rr+RYEe=5GGiOH z0C;#8LvN9H-sTvrPnW4pHXOUjfkJzv{?b!kj~)CwKU(!&j}H+&tq6~J!T5vw23f#Q zZKEcxeG~I8A(}ww#Nn0+i~`K6#h-wDf2xAqh1M!a93n*3iQ&0I&KW)?37#~becWhz z_7u^fDQVn}JzohbHNmRsBIDNz6~v&KnZl)9BExtj(V%=^cX&SrES91J|B$g2eV$FG zu&4tW6M5GoAXymGe+u<*HtJU^DyEqKwiZm_4K z%K=YYd|(A!cQ9=0_uC%4PRRRwV7pK3o04IhIzyzpL4;Mq$)#hj%l9p9rNzZ;AT>;_ zaYp^RtHTu{0lNT$TJ^CfiV|vu=({eP))YcV5R1pV_WFE^)*aHZ*EhgsVE+Rh08pqqqA2JQ=!G50}W2DH( 
zTwD_ZFeie{yP91E3BfDocds{&X8|f6D239%pW_{X-3qsCQld|NDQl=!cBJA9o|U7F z3eVSD#)`}71O0DjBU$e-P08+8%{87_C3UyQ6Et}C>ZyFLgXqUd{VRfyF?l|^&GcZd z9S#qvHh;ufej~_!iF*F_Yb)Fydn!iYTs1%c3 z8(IXfl69#g=zPTH1zs0K@DxPj3h)A2W+Ldtjn)+96%?XJbnDR4xFV82h0p-mVr-tt zt-@LAA2+Iz1Mi7J5DZuofGh!`fAD^2u*crG>#(EOyFeTPP8#-Q&md59Fx{f zOuDL=e3)77U_d?)ti=&Phe@Dk(wp1!TL0UEveyn<;$)-EApBdHJHr5M=_74dBY$HpR`Tz=l{enG~9Y&N@1dZUlpEVoKvF;b$F zDV%s;=A>VEoh(Mmj8He^xxwJT@0dpk<$-6cW~Ilj`i>`{w>^ZfTZV3}bu2qYOfCVA zXSrb|K%EiizKhLaHkh#ExABHo$4IHx?Jo+qOUUPtgM1#cSJBsk$HLl}iLyqt(*e0Y zeE@4p3jAOb?trF;Qvtlm%dPuUaJ@P9mJx6+q5xA8jtE|=zBjz;@keQQ&KS#8qG{rusSQcdTh@mX^`FVi%2M@gy^ zwsOu+yMU#AvQ6*Xzxqk z(tYIu_+n~Wmj*}@L-`(DQSh3!@amJfetwBz7T!28U--;fZQeC?IsU^N3gY$rBFCWd z-naNiTstsP8%ms#OQSZdX znW_UB);@6GK|}aQiTg7}xk6#rV5jGe5n~Lv@tNsBBti7dp5Bd@WQU|Y2f*iZ?XKWC zp6LSk@~v=G5qCPU3TtiXWw;9Q)psuuzdds_`+EJIV@6MxGxf|3kL-a!Z}1qgaNDg z;AXU>fd3M6nF*ft<5MYY`CgV)xVA3v$H*A1;LK$ksDJdh*f;}LPKT<@m`JNliEfxn zvZ4tR#6Nl)Wg8@h&<0t{=U=0pHMXaiU&Ih=R+eI2&zomjifAk|ze*GB~t100+N9T$Jy8G?o)!qx++#(J{ec+BqWKZMp zb6Y36mU<|7b&SZ+dGvbLni{AIXXo=^`K3BtKhza+5$fS@Jy&K)Wd$bW)oXT=^KK1a zVSu=p5F3NZny%uR8Q%z0pEc6>wI}c5WRrO}`w8GCl4vg;E;>1ZJ%5S_R|C~ETH>-G z4_N#8$CRI2>w?5wlmeRS4%goktjH79WCjxWin@bAe5x`l$Crmd)m}a12@z-zb$ue_ znZpIyj-BC?-9ia@L!+Y19XECbd;7ODPmbKeqvRt0PBkZ19u{LSz zwI1(o`UFcV@EsXYvSWRMvWml4kJ_Ju;7_BJJQ2scb@)AgD93IU4!0!JH8A=05|gRM z8sHK#bav*PLYd-c5Tq_Aczt<} z^8>+g>vvywWGsq_P;TT)4|ci{gwitY`$pUwvHj>IfiumgQ{C{Lyf)5?wka_R)~4Qd z%M_naPZW$-50R}ahf2QmW(WJ%x8UVg?1XRHQr~X7mJWv~?${PiObyfG*%utI_Uxkj z<3MhV;E2{QsmWjPusCt2i3j$>ucXN~xaih}?B5RW>Q5Gf-b*WW?Hztw(-pz>!FG+e zAN3-{p6saC_Dw>7TZ$2j7ubi#b-Ibo8Jh5_kNg#-e}U5m50hG_Y>E<$c1XGWS*o? 
zFBeA4JoVoRwVk(;Q|cT?Az?jJxKTT}$aW>Rf9fbOKA{jVdGzUuOnP^3mIkLG(mLXh_Q zhQwTx)BO#vE41tN?=c~*^Rr&a`)T=6fM1OsXbUPyEko?v( zVD`rqz#=1@Up3x(xmwIu^LxL^Y-(JxqlzAZ4;uBdei#{_f<)%4QI^YOKrFN`*X~r1 z6r1nBQnR#w!nb@^ar#p^t6vBZGfScIc`c#!IGl_kUP=A}8az__96NiqHC6XJ;Z!}H zXftTPscXtwI`V0Ml(3*{`LH{>^f234eEiczY0RUy;CxSxng@=&CkSL@*!rzhw`1w!eSK@V0IdI_9gkNZgfY-$T`%t3?JM!UDBn$X#n9elgS;C&%BhM%MnD zqo3mH`j&Oo@hUM04>uiK$8pJj#P9AGn8kNm0cLIf8&Tum)quyP|mYwLR8vjYu_ zfl$%)H+l{6Rt0IU9!6=!yJzvnjj&k+Y}YE3=^m$%UI=G(vq=!I2kDfQs8(wBIlEV| z$A+pXLx13PB-~g0R~Wz#9tJ;1{s5=_FXZkz6{c(ZAu=8VI$M87>LTK9pY&h`1>f7M2MAAs86cYf+!|W#o?~;=gwtDv1tS zC>HVm=j|N!QM!BjXdosXziR#p5Fq2dc6FY(KRW!5=>UQ!hl$8wq_ zMN3UhovY}fLGf_yHmPnoQ&RbyMQgmck?9}}CIGT-_S$r(Oej_uloPME(_q}vh>M@l z0WuRw0PY2P{X!=Y|2UQssU`Yi&SKa=?QumB-6azx=j(ZMzC)#Sq=!pDK=6gEO5;x> z_4lvu8%0X}w^5kXX5IaO&DAv;)!Yw=q2UVj)jKTF;l!MS)~pvX&2A3)PK4xh!9WqW zy_)WBJxKjKSpxMg1|ro7e%62*utZB*T!Qti++yMg9Tk;Wkk@U)a0=TytM|pYrr`|a zfM*}QcZPK-Ila^A1D|aseXzo|-oH$BkPN+EqiBBplZFPq5y1WKe}xszjhds?EKwlA zhDCl7_WZS~mRch=NJaSeK>jPQb5Qg3ftnW$@~5LgM`ChvmIp)6QNSG}Cv#&T&(toR z*LvawU005Ho&AM~_JWAb`xDb>XL=ry+O;X<`0+7;9d zHXxMBuTwr(tZ5R9>gw9JXV=trM{)3Q)AVHXx9R(H`c88AKL32xrdgh#GgGV+C}6bJ zpQfnu#*#M9VmP*#o&7BNOuSR=ZWW|o%0zV=^+MofOS-4RC52!P;uthykgHG_>u{BN znCsb4)p)RaB@&C=xJ&~fU`{*~a2;4qT^I^d)ZOwfzr+u5CK61>%O8p6F$8G~6FoMZ ztKTsgeh&wKcV8B%vLE0eBUUbTpj!@xhB5|NE5en99#GH3&Fj|V+G2cJ|~{IbaP*EwA&XqesSo7*#-Z7 zhv2zV>kGOmiw72-4<=iqE~REC)(`4Hkb7t=Pz4F&M)rM)4h=M-WEUl z$>li4_ZWS~U+@@%l%-E8CH~M_v;PKd;%s)@xa#9ihYrdPc>Uu<<73R(I|0yk3Z`c;T>jy;vz^`l*t*&?R)cZUQ?vwq4E60Qn^B=2I`ke0j4eFnzj z*|H>AuGCDa4e7~35b48BdqxMVTHe6rlgmJ^d=h@SmBWgVKCR|xidE6jr<l3(%7@6+KG;F=RkgAn zR}phL#>0VSuIuKR%KD${S_UNul-x=f97J4i z*nEBZp7hMRXborB4vkrryZqYfaW7BmEnefklzC4lThgh%-+&S-9RJGV2`vKqbdpYf z*YRYxnzg?~WLuY*REu&6I`*tI!k(I~Qyic*K3yS6*;nObhUbyG{dwzA0g424J`!Vi1a z&LuL=6KGZ$CJQBeR?ptLfkw86)ATW9L78RycxRKGN|u>I7j=9(AG5{7NnYrdA;GV5 z9hc`7Iw|`rR!_V6L*u%ip+6gWq}u znE{pYaPe}aX9gRG(?H1QuODmd!0Lfkb4lWZYRRuHFb{>gm#m4JL$M`);rNb6yl0=9 
zROrWE>lI`W9Gx`lc7sYTPmY&rbqNhiB|Y*lZEGzk48Y4CS3fVzaB{mSkz-IC>8=w{*<_eN6kI1%cecAd9HUQHRZN3M$U2pbLj`*pv5!Dq z?Q&uD*1n9RXm2hP(2|+W^_GD3WpF}t4;RPk7GaKA^-C+!{BZU14-G8JL_4Pq`Q(Ja z(sa<7%Hh|Q3b$fKkGge?NDP1RvOL@py{31JiLCTw3pJDevdc$DCBUs{Mcwb(P2szE zdXSSw9?OpnWlc^ttaQ&w?MJ?GeWny#vC(gGnk0XMq6zmqYP`AaW|P@RathC;Hyq=> zL>I(8*e-r@Tty2cS4{(L7WYHre=_MZ%WO}-Eh`ZcM8i6_A@rzD0WA_AzhIyq7-66O zRG+;`WP}&&%)>crg5os~*`;2ce%ewAh)8CWYg@&TX= z8R&*k0Iv=))=se1MD7JA8c}Tx)zU5)twdq{$|FhMh<=?NNk$Po#*WzaaOMi}+4M*w z32@=ZCr%ZX|B?budzBPL|29vM05+Xvu!`LFC#};vl9oI_3`}OF==G7VtsjjDhq+Gx z^XBOzJgT)c*B-fj2E$Z##%do#FR3yh(jtxNMHFAZ$?ZFP?Q296;%0Q(*GN8OpF zrI&-8cHAgLT2xyXK^-R7eSRVrFD!tA$mbxj^0EA8Pz5X~eGZa+LE0p;hVva3?nJNi z?z;Sph1#^D10`@Wg5XIE0Pt%Jt=!w+>bAGXHB5E#6p=mJ`SpY7Q@f9ltMGO9FdUtgU> z=6i?1edIpWGdQbKP&;#*THnYZW zEDrsRtV~1=G!+K}J2+b(e|!Atpo{(>b^XKt${O|xiW_woX5Ox#2_?gwG)|^6Iw`RZ z;PbwkLh$WtRVc*0m5u0j_|oUGcTW89rH<_Ba5nTPCDZ4}$C;YS;j}tsw9Fu9YXPP( zWoz07-Np~OdX1jiEC!7^E$y$u`Oah?xm?%Mts)(`^xJe2-rj1hhL&d_p)^-c>EhMQ zc|w#DyY2e<0b1uBl6;LX#h%)LKiq!Ym+_s(7At%atbEz>`1Hf@r^HJq@`0&G^)aA` z`+YzP6wORevjhcRr8Pxt9F*YZ{DMG?n)wtWhZwZ$xey@%p+NeQmY(9-(j*E+6@o^e z<@DH|&+<^rss?TcJHI6vK8Bn%GDvKAL2^xv@NWI3GBP+8OgpdVU<2DdQ z*I+7{iBe0402D~&LaDG|g*}MgNZ>XVB{2KLex1p9E(y}pOCjggZN6tsYC7G5>{f}{ z{pB9`(@6k}dxGY>$s){}T$&^9!~O*PY3Mx^&gZ?m@TK}rHtH_EqB=DgT%rOZhHL;E zp1@6|s4duMXytcfZSQa-v_uau4z9ACk1A-&P5c|3oq}$y5?*U}RY#POZpCQdiE+){ z)-^y(?>7tGIgzkex4L0qsx~Ed)r-~T01`mq0L}Mwz_Wtzm9#HmA7;Os2T0WrmJqXG z7|PQJ?gz?yPO7cDjJ(CZ57zLe7PrWo1B8ndVW8Y|GA^|q$@5TP5Rx;fUXG&73)2~p zU}A?he_dKGAg-C4vKSjcdsrEV3{_4w)>`#LLtk2s7^^D2C){m<_7%0mL!h*Ytn)1> zrjU3G#|ld&5+Vecz+HCrH-tK^KhXl$=pT5ckINr_6}83GUdS`CDNe+x43mb=OgfJ~J3#T9lOYmUYQN)^x|Cg0ip=5VV zLq^UQoLNGvH@&qMfFxGKi79?nhtdrd_kJdk-C8dPeT?S;g<=de3q3z5Zeb&(y4RW{n)3rb*eGI#{0c)S^=VyI?)kCh)zVr2Zv>&clPFM{Zyc!Ii2j9U5;XO3L8wlU z zxXC2r{=>ufI|*oe^kgh-EpGY$eg6LA_W4LHdV0hfYxA38|3Mf|(q~;r)M%dRUpjOB z8K$SHO}2{cUpj+=4HY$vuUYl)R|4g@$_gE#N9qBWe~yqWr6@FQ6&@jR9@%ypx-4d8;4M377=^? 
zlRC%f(MFZc&?PGJ^e^8lpT}cZH$APpdT^llXHiMum(9_y+Hdj4Xy&UVZ^Ja%)GJ?@<7K8EB(aqjaN% z<%!g%UK`RWo5R~piz{>Qs>y%X%AEIbbf32qf7Ao(mSqYA1&?ZK#9(%CccwMvV}NZh zy7-P-{Vat3SDLrje`%I~ zOwc8$-n+_#EdKno!zBs_OWR)f3N~OU?>|OwE!g4+zj95k^5pwm z$S(#L&&av<^2Sr#(9EEzX_JNeq|hA5n&inP1-M5lY^T$-=r5FO#+f*!Z8D%$Ofo(u z=47s`FmpZ6l!N6uC6~i_ny;K@u`e-5G-i3*mT!XR$oeibOm8){Z$Hqa? z40ZV1{#T!1dzpC_GC{oCw5xW|fWAkA5d-$$?*h1#YY;9}s;8%YB}70@;yx^7mdJ*O z5(bP44UK6)BSh(TIPdu!OC^ECpi!0M^l?hsVW$coYkL&Y-EGAezRG=Br@ZGbYNKz+ zOQTM;C+rq@(x0zu8zkzwsUNJ>_aryvJ!No{CH3qx)d9e{4ZiXTnu$9op>!1k^nP&E!hgGEVLk_DOW2x_!pU{kDv3*1xr5NIxh- z$}PJUmazF)rk|_5(d%$~ZutWJ1frXTmW1^R*GLln$I1_ws@%tMHpXfFf2KZ#i>2w7~|UM{|Z!qXPk zo%^`s#pac(ySpXbz5~47w!8sDI0<E;#OJ3>!$qwxgmYb({I3p(qJrirj3LUxi;_^Jd-T&QldQdHr&d(+*sLgYJzU`G_^!w2xOYLwsz3Sl zl!j(%Ao=0$Z6cGl&0z9M#=*n}eU5CVcr2H^IppPjRg-YG(FZ-hlqcJA(^p>=Ip&4| zrPc<#pDwdfTI{6;35I@|_u=Ht^P8bxZ$;pbBz4&B=3~&{jWQkajKk`zVATtYaU?9? zb>GP<-{S|y3CZ`G9?riZ_u)DD(O0be`a&hvzi&(~cM@^yzNgsFgvpS)O){@qNLW+f z-t~ajF2H$f(9Xl_L%b{LLw^icwjG#MxNH=WM1T+^mBk_XHm~5^;XN}2pPND0<`_f6 z;jIs7{aT)2uyiavd!reLl(h*nHGWuyI7c#5kKys?Qw>(x-6L-mWt&;qfWz=`9hu@7942Tkw7POtI&hO+l#Z~ya>j-Tby5+rqji0nt_8nQ?@ci(N_`4JOg0~ueSnl!%+gE$!0;% z{28p5>nbZiJMXlsJimGgI9@UdsX8m|u&6>|mVAzv;onFqls&doGCUg`8ok{hoBxlk zw~lJ-Yqz$G6^hdqC=R8#6)CQzNO5;}mtw(PTHIY)++70%N^y600>u(M1pV?H`91GB z-{G%4$QXN!?5wrdTKAmuy63@o6r!x4M^PC{jXWO(nx&e!Qp023nWn_&-yI^a!WWT^ z>1ri;eUwLzCxqZ`kl$#BumSxB$R^Ih>vuH%h+1$}At1fhAQ2`8SBTFd4L9;>Nx$;B zq9Nm)y~(>TGGEpDc5y|b`vhY)Y_dNP0v(++;)SFF@8VjVj;}X37Qn`0erkfeH5UF?_HX&l^-Bci6BBa`Wq@H`4D-i72b(g4gy;r#iT^z+f?TS=N+0=IUR52tkqCW)e z5?UsQjVRt*EVYF%1g8;sj0_jW=OvY0Aw7>Y-$Y~m*Ty%22xAEgK%UGwxV}UbwHrpf zjt{vpBRBHO@b0Q7w?lqv6>w{Q@(!F5uXOSHg9V#vvo=OV)4SR|E=w+U8kN5y!Xel9 zhwW&>*b^BD+3QrHbNRaJ>GllyofU z=Z1agX0q4Io5kr@j40Md`9{jJ@lLS6T({g_(`F=mrb!MP9fmF(0nX_{Xj#2&Fe*gI zyTKPK+TQ4B55J90lv{O7%zQKORgv&c3{fS|6m)BWyeYvHt6gzg+bW4`g8b=6p2&7J zGJKmnO&ig^LVo6mgqJ;;!kMg5r6TcU(4i4+@xiyTH8c`$7*FiENZ5#E?EBpeS%2E-lx*5PS7*DkritMQ0GJ}G)`FLAM`h~x=vd=B1H`uxR 
zS&7SXtnBVvK0=1suvy`##ovRBJ>2qpS*|-iA7w)=?{E5rGij#~hcDT^ zXwG-!nxg4@0d*m7{hC|G**PmPw!2|Nwsbc6$<^u`DWyQ(v~DWp#D4qdMke2rCc(5? z;}nlw^dS+pis32f!SD|pU-!(2dAZ>U66fJ2WuHTsF=$#6G}w{m0j;xT&~fHsp1c|` z-b_GWnP2!#;U9RMDddr_oYS?Z$!9>}3xz0p=c52o9FCcPdT_QW#9Op&6_qV^-ML*)z{%er&JfOd1VYh9|{#S z&6WMrasiJ#dT*1O^-GF*_*10iYTR=X)J=KR-alV^2hivq*pyB#?Ds_*=nnsJvec}f zcXHVl!55Q*yV*v?ZLl96t26*csgEoB_9|W^*tVC!$RoafV*Pt)X=6Z zzY@sh<=zAx%7%|}fOg1oT}CD6Oqr1(JYWw)Vl^ZC`5GohnyrZnCCMy*`QE4N_Q$sb z-ZW>?=Px4_!Qsw>vFQ?$qH|`Cs~n>9f|UpAUCjxtS~n(02jjWd$Coj{_eh(RZ~0V+ zKZ*HGC(&_P4q$(>2IJ-Cb{XZaU^>zJ2O%g2)5IDuU&#FDs^1h~@KAlBPoAGYm@ZPY zR!RGY0KwOxEm_h=M_{oA!pBUad>t-d%v6lPdw={tt|X=UswNgDW}Q# z^qKN}P(M*+I#qb3Mg?oGp!?MI!}Z`pwey*7#N$6>+^I;2ZKzLUn! z$XU!+sbu`g@D6mFLmA%JVVw`qh&THjWo|LyKnP_l6{{j3X!*H_VK1|a50kA3KD{L7 znAU`<8Sh7!Ug3G({#7+k9cTH)8DLx|8^6=6LFKblC<0wH{=%S?Hv^pnEFVO%CaUe_ zMs(D`CnItrn+G3(y?Ao7S(aBNc*dRjL{%#^n+ocoQuN#F+TKpbnoK`OW5{oVhyrGx z*BH+%tX0%sV1mb;uDA9_Q>8$BP^?k;=VdmANrNV1w)K@cm9LNoA!+RsMmaQ5@4Jb)qMh(0mA^7jJph{rLmRxWz1lmbr7ov(8yIe`2QmE1}`b z!Q1+a-_Fas0WON#TU7r&4T2NsO#pk69my4LK6aHI#vH0uWurAu!nPGRp2}2WT2grW z5PgC!y;3QmOwI$m!_|9R`^ODsQF;#H%c20h^Emj`I^?J*7ohWs!bLk?#^L}XqE#!f zAO?3)_6CP<=%fGzThb8`-eH)Sa+A;BZCgIFmzdq8mtENY$AL$AW4~|;u0M)T)hn_X zzQxTVparSkLs?Pgf>5edA3N0Qo!-DD*DtjzPrSpJKkv{?Bm#R9O;;TWc;iDSv*;?R zJ3Cb$?;51T>p1ipbe`7RBz#1k+FtuE@AU+u_@r2kEm2ifeH0_T1joX$ODA*E5Q1G< z-O=pItUt{#x08d6RFth`+wP+`-NKaga*@Sror9qtZB_yRWJ9}^C#nmXN}ar~W(d#4Y@ZcTWAea=ma%Z*3VN#UD1;f>N-9@|YR zMOT%{gj#(W{}=l5Pah(rF@!`jtehW$B_lm<PFn>89q7H-58BaBRs{e{?~f|A8br{kx*toL%UbT+2o+*M3;7h1p^ zc+$fwy@pOK^-(uG!$!@ske+<)Hgd3~a&xj; z3mqR{zPzmbyMyp69Ts`+cVMXVRi$JJV%rydp2uO)UE@;0e}fV^p=hrHEqRJk;VV@n zTWJ0MM~ceoNmQHRrLfygCjzTvpdF_jr!LuPE;UL|gifADK=?ueWYNF*&1+Joe#A~+ z^KEeJzCusr50}C7wa&h)o4>1MsO4Qr-qHRMk7m^O2FCZi_->&=AY38E4-HC?izQ5?1#N4jh}K z5*~{udFI(J4A#l`AgtKm1Gj%?hfb3j>kIa`EM(p`zd_} z5LfbDw~%CpkOOQ(L`|OVCsqm}o52@ehm|7AKrdfWLR)bw@m<~ZQT;odxh9-G>6kld%Ulk|p^iiO z{5Q3O@;W|*6Xv9yO!{M79xq-U;G5w}?gIdcoXV>XuC9D&qOX6^^saNSXJte)_BxXQ 
z+I}_6l+w9*o!k-}-kV@~AFUhDUCuJdZ{+A|@ORKztdI2)QaMkAw=3R*XQWugv{?8c z$wbYgx_ls!J1?f1hGDUd;$RO7;(w~$YDztsE_`47KB5W#=@4*O>5NeTOEY$DH!H)Z z?@Q4X)E^*jJPRgMV6|ERtoQ}5j%Y48Ec+m-_U-9KtHPb{ssNl76tEc|PEOJDUjQ6GY zO;Z5%D4Ibhm8j1bOPsZeKU%J*Br>1F0yTz&$AC9Jj&N&)w-=pA|b4Waz!eNBHvE3ZX$Lw zFP|DP!O(>M(X@)Hzk#y)srY#J{eFHK0_NSIn}aHfM(W6*#k8x6{w_>giQj_e+3lT; zyt&nboDj8P2KE&5)dU+!=+JOXCcZm(DID$0^ED1OX!|FmRAnEr*A(@QOd1#W=)8tK8t zPbFOtsMuz|>0StnDB^Xm{Wy`ymMrY;B?0_Bwitc88wc z*4}D|f9W^e^g7ptcDupp78T+2zt%ndzFz=g2s)pkR!xgVjC6(%UFz8Bc_d}pudlkB zG&|=Q&0^{7iQm;rbl}a-v(g;2+Xi5{REvEFc_KGqIo!VRs%6+ z-1wN#W}6j-obO({B$wMI-a{Qp(P9~`&wGXOhgCO)phkng0G^FE@xo@;f5_%3uBVsw z65g`sQ9T|6uTGCH=v{R` zUxR?)-Fuh8m&&v~v~<)hv4hI*?t71?w)0nonGv&?0qd!+rP>Jbb`{}Pz!Ao#yh{v- z?+xP9q*`sZ`?@?#dJ~L~WD7 zlo`_`i9Pi4`jEx&S{=FWw~1LNDb6wjh>XM``95dezQxZ4Ue|AatB?`^5u80EZHL0o8?MgJclwf7_ah^{H`izJ z?h8I$zx(sG^PttVtqe+;G$5tya{1rB&)_XH zi6X)Qc%h^Nkt>?n!5a~7?n>cH2+At=vWL~l>a6IXaa`(I%;5)C+dQD^CZLOwi~&$1 z@CUeDS0j7Rx0f6MJ4!g_Ca#fvGSJlw%&C-V^0-^03nu1wlZJ|+?>}yiSUSf&9yODx zNr?nLZfJc%l>Pbg(T~cWA-DWO#BnR%n%R}WT>Ng^4SS=9cYf`5eVsU)(LDcsen7%P zit@^!nP9WeeZDRczKX7eEl30W1X8hAskYDauKJU?CWb$3hTPx1`{W^D9e47Yge0@7 zXFdZl&J9HL*mdcMS<(>A+Soy-%ey-d$co+6pAhoXAKhnMQY&DvTA{dw*v zF@5@$Ig9pkUanm_xs!g~7XFHTP+@Q=T1W^7h;*VVNKJ9F?f9O&=;p`Im!A^Y*j0y?dg_#lv6 ztvt?>Lm`xOf4UU-SvtnJs+3GP{c6oid+;bOp5frP=KTOKU09queA zsPxym&GhPpw1~6uwjs3isnja8uP1euIyuX(YM%`K1b>^~&B|wUg4d@`R9k6v(9c(e zZl3T}7C_m@ah|^P3WJe{;vxR zW`r7Eu%nbKB+1zMxJ#jSkQk;_+i3YcDLGnHgGRn}b8srzT;_F0Ablr$_$3&B%vQN_ z-=X5Go`@0UKM9SVQ^^7Z7Vlz@Rzx!+Ujk90pN{%5$RjTRwd3VmLyc_nUQG4@W1^q(95bI}^~ z!cesJg`n5h|3f|9z?V$~Oy)41(kEWLd_VfXrT!p3gt+ZNHBb68PoJ_j&tFNN4Aq6* zRms2k?RPI>Zun<+1ogz~f2p_#BJ(N6iI}_AOlh=>?^?T?Rhl_N;d+{R@Z7%@U=Fp9 zk!w-0>6gT{@Q%QR|EaYi^wrgfo~J%*Faj^F_A!vRb?bbt3zKvN5H~tH!Yycu)&^B`_Dq>prXPo_)z8cIUPYuLOjn|-rA+lET z!G4f_A#sCFKgAmg;X<@Pzt2C4^s~UEN9YkM-v3g*8!cVM41X`5Ba#t9_8G_jmU90? 
z@cqwim!dW`e>-xKZM0&_4v9U?dMiy>nDKk>}} z{^VbiG8ADXkqD{%XY~C00{<7{mXcS0>h%BpzkfWRCin#aKH0N>7N-B_`Fu|K7s)*D zooDp_@%^xo4`agT?tK0SB)oaf=iLe4e}AbU;{Tt)!fq#Td*~eNn+UHG!i>MINe2Rp)Ou=1dkD`d8ZiY#Eb}&3*S{`tC^~ZL8$V+;SafF16kAAxk2VAE*u( zMRw-_4;oG62s=c(n`K&(l-nw~4kJhx0pXsj(EE@A%=p#)*a(kmawN_D#jBHv$dJ(o zqm4FJPHMojS&-YpprtKzvk;j2i|P79lQ&Tq$@1r}-_GVNU#UN1GScjixJoF>lpx2MUJ2MHsD9h5l>A1ve>Ci>fxrg{2JT zNy%)r3$4-G)roO&eQsch7l}p1Z&z&-$4y_vg>pN`AAevMl|ayZ6NKBjft{32c%;H3 zDhvq~B2Ye!GQVN7^1)roBVv?$nSZ@4AQ*qjX*M&i-q#;3(q4s{uT#VxtoCV45Ai3) zFwY1YhV1&ZvXF7(DD!8I-kS@^e-RY%m@w3@=i4a~5Aa!AckRe+k?b*FDdyhwFX860 zb@MNYNo22$Z1SEHu9%ai){OQF6aFvfeTd|Xoh!4FRrw||3r-bR$wxD@srRe%d&p0o zR^}gnR|+ZZHw5TwQ38~|Z5YIDlY4x<%`hsAsv+P^14?Pf(M^vv5QH#2@WfuY!lpcazu+1(`IcK`9*CS~D`zBW2LQ zlo0sk*G2Z+km1!*f5(*uO$3sZId`S?vOGYJC3rU~(RlPqw-Jmm(p=1DOsf^E^{4Pw z9`6o9x3;+(Y>$zay|#+>Mlz);G}&o1SqRh5&OT-WZ9;QEE>h{^NlbF2^B~D@2Z)XU zfCZ}KVl()AwGs`a>?ei%F6mbF4vT>7>za*|Wv|Z*PryOb#Me@na(zBt{A>H8Bd%}K zM&@)Y!SPJaOB&C<*GkvqW`iN6M%7&3cJ|P88v|Oee7hSJyTm);Ub6ut7v~zfAk*3= z2Q8@p^7+AobwtiybARl4(e$wlJMo+Ife&sgyy4>Sxm&{t^mL|C-`xJ%?SNynQ;bip zOV6@D+y3{8I3`g}?(tim%&H><|8F#UQnXl0azl(4OkGrtJ4JlTA4Pl#*vEd#wo9Y$0IO%peRAQ4}R66K>GH9?K{ZQDQ z%aP7LD686x6N3G6d!-eY>jSUGqaW@GfFayBP@HV$P^07hR23n>ZPS=`C|X@EFz)bi zpg5WmgKTyoY#Y*_wlrkO`PIV#EY8b;-76h9O4pu1B5HcN9N^^)8_)J` z&N{*&KWm{NIjTsaV6N7UPUxT_lm{`73bQP&fVN)J|dk)Vv z_;AbYT4K??c3ZvP<&|#)y78nh6*#S2tR4DW?!I-V`h$X{go{7(4T%=AkdrLwo+co< zfa=Z-Mipw1CWQ6kk|c1V*fqISE%gWC0}~WRC0x)k-d%uwIHPvweyEW}^B7Q!u)eo9 zzEVrW#~?(`ueykfq<#ylp#c3fSXLxbcZTpwx&7STtHDiR(SCREqF+RFB&Er2{pDQ; zIKAfWt_!98flqYxD^BwP4cc_biOw^R31$AOqTaa|$4EXyS9`3=(a$z}TZ39u4;%g1 zjWa$U^ym68=@|X5#ysqj-{17le_d;Mh^ZdE4K2XcX|q;7A}?^Tk12?$SqJbBjrblo zS7aYoKKHvYYYQ^?Z*K6Y{KNxj);osNurhU^-BV3CJ40OuYkcX~+@-M# z(0XaP_v$oHG88c5P9?(NL_SScj+eP@k!a@~=$MTCEpci~Oa@v0Z( z*Dizd`3rq?%kMKDRQ!HnEsrp&brUSvniGLC)AbeMk8k6hHoN2zNNjbATFc7CsbmIa zTbt+ZCY$ANaycJq3_Jb8mGT6}DeBxcO1`{ePT;6ze!U@XzpVJs^RsWnk?@E8I;qh} zpWNc%&}%IZ#-cRLPE}oxB654%DQS!pY_{oNQnQs;J79Uej&Fpy!u}CE1F@2pa%n0D 
zQK)qv5Wj!C5VSr8=>P5Jb>CH3i?RRL;beME%TB;~h_qtPmvK_wg2;Hv*>m;MGR0_) zxq1W=^Zee7BDc#Wc+-2j3{j^a;r?xu%Ul@y9Fm+I%Pi^7_x23qVUls-h9o{v?l{k{ zCOWP5eeFoTZa;$cl}n)xZbuMyDHu+|y*9Xsj#o6QT{|5_)Atv# z(x%X<)7x#d(L4$SB0XL%lDqkIg?OJNmrPouPk4kO#l4n_o{G;EVrF@+@Hnk_g*MVdSY|*Suqdg>s@Iae(0C7Ps^6^O602O#?|!Oyt(e>Oz1pue^%Kw-N`G~~ zMA+o>C_(8m{0D_fvs}$vo#BNe;v`6bi0Q;6(SMg=6RQva0x0qQAm*S~&e!SxG4FJ7?k#UAzi$GKkQS~>nOaMG>LQ!{LqcXVSToz? zx?Jnv%Hxgmx51Ik*cbGw=|pR7!8`g{UCTFT(d6b^k4GH?_tRPxpjvM!duIU7PaS4U zIJ^H)If9%m0pi)U{q*YZ30!{;_-uR<0_le&L?7Pd4!YZIGX54<7t9k3Mxv;bd}Pbk zE?6<5=Mx#TN57tSvQ;nL z_PxNQ5cbPBnk)M%g+*)yiM-8hN#0USr1AHuP-$WH^$_68HR~0Nh6l*jjz|jmKl<5T z-JZ`?xta&u6ctFy#GUz`cKCp(wta;#HxCfEQ1#f(b!OrNFt3+X>;DzTn=0D%El`7F z+Tq*W7ze5%+7!$`MhbJlVWPHoJa#IjH4WmwPI7%cXW8$8o22q{h)Dnjz*}eA%P=5TSFAWwbpX}_uyfivXq=hg&w9CDFUT%a^_eT+g zBAQhFdOM6*CuU|#C);W@GrA`1&g%a%Kbi(i{M_XC-9Q!`2rc+KS*-_3Jet4SVUk4~ z8*lV@_+30&RZz3V6s0|X*=;bqVwiT{}B5hByt?I3CO&eOa)t zcYzg+Ha&AGy!%;sJ=Oj-Wt8ChjHbN}$ASHCOy6|eQvis{Yl7h{Ui5vxnKnHtWk?PX z@ZAKwbhpWVoz&O;7#UZW-D)m}HM9DfasHi1l`)XWTeEzOT@z+2rf)%=A>WZ8R0Zh$ zjE`!2(+L`Pl9YUU_p?f+kzTQ&R@AxV?U}rGO<7agb1xVpcbiJbl>7dXX| zC!^b<@fCu>v)%spmCgPi*SQNs%wgGZMbvMsXiBU4>O6c;R$|pNPhDOGB1C&eOOfSpsyMw4d5&DRZCTjb!6lORCMBroWf`o>P8}v)oVTs|(JK zfm+Vw*W5Sxztt?v2wS(p^{Z6uf`8+N-^t|3)j1d`3-+SCB(%E$_ZC?+nI3Ya2+5Eh z@mc(oi<|JxR(i9vbD$V)^;~w*Gfx%DDE)o79xdX^vBnI`{n$W1S3T*ED`II%zkF;n zR+P!$Zs%qx511>O$M8(NQC*%UQV1{C4 z0WjEBP<{r-^NF z!YMmzz2$c_+l5s9grB^%@mR`p3kz&Z&Q!&bOWCkiTZjI z!p&o`H}gV~OvoE2SJXGTRugXp=p980=yfOX8iG8cm&=7W2*>UTUFVPrSqjq0;^&td z{V73!(y}UpfOLX!x*;FaR}C^u(KtD&lefaS8AyLZihWD<@PasY)DaM>Vg)W~@l$>g}gV|^D359(2Jzu|B@mnK_cLl<( zcTM$rr3aM*1Ci6O!lz)@^KvCVw z{VC1ZuV#O+PyVOE_Z?w?`xN=tKv=iGUBP?j)nmDl8MUtIC3s7x#Ux}F7mxmk@`Qhv!qPlV2GRs^T0pTO(g!I;b$pcftw z`()ipw?UqMg>u$;Q&r z*DV@p8V8Y^yR31_NWp4u*DIg&+O0lm9`u$VPA28H-)!EzoTdJ#b+Q3WzgHxg^Hg~X z4H2>3a|yt$0e9>91b||+`@43x0#*KU+7zI1QqwV9^+q~pM_%h!fqqF9~R+wGnBR$>BZimVqW>`csV9>O2;bd(jz=D@a6 z#%?N;C?}leK29``yMrZ2Z9dJNb1xWQ()oBER+R*hD)+3ouE|TnbJQ=~T9jcmsQr)m 
zgq61U6YB$2O10J_33)5ccXA@u)9B>fei_VtMj(P$Q=2(FjxU>?&IJ_nKPurgcpfc; z-s3>9mUP?A(BzT@Q#&{`3bCI}#owT~>Zt`pNd^Vu5YXT+yaI$>P#WL|nxw-UPJZx5 zQe~`rH+c@$h&)xuO}d#@G#(R%L~GP)Na*IC_YGi~?WUb-{8V7N?&+r%cWW_Q4?Iga zATvf#9V4Iq*e1XB{5!;{Ra++mZga7%3VKKQK3r|Xk8KG}B0uCc) z&F*00fBd;x1Cr`Z?Fe$fcbg4Rg3R!AECwfx1Fl0*n$Yl}9) zr>Lo7Nkz&8x&Fc#!nWDVpqfjnv@_B&{X#~sv-jl^k!u38GdG5%51(Ewq8=b2c-3)w zks(64Z1cvvET#LUeHX%K2PG9_<_u!p*!rzyWyt-e;(e;b9&4d-DeEZ*b>=27&I#}j zUgRN_POhIVdhSYZZW5I#Vt2l?obL#&h&jz08I*Kyv?mj20w~P;nF2M}gHQoCXI8Sf=e+YR z(WsCzoYvb5Yxpimz_`j%ve|+jLG9OZbGEDuMT>zp(<>Foc>NTEpXFvg3US=r_5@*8 z6-UNV7LN(@Yap7N6tg1wF=TFvxgKKsrl3YrpHaMxouQL!Naf=8bwMLf>2Qa0>6iI4 zde!6{iKOlJ&51wY{vc+m=zQ4(qe+3j564t3ib@2?jefQCS5D)}B$XY_`&7w=DeQma zN{zGeQkUC$8>aJqN4r7>mN}XvnvxPfN^M+`d|twvmIhtOp<8 zq0*mC^fqFl=nW^)n~vJ?#FaMaT9VC`wl*yE?HB=lGy}wHajW<*&kGrp8i);h#N3dH zwMpm8i(>>*y-f(!>K#bHgrUI^$nc9Z6Q`bgj7Rp)!gGbtVzNnsLLDL8coYobimms! z(Lz<%@8)oRP@kCzTg6%v+y%8!As_P{-EF->Lj#(0cQU`k%#%1+&Zhpbw_PZ+#usf> zBj;PYwaoM$^|v6>vuN&5cV3e9EiSw2Lkw(wcB6?dcxW7Ih_(^3!+y0Ohw%+yp zg=te|s(BCWeaEu2Vkh5Z&udV7>8i)*7?lQf8qR#&{mZ38IFhfbYow~&*~Y2i;R5cy zqnTR2Ew^WS?Sn0Xq4^ zL|WD3f%V-b^mJsTJK!r7?@BJxNuq5^9{~-4~be>Q`$ev9GzUux}n1d-4n;Q#5fmUixGOb5c~DJUgwH zbPmN}d+7OkkdD^3P_E?^6vdm^AGKF%Yg|t-LxQjA*bTgGPMaQ1x%G}+wwQdQf|^zF zUmQaoeFqGcYVE-*83aEFs-E;_isbr@0;uwtd_a^@x`;T)M)BK#l+1JyHLbd9@*kcK zHyynuN|ytQ8iG}l<-uOTMA&alsKujORLscI;II3COa}rzRc%`q`kJX`13Qy z?Rskr!TJ?D?+uIj#TNcc$u%0~rEJWXOuP5wt7pOTQ zFug`prVp1gWip?O>8sin&5lXmJDO4MAsBKtFM&J~4FMVI?%}D>oReRe(~uIozB4Lm zmub^Bmvb(bwElSlckGWU2?;{My>u#R)ETtc_Ux>{@QZsW^exceA>wAyW zYkk}!zzLkQckTaiVzX>Lv01dDp`(e$=NWb07;G204Qi^Yf*t9lBVGWikyNEju)r~i z$q{v`*MH)r0u>)O5>}di^n~Io#Mh_%ovspN=S-(O?eSkbKekI-esC-_066+>yt|ZJ z7Ajfg`Lx;fE!^*EroF-S03u_#!wYo&{Be;aSK!a((gcy0d%;;VPT?=8oMFqwmcXrL zV+k`CrDtU{}A)Q)I2uR9Yqg;}-L@A$jita)3q)%FEne-wG4#j2oP zF_E^W!Iack`Uj<3OLfdGwL10tA2;PA*)ZHnAeQPPEHu z9rE<@W*&L>{H?G`U7QI1*}C^>Z|DK*j%AxZ`6-{#mTmDLw{e;%VkC(`?hmLi)XtP9 zgPm?MRCV@$9sL>b>mo@PCjUy_bBbYZ0tQF%fAFnIn}UZE*`g(_bvj)nEUO3oK76T( 
zyeC|XXt3sFI`aMoF8DkKAHl5klzERJJMnnjbbcHgzPA2Sp+ z@KNl3D>3@y*WbdBqh8t`f^z-FGC{nL10JM;cI6G7 z>8zyg&d4$&Pmu?8&KsUNLP(P~OJ$aFrV+KO)Td@`9T6Dki7i#UFjR;kslc3ge z`-lq_CzT?t=Fbj}8zmvTrs6316n_v6V0uLrjC;;7JKt>~aB$5Y90a4C!H!UHITygF z*D!$1Yv<#O^3b`|ZFu}aJ8g4pVQ)H%Ay?h69KneJfk5PF^nC#*YkP|K1-tEGCho_8 zsq=IRL>HAGpmV5uNxko866 z{-&@#x%8sTu|aNG5WVxwnV(Ed@|u7G$|&QX3`$JWUo*5r#?L?dG#>fUi|VzlO%C1} zQdXtgVg2?*L*A6Amb~IR5W1oaxQ#u{j3I9g!&PCbM zO%7~z(yVu2zxWhdEaaOOgcwn~xVo*jh0J)DU=E}1qL3)lKpF@!`L1#&rKnFB{+vA4 z1QAj z(kU%jp(1+jgOa1*XE2BL^2yf==dnfGic=muMjcGh-n`zQ1Ma%{07?otVhhSdsmuuY zXPm!?d7>MOtzBaHVfRz`FDE2q%~zIlWK!2L9Ss&RnDLuVX?(#5^FUru{A&7G)%NH@ zoXd5rT&%L|e%2$-12A{DWOe3ly>VLTI{KB-mydDKc zE93X{6E?XwUr?|1hviI|MSlUtP65({NaY5c{H%~nDEwadq{4o?6dc#wOkgqEA^?1J z%w}F|DPzVU{gq`eq0;u@JBl9JmFsT9*GILhy+e7zCYAa4ooBiF4nI1dL* zMFe0=Tfphs{ALR;e{Tm(W%74R&v-<9;+dr_*x01!#lL;~us`pqS=yqO8yX z_%VN+_bkOqi)yZ$0e9P!T|ArqLC}4pGR$cRqRcu=%V<)wv8;(rgl%HGadUK~U62~P zwPR6?$oGPuz4EwyaVs>}qs?_5Dv=j^nT9kF+AmX?HMX9_ zryNDc$rjX6zPCrGtD_)`bhxWc(i<8_^0L7j>k7+EmPd{Ns~lnnlkb(LBfTr859T_| ziLa#!xt4*kN(a0H&6QZ0W3g zE~VSvpi*9q@awF(+R@Mns(UYcwlLMZB`?=3oTdK>iTXQwI{(Qe^g3hhl zgUH_?-(Y+(SuBk2mbp=T*~j@MA{>=hd0cDW#kxXmVUNg(4Mz#*a&g*wK=`XR>JOe2 zKy?#PEy9wr38=CWq-8ql4q1v=rI2nv&lDJoHs-f388_Qbi%%DsL6^d%1okfW?=76z z2;6EcvE&_4GI(M`Y&v*L!n_C1cl#TTE>3KO$iKp_mRa&HiP!B4IQ=kLNrBpfF{W8> zVhP+BJyu>rW#Kf5FE^0t@PWUvdY=mZkfnNhH7{!|+qyNE>OL6WVp_U2GK|YZG6p>5 zQ`@ZZRaA^?3y6LMMa5HsR9s18-0)(-J4TV|uioG4doINs#vGHi3^v|Z&Dx%i`NA(! 
z8|)4d$YbRJ(~Zk?MY2Oft@NYztlqoJ!=Ju&`qg|@rwUiU1Ea%Z11Wt*lae=}5OBmO z0J;RhD-c~<&75WU)^4Z35Z#M%J0PKGjTM?Ij42bo7^*1vT+`*7Qt3PBxycyKb;l-4 zoKYbB03pyB%k=EMh?9Vqss4JYg+_zFu}3mn?wij3;J}T>-JJT$6%84PE6DL{zYt`4 zEPzov3~=4eKyP*zWi?G4aQ(9~6>78;&&A1L42&~3HZiN^u!ZWFfL~to-&t_ndQ+x% zl~r*zc#sDt6AVob^=s+71jOCQoTt9VM%|h9)N{Bd!%AVUd_QYGMYM=YsT+rRmk`&& zRL3IOviX_yRh5wdJ8DRBkiDLoOUqEK<*q5QMll&4s32{7K>b@AV_O-$ zRVxc>wa~;SRxaq4ig0Pd2l#rpqa@2#Th%9b`zG`NM}!9qxINeJ#P3GPk^!QEXG z9D=*MySsaEcil*^joaofI^BIvpTA%4eY!7rYKm z)?&*?t%giGxRMP#)_pA&MQ$hFT!pxH`^-=c{%wJ%c*ErGo-_WM>YeO(CRr^EHSA9B z;H!%gQR6y$`7BM7@AD8fG)_(s3-Mtm=4qr{CFeLTmX5`pd(T~Xl!wDdky=J=0!M3G z)*3(nltp1R596bC-7x7KYcu>W37_j)9NtMhD;V@{IOW`yTkb>r+b{Ud<&?Ey2r6mQ z3->qex1mmNIkM^(IaO4yvfO`C9hJ^d-1zZ}Kflh7x%vUOXKYWrHCjryZR>&_b!pRl|7tBz1=@^y5$;3 z+ua*9cHe-zv?SP94!(OEHst0q5Jj02(S{~FQu|*`|9asr!w8R zZy$kvx7bvg>15lWQBIdgz+Xx-nay86KC@4q;x!HiJ)bLglgL+J;)x5X{)h;d;ze~V zV7OaB*{&t-xj3s&M2&_GgN`X4I|bpELI^^PlSnV8*l%un4d%+Mzu#M$<^IIlOnoXV zs;A{2iH;uoJaryl4X$?}UU>#+d{V>>agNlL=7t0!LK`B9D>@nmO1O}(v3oO>H1`Fc zF&ll~%%h|J=7os~BiOD4BP+k@)RcHP`SY!r;NGohQ2VJm%rQpyMp;q-BD56DH;QB&dEIxg=|&<1 zyKteYj@XX5D_oDo`QDppDX3_m1m7jGd7Q7UOEq2j7Hv!MlCMd_=-qN1Ls{RgC^WiX z5wx6MP;du`{4wW0tUx9WN;6T9;G0hjxAEE_S3vrx)#jzbQU2Y;(5&(+43FD6p9@rq zv#UM#Rc8-zDZ?k(JUgJ@#QMf%%jYEQn}fBU$RDq=(seN%6@>@$H0Iwz`E6zbWFMPG z{DWyL369eX+I&C+vK6PW$>0``<>E;g7zu@OlGs|El%Vm(m&?&#|3}=Te3md`ZjpDKTP6(=Iq1?Ew{q+fGlzMx0wDj1lbaH zz{?q=rG~2zizN?1UQ74A?IJe42fOSCL9#hSMPdi?xxrW;-?;YM)CAu;7-}sRHvE;rzu?fT; z&gVVcVAOOLR~r2KSS|CeIgL2_Dm$W+>sz@a|cxw{Vy84{(|JU z=VhPvODO{TxB7ts1FNG>5&!+6M2;w-z1I;>6Z}(oOP~0o9UZaJ5VqYXYs7y1v;p@& z!c(_>0cP9I3oV8trzPp6BHnSZ^i8VTD z$#e8Lrjmbs!yU)P)aHH{`${bABi1WnD=}-mLR7xia4S9$^d8p&on^Y}i(w&%>xKS5&+&X zJphaEr`y@<*dq?9gPb*pgWXAYLiLO(idz|_qYfY6g<6ex1g~~dyA)FEi`YYy*2L^D zzlDW}1e2Bx4vU)xI&o7A-JahEM)^V5wVOOrQrn!Q3lwrP0d@sh^D;tKu2{&s#&YYs zOJYJkgLe-TRd^!7xS_ECIEFF^H|vW;RHJMn=nInc&`m~G-`?=e8AO`Zc%xgfz)fv1 zx($CMktvZT7HDVbWMr1^>i|TqINg51@i?6aw0R~)mznEH#=WsGXE6i-55}iKO4VP- 
zn{}yI#|7_ZykXjAO~BxJZ26mCf%&WN9%o12opv+Y$`u>UAXIy(yM^_-rA9F3bU9h5B-F*LqeZz7pwSjKdu&E&fJl~u%MvAgj=AB;mIp71XFs!ql- zncF?Kneam%wrj2261(-e#o4>^%^g4Taf8 z^_LE-IE~}LXH_m)6#r19^rod+r3awv+wI)Tm|bhnIx@ZP%EEaJ&-R#If^2V&P*YTy-%v7Ozp^ITgN#M02aXYkXXTzPX*R zMJ70dHz3AUxOY7{A$PtL?^brDV>4TkwB2FMeg%J)6lE})s78IfL!8lUvvrMRGM#~P zbA2s4-U&}_@oTQyXbU4Ykh=4JstQf{gri|W;* zGU2&5F*h!9BJ9 z>DTZ%5;2jrhf5`30i+|aIhX%X`N!+F@l7g^=6XA~D`l6fBc9Mi_HXI;1U+i8D`hPO zhO6Gy=Vb}d(N}q+spB!Gv!Oj2T0pJx^M}45kD}2eb)KWJ6nqx%jiY=zotD^%@{N>O z1V!SC_V84e7zjR>9QA~=&DXq=$HUC`pr4Z&he=c}=k1;#NP|Y(Dzhy3U`>z3vE%b# z#0M0h-%AV$ubVLJBb(38rzG(lsTa2A=gHEbHg{>mIMm;YTO;9lvp4TJK=@$)ah=;z~I+; ze#aS0H3usTq8RHb*x*Kq3^rwjt>Fh-o1$;iV=rcoc_?@52j-gjXMNQT({WT^U*Dw}n8-{%vq!?6I$*QBk;=I0 zdi65i4w4(nA@RVduFx<+xqEl1lXL}NyHUETqk92&;k175yw@|N@`BNVpAlMBEG4G* z!bHKpwq$oCiH+qte4OR;((BtyYuGbep*Ya)`wei6=~RK1sfCVaOpWL9QDcCvnu}6_ z@O9OTp-YSTiYVz69>av;lEnojZCBnU0J;$Qnw7`10)rW)+9q7T!r5!@HZ*B)QwUTu zP3DBhZ+(1iZ@Pc#E_QYYg{+hSwP=db~H$mQe(^k)*R z(C^9I5DAw5R=I6-)!JfraB2Po_)FOK@x)c=JtxQq*c{In?mm=gR(%I}@4Hi4Q%XMJ`o_L%l1o z+DmFEswCVfGnNb8#L+~B@jf@;D3$3#6Oyfw#X6&dFycZj_IPHYXC^E86>S`my^UQ9 zw-OHi_vYRBoWr;R;#rL~PA;9{CeqjQXL$J z(%Lb63Z?0d3J9j0oR;N3a2S51HQGaBC2?t=Lk)R0U14dGs6cr7bP!$+298r+RxO|* z5g!D;|2i3_R%aID(vw@AsEVJrUjYE! 
z25@ikp2cMpVdyAaU?CW(7jY}Tvvnhprx=WQ#d>o}HlWjFIUc2AxwnXHOpQEB%j0~& zq4c;3wd`aDHn}NOq`A#9M(e?u@2Uw|-j7=Z!aW{u7Y;Z_Rni zN)~5X^=7U&Nq906ZEOQSV_l*y(6_UXYh&E%A!{$FKM_v^6T*7%`KW14@P>nqFwi{^};@n}3R@k8hA zv!HjHH{=L&v{{lp;B#8OU|*?|;odY7I(z^|mm{4ZsC0E;`AFfus-ge6=#$h-HS|(>JdetFoBIJkUdg3MIBv?jY$=(vn6mcLjN|%b zPgNwY?Aix|jZoLCV{D&N0Yb2@SK2;VYhD{3qq5|Oou0HNt5gsvL&h64LMwk-YRBf< zvEl~Xy*qSbCEiU>_CCQIl=9~){;4OLEd%d!ZN(Uf3Kzlru)R~ z3qw0XL$=wQEJkcB%+sp2Iiq;VM~Thl%g`T~^GTPtI$~VX>M{Y?7Ij@$9pJ3&21X60 z*bVNk;#^sJ?n(s5m1RQs?DJOZDW(k9WD`GFLO)y{$P@}KHILMt<2ah#UGDc*AkD~4 z&a93deKNhLCNm<{^k3&pO4QOjMB=PFY&{lrcd^T9U5Xg_!B9fDbhGDwtkj1+3F~r2 z`mv@+C5PSm^W+(b(P(9Z@?0e9d$rY!;zEsO+dgLAhIGNvZVIoX_DDza2WytJGvdxA z$zg?&B-*@|+pkT`(` z_2j2FB)O!8U&*LIvTg{)_3e_)jG@~c;N>F2>|;<$X6N4K_%N%p@o3Lm7Nc$WeV~I` zs8Ou1gO-z^J@Ol+R9GEp9#Moc?i|9K$g#_++*(0v#$<95dyw;G`4NU?a!Sn2XT|hm z>Fos_zi=Lxo7jaBP64s9gj584r~tgKbHtiBPthc9yVDg~Frl+oWN)Zs&-)$>-^D>X!Pi8!^F zW1aV{c{C!PJrBAA*tw0j#-Og_9eQ*#k! zw?%?c%y3R?HR6x)%an~eUS>mJ^&MsK%hZ_yMDFRlloqItDA(M{q$szFs6judhl0Rs5&Rpp=@dCd)T zfFOv^=R~X3MTA(Y-}_cK!n0d>LwIRmD3;RH>pry+Hg>nLq&hgvF^V>RrUR^D`?8g;owYJk1m8B#rI);y_kQxD>$vX*g|9;~R~ebhmaJ zI1Z=le09P{A!99m7;l7>?4YZQ!RwOb zD;FPw^ye9Egf%H6At9*IrfcXgZYbu_U!ZHo{-9xFA1*FGo>=_6NP z*f+4;H<1&zV9lHauvnjcP&xon%1;y|F;3ma72n^1-PYGl87EhyWTsj_Cv1$b7me{o>CQYq}nih*nP71tSN5du(3Mj!z6TzVop#f zo|yH+2j(qPcS=5F_XyweoU?AD-#Nd#K&2bLy1CTKODd%swB2{P+3WwXd-Xz|PdDgeeGMZ;Bm zF&7epD?>&DP~u84p8?#Ur(7Qpktm56z_c^PDx)D^BtH4h1U{$Sx)OSxivIQP0_}j7 zJ4DyJ2agj|gf!M*=wM^`CVTAwWxmxGiuv%+g;Ip#sJ_|m3Gw2Er%H4LooS+#!=5O`~iZ3#M+-vQz_RIenOp+#3q(2T* z9rLvbQrF}0JcU_qwwI!=i$AcSj-U|9wOsTlDPE%J3(cZScz?FkU}83ZLh7sn^7YAs z-?o^$FO!c}$QAFvodR)-{bU~MT6T4eF@1~!`jvBW$46n*{Q~&D_Gu8eU$H0V7+u{O z{zCc@gyT58H`;x4`a{(IV0`CraV`{HARtoqf<~zx7SCynd5H7na_t9@mc~|S-vzIR zfe`hBzLn?MI@~E=vqK+hyaX};0VTlQG1x6?j*&z)_z$C6&e zfvP(rpKVpts7&M)(tXX2k@Yiy39Q%mK|e`p0C-^Fn8>}276B#qmt8O^Jru5)Se zX5DSP;B663xCG-7mnX$KFo^57^#jMp5ozB!eO*ipZn(OwM<#wh7fcp3&)CoAX$Fby zwc#n;B3+`P3JR)VE4!m6Gql=jS;GM%7lw@Dm)uUzD$b2MSySDIluHyCu4%9y4G*u% 
z!W&#UjJF<#W4{hnu3K*w>yp@Nf0gemN<=L!@J$F9ymQ`BuCBovQv5M+*b8vfA zYPL7QTJ$lJy<)&16;uQa{e6c~z56V*O=$+r($dnlwXW52uXiLV`h8P@!o}5w$jfoo z4^3iMidmce!JFPlOCS)vX8%lZB&2vlwpy>p2tYeEiL}FG99`I(Ol<{#zBx-XiIMDh zBhcgXpA^c+EEf))B_C+9+?BQM0?ew4^PHi14C^sJsD)gTMtp~y;k} z6O>QF2S>@7jiC8Uac!mX8CnfP-@!88*nnO;!L)zELgtM&R(+=%Sova;ZC<5pHT%WY zg%np262Mts||gS zX#1?sWs|_Uu}|4Xs-hOdSbEh(qUT+2o*kb>4g%wf>esAQWi3TdtCqCEvpp3zodD#E z#RAQ3wlo)UrN?V_CjDWvBHrwV-m|lH6H8jYuq<@p@yENxdzNw!W%Yh-IM9*X`2^Dp z&Xm1VW;@ynfdGk04y*xU4QOM3@;gT^0A? zE`OYrU50RXOycgJ+C)mE3n1S@@=Qs2cIyPZQbWa$lwVLDi8Fv|3@j=ZL zCEIT|Ulx$I(ekj{U8Ne6^5MEdRW=o`RDJ&U+<2gyP3?6=>9GsMZ{^@Jxh=;h?;iVkFi5?w+`= zro!cc^8#EdTUvapNzAK;(2;CkZ2+KpOoU0!PDfMu@Y3Ur)B5?R@VQ1;@&1|Rb|q!@0tBA-*MYb1OAp*t_a6l0md4ptdzsh#Kp>0MyAajqvG43>G!4*t zSg@l^-3%`7t3Ij(^TD$kLSAiK`%qJ6f^KwO)Ll3Cn#r6Fn z%4MR@MkzdUqn!0@U-d>Q#ni!htQ6d{o_4vUpIX!Ha6S*~T3&ZJ8~r>MYPmIsdJmp* zuxQsTq#KF4m?X!rS<}nvykh%(mZ-D%t97ItdXTvf-Z}>`V6%2jT z&3BV?x14|V${7Xea;mB7uQodu70!4PCJ8UybFQ(70;7&$@f;fvRW_f52pf48XDMNJxDDy+tZe) zwx|<#aqLn2L# znKkh2LsQr}ipDjjL7i|!=ioO5(D*#oiKJ9_z&$j-`&ySYsuBNMX;}XVB3T{|hMj2K z&m@D?pC{zOiF}+E*O;kooMF8mQ+F)Kpyc*LXF?IbhbbgH?}?cxG1(e+{P%AD@K59Gl|+cjk#$=&%Q&z3{h87@c=kpsrCh>998K`++BEqMM1f|J^uY zhwkLHx2gxB^x_)quSY9g1Xp;wMAx9K5yL!B*LYsC2BKY23mjW;I%rs-k(cAU1o zYzmX6>-J^4avkdZ?U+E9+5YTHJ(Koq`dOIj4o#>squ}P2`&A`rOGxwzkwaTr<6+|` zJU9LM)MktOqdL=aeKb)fX|ho?yRQ`#L@x(xw|+z-VRy$#Z&c-$J+NSgly*R^-a<aJBY^4wdwA>xJ~7W#vI1>xmv4VZ<{|i@nMUSJ86*Czr(}_ z$p5$!qZDa$X6{FBfnMeL2Wff2&Yci~O|gW@22=RKn?$svDaXZw6HN~>cwDEF?!`gh zhP$&|nQZ2$Tr8is{Z{@i&YEaLCEY9b!Rgr!MgI#&{LiUtgfjXVKDi~W%xnm-2qQ!v zWEVwuIK5kL?Fms$6fNg8%{SO`eYyAVIwWr6e_8M{jiMp;50gN|k^4N+jgVwELuT zL)foyS$!Ni`Mzexbgw)KBTr=S63#L{u!FdBUCgT1ot}O8u3* zLY)2Zx4IX@p`)?ftOy;hB8-~lVT*&|UiJ~rPxNhl`Hw{@#4_9NrLV?kN?MSvBS`9K#A2M&oOLmX#cvnPU z7f^~mYcDl!yAojDehkGCq&RwQ-QbjGolg`VQXCo-Dgnet~@!` z&lQRpg(={0_XEm)4RY~FsU~grTjzEVq9rwjr`XYH`+`0TOFNHd~s;k%QBsvWIZ@V>EnFE>90J=xF#D zM>5yJ9o^W1I_fQ`|7Jj_QQh;R$8M9KX-7umXU3&U0yYtT`jp%;ebqXN)lAE3C8}8M 
z1zB*@Ef(}lQO-S8$1wy*v*6U->@k9RH^gZgAz>jvNvbhS|tDA zAqWQ({N@oUr7$@N^MoBnpd#IMX~0j`Hy!gCE2BDF55cw^C?e;-)6oJ?>1f&LcYges zNG!&(ar@JSSl#lkn}B;tpJV1MDcM9avLKOtet^&)Oxt}MGxMLK|D6>l7Xn}nKi27L z5TTpk)@Vp+PsZRJhJN_|nGtM7Y*$*DQT%=)BKTAJX`-0C0RFa*_}M6%^2bH*rOvfI z@u>ew?5|nFZS-2xdlmGh1h9zz@jx$)`*!-6=$bPFBn8p9AFUI7k zN4}i0dAMJZ%NEBPSOn28H{NwG4Tl&<0v$kGyp7;Q=TW0@Z(h#%wO=6e+r%~8?pJ~B8w`5qCrus zenOXdA?BT$o7|FCqtffBf01UrBvkJfU|IEvDV;H35+3z0LAW25MqXIm?EDD%=g@!P zLDooM#zX-A7W;;BtDNS(aH1gt*+{v6QzE}CVs|)E*Y>YUfVG?zPWjfqfRdoepml}NjzRE_e|_3|rN z#@fRFdXFjvW@+@*?s%qO%>l(B&fhqfzfI6v#*g2}iCK#`3Rt03J7?T6_5ju2=JlUh z3%tgR_Of5!&jO`BFF%6#`acnFHF_Mpxw*ZCKLsPuyKePIP<MG!(WYG=W<$hAH;j?qSS%&c=3-=D1{eU0>~MynP=tnmVRk$z+QfbT=_= zDyU^V+s@NJ&x*xFof>^ZU&1tUS9SjMa@FijB)fC^6eS@LP)bxV(doS zZElyDVXy0wda+?HYaoG-w=Yd*bt3_uX*nOB}b| zH)~)z3$}CC54sjQ*(`F$B>auA`w#SMjS4y(Lp6{7P*w1DdZf7cI4m{2X##kxvAy_6 z1bHV9-CVtp&wa&(sLw|M##F;(`x~IMD0T1of{Wrg*Mt6S;kL$?`HH(7>Z+Z&h0RHhT5fr)vg+9;w2GX1nr61a^ zu0?LLG!t zoua-DbkzSq2%&%L;lvFFQ596^7XY@Xt{1ZQhp~)CESaJCwo*SDHDjt=@)5+KiR=JR z5_{1J|8eF5NLgamp8DI14{i-mD+O(#X9y%M*4;*`dya>wvlxrZWz~a-u0&99WG~X# z?M3Nmxh#igcn7jrQ@0udEevB-UtuR~nO*T0{G(Fq*}ENqjor zB!*_p@o=$mshwoiG231Wl6!r+(V+SLXT8hAoBf45mEUmMzcFSYt?!^OX7|wdN6~>jxl%`tI z&}_a~Ij2ymcAEf@Ov{}~sxg+yDVoV-vs@Vi_`DEj9$*Y;3b+AkAGMqgPs_Q%)toEF z=qe)ulkrlDLq5B1H?z%H^Fq{@zSGN8t!6bOfIA6BZ?}%V**(RM*?PFUVsV*JRC?2l z@O4xte{kMx-3Ej4dvk+LA$P+2uh{?~d;;K%Q&O)#pEZZX(7+!0=D~ddrMjOh-TmC> zw&E>M7g`e;3qzo9ZgHPt#b3W4uhq-y9S7++GUX{DUGrH95hxve$bgrMEn2oJ@JDD5 zj$>4Upz~cSqOPBMf!D~r98%hxB=9Jnz{~%Tb|;lLrrb)0sK!!_k3}^Qt5b_!-u2b- z#*l1p{2{v*>{i~m*cA7&qS;U~O&X`dpwDyA{(ia1F?vEExTU4hnvNnag-TCQIT@oU z`zeJ+gEOSiy3}BFSIT3u3IMkh8@mIa%MIf*Q+cK?v6%GpOQtcD$g^55HH!cd@XU_{ z9}mss+tA&YE8#`9+*6t!g@a>D(lD+C<-*L=loGBnW+0P>KjngRLyZ=HE}!4w zM@2$T#wkoD3urrPS^(fgwb?YMElKfkJXe}%SlUNp`Mm7L4@Hxe)21=s{7#^MVMFJQ|asX`8B>K+v$*I1l{phVJ^oLRv4Kxe8$DB6x-cal$ z`PVYJ6W*lF^tffJA^pwIATp(J*2q3BAalBSXsdF~B z7IPnbZn@5j0pHqL?mh?;zIkn1N2aS4lzpnjpiFy0p^DZ0@|u*h*0F(ZC?t$(n-gQg zcH^f 
zqOKC=iJnlsqKP;AOT{b2YM?mt;hPdM?%`q6mF(H~ivq>d&(#)_xKDX~0Kb;t=C(p5 z<=$gsV6+jSN7J_LDFqslhlO!GUR4PK2jejsk3xKD=G%E&c8k7-?xCcuvJzq z)b9%tK{5Q40}ufgvO~#QF$Z{gKkJ{Wpr8_)p)(m<2RgTnI1{URvvr4@W+5uyhicSL zwrM!#dm}h;FEu@S@uBTXt=3vd0K|r9vP(dU@WuP(OVQ%=u0{)L{YSQV$vCRq1%z#4 zZG>&9xNJIt_hW}#j?>N*qd;0oafViAI#O+` zC)D*wjYibjFHJv`J}b7Z;WY%SRfXu6Hl-e0(9tr@I?s>;XnypL=dl)!5~r`e#dHE( z33`-_J(8SDb$IvsHhaCih?;X}u~cO*Cae{{=DW9AmLTXf4I{sOtz!}8+PJ`Uk5I%% zck^S4S|LAYl!(-&9U4qFU-d05Cr!YXDxUAx44CjiFjTiTi;SUqXq^)f2to`@=lAsw%&6979Yq+N~KL(&sn8qKE zq-rQ*?LX~)K;VYWE9K?okP$vN9 zHdLc-ih&rDUi5r-Yf}_n+OyJvb#(c=2p3Tw998@(OP&XS3sG8*KlT6`pnu2LL$q+_P$tu4rnB9a8z^ zeUYwenxp3WuKqbUBYIPr1~zJ?nO0SZC>n9MHazcC(P}!>K;_B3Gh#Co8OEG%M`Cvn z&g8-o)ZG(|*mn+A?`~n+uiYiyXKmMT59Y-iuK(SZRM}6Lo4Xpr$ zsf#iB+|B*rS1dWg|DGj*BM6pIVPNeG$6pxK!B3*{(Xe*B`r!2$3C~Vgc(h+&1As=a zD<9A?<>=wRRKE($Bvcx`WHZ1Oge>e?WBV?bzNi^^iqzdS{$J;0x)8zhS*z2(7mN@ z<0U4YAZ-t67`&U`&qTW&hdoJW9`Rxx^%ny(MMDceKTAK%UzHCrwx-+g=j2N!oGw!n za^_oWMtEcnO67Jf#h)^|=lB%Q8jJ}yT4X{w^w~h;fBXw?S+XaO0BbJuTJO8vN%hNB zt0#ONhj@xZikNwCz*(({zm1OGoVE*JRb$C(^kc^ly2NxH0CwFjxai(*ud2AVPADlB za>=Mu_48#kx}+6xnF%Cpk)|&m5%;P;f}()ms`ue?hJI0xu_UXH3E|~xg1=PVhmN-<;vSApZ=P6Tk27O}`4x+j4ujkH+(=sb;@S%<9*$wH0G(F)9`Za9@fd z;W}-0-j&)Kw0#Ng0d(Pj#lpDy)NU#`%~~#H(p(Tmq3aqlhVy<8xX6D?B16k>1`7vq zg={q3(s+@yj~N^MZ_x?=+-qH}B;@b5|J)|+d6H1j2i(fejA_el z@MHPirnJp1DEji?hd4dPmKJ^BFko1wX&9Cxkr2e7)3oz37E>x1c&z{vhsJ6&1`@7S z0qH`SjJP7D{ePUy-yd$w??`ZW$&OU34{8n(&%*w=cBW{_SjNw z{pnkh#6v&$dTlgD$;de#Sjr4%2Klz011^2=DnR9dNtFz4 zSK`gnoc`lu|9SEAxzojhf3y2aV{v^}tCgX%|3(-YG~mtVUR y&5~D#OL`5n=D~w|Ft1*^u_Tl4q}k?pCihn%;utLU!eVVyf#x z3dTFX;3c9oh~@faOP=4VdnQ~3wd9xC;v*-WZ(Q5NZ|VQc?*3EIWOP`FDi&vYd!6DM z|Dq*+L?a*!X!^YJKc!6nwXn5}ry8{}6pE4OF#jC(k4RKHQUJJA;xACne?9jk5dY_x z+si_XAuNAY{ryv3Z^#Cl-jM$vx9N{TNF;#(Ge!&31ReRGqW#Cq6M?X1qTjznTY$Tq zsGjQE&O61c{%1PBx6uE;*gy67|Dk5rqR3D74_#e0&VKTMHf>jVw#BKR^yzK&aqZU) ziXR`=ERwJTPtn$Yn;;0wYl7G6%9haf+Q6%~ zOeg2S=W(xsV!I!i=YN~1H$M`;F)8m?A4wp6iylocwZnfV?&Ag54JP>!_zUZAGw!5< 
zR$VfuK4M6VWW<2sUix%yR3;_T>~s^k(sH*8&ty6NywUN|TgpJ~tIOfxr@Eu1nkD+h zKkP7^Tvu^voQmr7v$x|R7zmRSvJL$CIsFKQ93V$+{cP|urD<=k%JAnGq|<7E(?1(D zs^jO>k}AKxSl7CB00PO{a&Nn|I$y06_AF;aA<~U+3VG+8IJ!YLaujQ{v1!yZ2)7xu z!#oI2k~n$w7CtsUw(|_$HGWDO*}g5}4l!?bKKt(G=9X-65vX9e3!XNW2HC27HylmF zUN{Qa6-c@|UY!IAREocL+?cJ@cI`9EEZ^*3n1U}Z?4{1EAOM`gQA#aMl3e3)QA8{( z4Y4XO3+KvA?OjF9!)96$@;y*2_Y`V?KsqORCp8p z&gxFoMU?%c#ds1) z3!I}SW3nr-bff)W4TUX0o9s*gO4&H&pgB5}VZ{>~n!@hR`V^J40NYQ%z)vl336f&` z-2OhIv3F8YUeuhG29?ut=SVyfUmoUON_Aq&YLQz*^qA9iPc-G}c(nXJjyRI`#O0RF z4`K1kPgikmy@EeJkeJVqsx2RI0LH#J5?+sw0F`nE7WSJnsd`wEV8WPZj}yU};CI>Z zGmrqR<)Uo0s+VZLCZi4lw@&stGI7ZT;ru6EKMOh$g}mfTAe=4w#AkOenlgHkR7d4f zZDe6y`W5Hp>_iEgeMNc1J(uxvCIJq0yiN=zg^S%_GP97^gvXse3%yQD7~n$)gznD( zxcLj&6*>Tj_vAE;XZ6YCyCp=jtcUAWf9WwvyaegkZSUJlM$u3{@Yn+U<2KQ(=JV1* zcUR=~V^;+1iuPu5;{bDazS~|T^uC>4U!gtG-cnDna+KjnbfAfHk=aH3d#=aDsyI)5 ztn2&l^3hash*yNE^z|4j#cDE7?C!bjG0LkNoD@gQ_YBK7i}$O$)>G*YpZ1qp>2Gds zt(%BdlmXPUfFZ_Frg+5C9qJ^ANPdX&o|j54ySx#D3G;MqT}bI-96K-CP-0=K0EVRp zyZ|ST*^h2|<9k2=K5HjuG40&tSO0QXUQu=MJ}A`m|Q|*|BsK3pB&8Y`5WnYfU3<%`r$ij z9so^|_ilDZjJ;t@WJw32kTga2^C-Dt9-%Zb`SLfFCW<_^-_NVvuSg{E`FNd`ZbA@X z!0_qiK(*q6>j_iK*Q+7?2Wn ztr1;xya($45x!bCBoBF5NCN+A(}^*eE|m0lFweF~;S|}(JIi5D#UUDrOcV_zoby&h z&1E&8m}5Y2df+=&kFgFZdAYZ| z9=1r{JH;9QFtIr?ijr~h)G}9EHQV7}eog6*CAVl*KW|JsGc_fiK*k^v{3IVOQmj+d#c-3ODvF6Y%)otbm zJpb4l3&h0LI1E#5DwA!;alps3tM8s^AJTvEv^+`ywkyR*2$oWlGQDZJ1r~KD(s1z| z!5;f;$CAIbACJSb8iWa!c-PKUqfoA;G1+X;`{##ed+2N~>D)}$nB~P3tEj5g=F61< z$l*L^&F4(SM+ira)9=~|ZT)y07b#Ca(nON^GJ?{a6Ez(Vv}_J|V1wz1eLDO@hy4iHMiYtOoD$=z#g_}y|#I#bDxUMjyGGC8As6UI?cVg z;&Lp+C5xb>$D+4(xd!X9(AcGm%4+5}i}(yU?YwanHL`AFcFk;W?}4HY$-4?=4iQN5 zc3k;vs7dMpR=ZpLQL^fr_`x*3qvv2NG0Cz{G-dyr4C<05>zZi8w9~--=_IE2TliQv zvUaHES=6l+kB1s?7oP7gj(Z2)PnW<>j|v{;(_m=WaoHPpM>ueY79ve$Lp;fKUUEv; zO+bt7;=Ysbu$|NnS=Xsu6hfu>jtlL0A?Fz;ao=H!bkBr%KZU}W6zKHI6T6GcV@eI|SZnLAE8|WcH1lcgPoU2ZR1RCnFIR7#_fOw_e z4IVX}m!}4*=-6buoG)^^r3BhbD|N#D-Vsc2v2 z>2s*QAmjvn06L$A5idw3DbpJ)by(5OT@qu{Div{Z5cWnqky 
zzHHbJM@Z*c>IZhMAR**3}LT3kMGj1Aj`Br(a& zn6nyb5C{?ba^cV_SEuB)IIfNmo{IQwUE|>=8j-HLJ?c+H-m{z4pYtAlF&#eNbB|AG zpktaSQZ9K#+6;-*)a8sAZ8=Xo=eyHU{`pBohy4E`?X9Dt?!G=yk!}&BOG)WQ8bm=v zN>C(*knWD58&OKSySuw%XoeoThOVJI?)dn7-}^p4UH6~6)~s2u&dhhtXZJq)oU_4V z=V=Q~1xBz;e3VE-AMbLePfWXzFQUyBI03HY2ge1PzD?Jl-bN7dj>THgrgoK-aIlco zE1jigWE6_WhSshJ)kZ(Fxu;*@B-`EUPT_;@{8}-ZP!8QXSS*qd_OEv(kbMzv$j@hUArX&V(t8us2sWTh{RA)5Xmm-$|_7sQEo0 zJme0Sx_p-4Axb&oL6DszOvJI;x8s{?dsnLNIffUc59S$M)9RieIu2en#w`i84(!pM ze!_cShaR~W32uFV+n$M-9!7HNr8?c#lUjk;O{{Mh>(;J}g(7-T4ZSAU6_$wU{lKjk zrsnFlN}fVg=j2M|5`Ol?jz6objlctOCHeCl*6?e-rT~sO^@YJHB5B4$M8?phIMjcd zpNbFTiwB}t?rkO2r3b~6G~MLvB7rCjA?3s8)+e2TUNcXzIc%FL?BFN&T(5z=&`kDMl58RLGi7F1$Ulp#Wc(- z{ynD!CP*STltRB2(M63N*V(6y(fM##0~sZAk(tQ~LMhZIM1{pg0k#Bj6PJzbK z-z0XLvl<>$-I=7=xMiHUev8k4+vCz^;dFh#hjXzK8{C7y_uBW!13DQ?)Tf@xocy7u zvB5@6`qhYiRV4|>J7ke%!rL%>g>tf|Y=hdg>lB&TyR(rFJG#Vml%&p}^b{zXy6TL} zXL0Fyu=O*L7rdL27(wrRGfY{8#=5kMCqNYID@*n3HUSo0)1?}nCA6x*S209iR%GsV z9h>3GlO@};|FrxNB?4nui;-jOXfB|c?8yjQlLUAL_$|%iu`ORb9RgZzveVDC^+|3m z(}!^S5YlebCP@#QD%3UK!_j5%-e&av+zeRdAp;}^S1&h6Qzly|7h*c`qjqYsYaC1C z#iF*il$#vNVP%68xdbYJRr29l^F?wh!*E9J1Tq)~QdN5dj(=+jT`~Fz!#|C+8LN?W zs`QBTiw3#|PYyu6j)yj<4Jg z;N>7JryN;m_P9uPoj#j%X2qK7pwY>ikG4_(o#G;5UlV<%75r(P%x)wI4|v*83_{5G zs!)ih4m%&Yx-A20XuemE*dW<=l*Mb#2MF2w=61m{rRL7I1rIB}zHMjQ-D`t2e|s@1v`zOM+y$#W}q~6i#F)2gQ(`BDD8|MyS4#?-Dj(7bY3+N2h^h+syp6eO^Yd~Nwtm9 zbn2#aj5)}Ql&=&@f_dmz&CFC)F316%=Z#w5MaL$@8%0hvC6w$^imd1Aw${xU*ast^ z?cW;F_C`7%G+hNj@YwjIn6YSz_`8c}g{^g}o~PY;;bL9JLm2_T9;ZwuxpB8CC2SX;QVZtB^G+~P zLILae&Y%f8&oSVPVmea0khbu(45{6{(~A`%+fY*_udTUG2P`1j7SWgI%`9~xtUgD$ad`(2DYIZ8?$o*;E0F*?odIZqH z6K)~*Z7;IynE2*4Lrp_%V>0AQ#k$a$E1Vq3&`10ZbpyKNnXeEHPt`58w?ZFwD#J96 zw!%@7HBEJ;&Z$}-EUzF2kZEIU0H|TG22^~T7pYJwg(B!|>V@3*-GvfFaKDNJ54u#%bFewLsJ&d$nZ1lc2SD-dgwoeiJml_z0b31 zdNM3@QuT%)*}Dhb{T7;i0Bj}q6r3#-fBauVt&t#`&2(zGZ^!skvVKM2$ zK)JyJVq9cmUbH38^QAW*i$XxlXj9V-x)TbL`tc+Lh+^1co@YUvhcPp*G638CqSE-Q zx8n0oS{{of^-_)4>i22K?>iaJtJ{{zXVnnLWA1^gM~=;cf-BQ&$H%)?F^izH+vPh~ 
zb#|1&7pV@WV%n4JcMySd;8}T)?;~7lthBadGNI|z#Ru9Y`|YGImX|2|98iplBL=Fv z9vlv=y1lRU%LND(10I312u+y70^`Fk8i93TQ2@ss%MjisM;ru10-Hc{5#};2Vi5NZ z&VX*20F`Fz5mpb12}Hi7)0rm8{0CH28g3uM9}T=57AU}i;IhGsV=hVkuhW}uIbFZ z;?Z(m@ncUaVESfti^k=dO(W#e#?7!$q$^$H3(1zMPTM=gxK)ztgFxTCfK>P<`+#~_ z;Pe@+R@2p>!we?Poi+vCKnqRr=|ZGMuN}jB-OP z==0Jmh7<%G%=<>>%M0z+4!aJA2z)OMfgF)ABK~)dHpxAMYC{RU_{nU8InLN+t7CMb zZX=uv`VHt>Q`TqQAB-!m-&{O46L0qA)oL#D64XU z=;BysD5itb6ES4Fs`i-XX^JqKap`)nH-S?RX+!!#0cKSpzrDDT7OL93nMaTF+@n)j zM}@0w=50G?DPlB7>#j*;un9-ohOfl)J?;*;Z-BM_ynaR6I7z8$BoS8s`g#h{&Ywhw?SYPx8s1jb=W zc33fRgm^ZwB7+35U?;<`x-Bcv)QE}SdP>I9IBZXh9vG#`iiT5?;W+Nkzx>H7JaSm# zg7sB*WT(Rp{6TYI3L)ge{Aaf2@5tTn=m6kEh=Og{arC|cJUBQH;0G8PIdx>|kw2yO zF8=XIaCZ9YN)Kw|OD8lFm%P~Ut(=Eeu>wgaSg&$se>U(0XS_}~4})~b$J zXI533>PyBn&qeai3#p$TVenw}S#UGX?9yKONVWNEoqQSUKs0km34qo@cTHLj|0N;= zSCc{0KvdFzhV#!l$ZhCz_IR^q?Sv1*HX9=NE+1rn^;-YeOR)fbYfwl_FJZtN`w&);q5HiMhl&u;OTG9f-Ol!?}Nd0PU?8$d&O!@SA@l{)Yf`((tjqa--pY5#u*)o_v24a-kSU9Q5A+ z;CSAL!p9=?z)62WNKJr13QOnUeEJs*QRMJ3x}Fq)$iIMRe9W^bsr!}oFBljF;A66( zB6jru#`cQ}-ik=ZO@SR<@BV^8(H1@yOYXYy;V54@Pzg+G(dynl=kY*`x_0O{-Obr!-mXXXn+UD8TROh(qH(PM*E8f=sSav zf1$yPCvYJ~Ak6$7jr<#f417%M=|nukU%3*7!1Bc`J z6%4`giBBd?8gV{0tMlHt-*(AFv^tlcPV1uPf6TUhfrI-O>7ph=kGh%j(^R>=mre~& z4Gas9VO9SE&kPUUOlIqTY6>n(sg-eP?@SicV4e{56@o>dKVw$u?SAp`|LV6lIjv%e zra<2)nD;LR;E=6b{l_-FIQG^>&Zx6x3{l5|MSbE;M=FaX%edOR6! zwLXG)q0qI0@E889k$NZbE^-!ni~}xYP7LMN4<&J9^g{PoPY7AU9WB%ucG4zmSoLRv z_SRwcNeW%A0&3^!+2rq1F^Cbl2^yi^r0ZRAlR#3 zFqr)~S^Hn)p|*UyX9tptI(|pZkG{6P-ksw-ObVOhvH1F2$FbRS3A#4U-U?sb-8Y)5 zXP0kNGsZe&OBSSWfl|vmzePI1{1+f|`ZiI9#K&-}I*(*~{>f%K3P-mS6mkq5S*e&C z7LvKCvC!|Y+U9avbW?*D0^H7qoW_b}bL--jDF5}G-@;A6?46Vh6uZe7&_CPZt#Dir zoUy9~to3RIP6O7hD6FGT1-4c4KNlB}TTHb#nl1l`X5ps!i;%~`nN+x{c>j!9_5EbQ zr&{M{d+1h#@&_Ef6k;{5)R;C;u-Is7KbtOmth?JZdCr~Ol$+;0Os?V zP5(4Fic;usyO{Y5{Pj;KwuS=-P_L{y5%mYT(t@AD&7u@@O81{Wtd{}~z%I8W@8zH5 z`v1eYfM=<#Zl404#wGnR9WSH^d^jJH6vhfW=?9-uae=0T_co? 
zxIZy@Aw7nF0{0F6eaGrEvkqy)4}b6t;pw+`a1OlDAx*B_fA5p}Cx>R};f$U0dzt!| zIdaj$$0WVpwHROR5N9bO{Xw2b=$CL92VlB&HK$}Dz?SmrSEN7Y;Ee^BR0VuWqq%XU z9{oRO@DmOqEO<~1!D#8O{*dL59}(Yx3yRTj_|tzGtKWegbs>B#H%QfJ_Qdp4?bCVQ zLzD0N6)bphJVr_}w90=xi~tudAtbgX+J)+Hz6d8G1<1wG;TvsYVqcnn%=s_5_cs5< zqvtCwyoKuM+m<@0nIDagxK;f7+|j~|aS2lXD-`Vg6(x=5aq*(9aFtq~xUhYHmDOH! zsBK3mEd#21j$Y>waj^Wpw}?38ze!#3o_aIn;%pZ(a3O{rEtkkgCmxdeO2&hnPjjjS z5c^6s0aufzl4nGC!>R0=vIgK23>*XuR7P2Y^Y8G(r>IzOl=q}~8`+ox|?>)R;HK_aOD&6s=9ee+k zQIhFA6r%q6f4rV?^=ys*heS^9BM}W$)2>1#Dn6=`P8?Y#7h&Fc<$c*F)x#NP9R&67 z#8Dlb!(!a_)7efv*nk#!|HsEhk;B7=SchwYPLX?fsV8!P{gpP8YDvq?5Ll&MxN;bP-g!4tlGJJhS)kepPzwt zXDsQ)V4lEap{&L(I?y26tA=VGVsVmD-CWVna$svlA$}00cJjvA3KSA|Y#a6HAcU@P zN@igvn&yFT!}zD&=lxgLNdg+qO!ALu!>Pk#V`EbFKU;>& ztuxCKTrr_yyto^tfEG@_@MlYB7u1LuuJCEag2F3h>O=TRG*QsfTQ$DBP>LG7!IDy6 z4YzF591o0`FL{i$I6G7MmE8^@Jzy@Wv}30HdYx!i`zY6jV5NRj8=_K;C}$M&5O7aX zN9Lg-(jl3DvyvlkD}Ft};Z2OyHP-DA(_J-aDQrwzdLkUjHbh*$VaN*7k}NEGXbI2X z*>6y{q~oFTa}MDz$8!eefhYU&C)(~O$@aTr`O`aZLMF>dq(Z1ZTbcv~%HvHH8L1E3 zYtD6UY=bg?*qfLLO!y+bX}X}fK-Mk%miWjc^N_5q<{7_Q|L;Jdm(V*Y31~GT^>B!}8WY zg0{D^O@1=m`viOPQ-+?o3Ie%?req)Ozc9U7a4{k9>}yN8Fg&tbBDGb?YyymDP^#8z z^4))4x7}BA`eimoVT)hGNX`MP6F-}Aerg4hGbRo)ug;$x2BE2mzRS%F!-j>k zQPA)Yboae;YoM^S5)?v&@VI~4nPmA=NiHQXcoy4Y_(4q_9SQqVzo675<8)_KR9Q*G z-NhZJ%0eA1N_8alt+)?uZikuUErL>O9k@a*_nPph`Nq$TxN>brZEw;>YmDDI%6Nuk zJ7yvJZ1VdvCjB962u`*W{qT)Qbf|>w$86}?1X7ique_MgR0x+Ux@ns3VU~NP&mGEX z5@>}1y=#WP3VVlP`x;+Sw9e@am(ziMQtTOc<0%4`e!fkzMRT#%Uw9wj8yjyTFWq|t_x>622Xs!;A&9LPb|pH+5W`7Ub=ZD^LU1OxklCk zI@4=p7L^Z%6qlPDG=Dg7|H(?2e^9$R@;tOc0<1a88+!Z^hr9a*Y~S|>P>0XFY+%wBk@?VE$mC0pR9^{0U$tPZYh zCGoTR4M*x5Nb`Kl#k+&qMDMImzRA+B|Bu_2fqM+P`JMQSH~4G?}Ldo7;y)>UCYO<(Bb7|yXN0=5AJKPieHJW5&YoT1ctmrYi> zNr_;g(8mMzH)>@=A2-m_?T;=RY?AsXhzxgy1Bs0goSPNOQ4*9QRgHg>|6+f>F4-ds z(oV>mc6&T>d#WDgM&h(f_e2$SCR%xRHg1tWuiKoY$C7fpim*9wT&A%)M*?6_*z)~H zysI{WYD#@Vm?*z}1-0scMDuTUj4{B2fEPmh#?GF?nWP?C0()_UDt?agRq9bcNEcj0 zx5g7i8B><7@_<);jKg+HLI-h2A_^7*rq(@CNof~)PO4f}qn2#Zkj2>4tm-stzBO0skIqz4n*W=&Qu>F-T 
zCgtVKt8$t4Fq)zw2NaR!yh~(i9)4WVo?_GApM*8iE!fge6gAx{g4}(#(kv}TwYd? zN%J#QUU%JyJH-}#XD{8{w6}oLBRP>;z=hJ}>=l{A6C%%EZ{6j7yRc{p`cy{Gc`okJ z+^lg>_v`hj8U&i=TI;2{$~hZSYE$PeZZ5TCrd)lSQ7PyId%S2}4x&c>>Po{-7}R_x z@54_8Me%#VJQ9Ljo+PLE=qOPaD*Xhp{)musHBw@O{($Y@MC1C0canDqXS-iabLWLt zspv7qfC=&^6EG#wY8t$pOrp1z1$A1X>ytCWQJ>D_a+`qzh!oN2B_<-y&?DCkB>Qm2J&~Wl(DtWQi=#1B zBHMe8)&<6@Jh)k7M*0l~JY(u@sHMuTf((U>-E{@J#it%b!HLdFh2h#n^c!c z`Gl!N!74suBax-qF?7Nhq9)lY4A0BI7&n>QJATp@ySp8hP}oYB5(<8X6h&n4NwSx{ z_(HRStI?+VHlfzFn1?EmRa+)n;tWwc)<$MK^J?d$oq}cjhLH{W8sy3joj7-oc zJ(_|PkFz|7C!ao7T&kz`t=35iqL|x{pr8=kR~4DoF+@fW^IDiOwhC3|U!n9(c>Ajy z`Hc10XaN$Sb<#ekeV?x?=pd}9$KtNX$L+wOC0F)3i$`v}Da`zPFy^YUuzs>-PiHh9=v#1koW?g>hndxk zO}_!Du!U2XB8YIGd#kA?uZla3!HRxCgQQxMl;DI{3rEIbty!SOUc@7(wAUetD#y)l zKRPa+T;jx+=tg~y#$oZ}JscVJe;&^w-dc7{EjM0iL8hA9%JP>XHs?i}LD?=ez7g?&8YcB)*8oGs_DIH@!c!6YXL0nFqd@Ulk6 z;9YIk+aKe+&vYn&+#MeZ6#sJ}*&3WWl{Wm1o&G+(?X ztM>F|pFUk<&Q)DWN8hT^O~Mp!f3cL{)B%(j1>^oUZcBhwf!?J6D>@&z>J~nlW5Tn< zfl^!3P8}caAIhX}7WirX#@)r?9X&)Y%sw9l?};|hZ?roaCNc%zPIb_nG6Z9zsUN?pQcNvf)S(X>>4>}NTH(T{$tyq&ZDR@2=PH5%XM zJGWmmzY0T_)4(pJS<(lwY6}BN$DD@foKs6OtU{u=)fPt9efFZo;QyGwBskhNx9^&h zR1*7cg!ROoM71=&R9bG*sDcafX^!7C>pAmC)clS=dMF`ZW<%sF%m}g~3T@#VWN#X# zd{Z?w$9EZjOhs$Ynn;AAc_wZHHDWZJ@29YthwQl_-fa&b3%y%FcL|EP4T&5Q@gZ`9xB zcapUVU(^>7jR}&sRz!Vn`gs;%g35gV!`o;mC`^`L)ljo%cBa=cSN?1gC()xPw*qHw z1p!PQE2C;TQXWInaJh{iB@Z@BFrQ%7FH}0<4GvG;2y#B4Rwky@y)1BJExZ_?lz>HZ zTzpF$W>#)BTbwcdvFx9AW!z%pX?pXRs#um|>_m4UP!NYqbs!CBOg4FPy$wN$l2#;@ z+x3u|9FQa1m@bU;xR5P`qxim-LlpKWmX*UYZ|fSb@-#&@H{uU$rscii%*{U@za&*?ZQSOyTFaF>RIO1F0i?_V=eQH%+RYVdecHRA$MmPB)9nr z(A=?>E$E|B;5rn7IioeDdyI)MJ=Yvt$X~ozNvd#m0le2TXT+}A4S^7h3B#`Jk=7{A`ALY!u%l@U(QZb zAcD9J2O(5vjr|UDu{Y#4;9O=QQ^78X?M?Qi1ih+4tMj6r`o*FI4@VpKdo}0r?km7t zbd7ZHR0W~*eP4fui}RK9xG~8SZJ4`NlICSeQ}ujzdgVsEyjHxhcGHyv?(H+9nP$Pm z-_~3F{MRxf<(zomwPbOCH-Cn9t_*m)=o`OW0W3kaVf#51#be@aCP^XGSv9mB;)Wxi zQ+uHhQf=st(^{mAqChy_27AN^_C)Ypv7%Alpco^N7|} z6pSt2!6P8r1k5bfq@Piq1}o4*T;!mCUwIL!La`X-l=diPhQl?L7}vdRVTJ=s=tfdL`bnT$pNvuMi)3R7t*g2 
zQlLq3Y~cx6%H2tKskf=}jVkI~$$^Bb#JMfFy@!^Yx#eW04$vi^&X%j_^(zFBO4S8i zj|-lTi^Pj246}zZZfa8Sh{=^&Xbxm=Hm8z;T^DZv>$zhT5F72CqF82Wha;}3bFvK& z$8~>!&+h51G0#6VsMu7o=DBtCl-~dhw(85SpaKNEAshY*#mdpp^&R(vdK0MNc?|D? z;FMAJqU&r8~ z0~j|`cT)g;sDDisPln6y+d5=z?5VZE>%y{|QJ`sE~WC9E~g%QjEzI-bQN zJW1sWDf>hSHhet|&CvjT1k^S?T_TCfe{~$Bp!fd2)h`2FalKOS+1?z+`{v>sOCY`%Be0Zm@y9sLzWM zQ)31+2${2@?#3%5R7HjZzRy_N&5Xoz{g%rAT?ozirK%CJOCKD2v)*Rp8rU~3SGxWFdR!wkVk+?J zzv>C*&F~B_H&27O9!-n?jG*uPS;g-AQcxpljFzOPk;y*R<%XJ-RgyU)jCUkG5M@%W zHSo%QJTQog4#$2{B7mB&pa1*3dH19@wh%EC+K^i8_A>V03%_ZXXx6n?v_<88v5hUl ziDs>lc|VVz25a+jaZaZNWT)66=cVV`DCVqU91@R+Z54Uy9$&<~9l)cvkmiew+W@E+;jK!Y|f{=l?2Rr;+OD=cgkh5;gkea!c+#gOS=V zn#H?8;NADeEOXxT)`7({0)6!+*GOc|ipbBu%+k#_^JledMm5}t2$MXOv|u6-oA zwG##j1$QnCQ@)HeEIDVz5zM1v*G~J!OCUcU%7mOEnl^c7J`*lxl{^nS09sIH|%Ox7)W2PxGY`yrnae(LG zvB#lB%k5HIYnE>F`;YY>1%S&#GhBKeh9K8C7pH-vmeKU{cCmrnVRs>yV?0@b8utSzQN5=gS)1)PteT3JU1?JT=FvLh^82-X4tt}+KL&c|} zPI2>N&#B#*wL3j-2JEnjx^IFbnNum}pHJef$lFw8({0$pf@K^tSV zfnCp)&jMI3Ug69r85mzx^l^Nb@0UbPYBKTtl}p~lAo&;$)dttE$2agY_s2mRWCIEP z2j0{KSD&S{v#i%&+|%(Es-i6@d2F7C+K#njvqll24#l>M(NVn%>L5l$kGpDp@cC>z zdL)+}vE3Exzswbq*~CFUsker&EM`PL`ej(GA!x5vIr&wWY@7=uSmzdp!_!#L@=JrQ zFL}K}${<-)VX&Kt^cA!;`uO|-1I53EFXb`JU2k#r_%p8BD`$Qa#oP$tTgOAMr)#h8 z@mwb(XQ&QdoQN%ca)f=9wRE=$7~)dXV#i8gzz<%55pi%JEBMRG8`CSz9^o2}!0sWK z9-DV-4VxWPA}AklTfYV;@*G|kc+B;-VQL_2Ik9V=_Y%Z&37zTSU~Sj74#$!yEPWo{ z9)B}yp*;C4a;m)c1M$1IM}wFf^*)Eg;cnZmozC91&k%n_ENd9>vK9hwHg}-}inVw} zRPN-~kMTV0ejY|#b4Aol?*6nlij>_zt=-qyljiHiSH@)T_^Ei~DvQ}qf5>jy4zwVJ_yHchn*5mrr#zJ;ne^kHC$ApzD0;Vt+us>gjTq|J6=@-fw zFMOV-8GbYvXjh!FY_r{|yaqX9e(FXoEg)uS2;$^+*mFQ`_=yLG@5TMS!OMD$Y&aZQ(f*`^v~N=Pfk zWX2PJq}`DOO1ux91QXKE_U%8U=%V6C1#3Ki;HkVu%VUnhdrrb`mDsVjdjtuq#2R6% zPAR`#2^PZKL386^z8N-zA*>-ZbBzBuUKPaR^P>iyXMkJvEl9b0)Wv#x&MXykII_xE z-Xa>NwZAqB_x)mg`!n+M>~X3qj5c4pF+=m{i^QC(^@)3sZ36;#$>zdEEGzfU6<&`x z&KSRWvzchJ`sYYk${tg3g374!xm{^V^?H&gPmJlOe%a23=rwV8H^iETul}|mol-{N zMrj2}n~8oC$_6QWn0~s6mVv-H*2lJQrRw!fB;kcjhfl=c#i;I+KFfv}=&iYRw!4Cb 
zO`Vgb8h=(9Yj)S*U8v!yKw9G*=V3AEQ$lDK6yRQciCs#YkqwxOK~9qW_+?im2KW(+ zvfza9<`ZiCeMU7eqM^=*m&TnETtDxVTQ%*P@QC!dEl%rD&Oz`x*fzO|ilQT}Qm7xM8(VHK?fK_)d+NzjlG!#B)RJ&v2m08PxO}DT z3iJc(GuaLf+oR<<=Cu~>gI7IbgrxE82{?Nx05i2tN+6Y>;RI{YsB#2EU$sUwkATz$R!N1Z&8-yR)UqIWVu=BvaOjZ@R-RlsUES7!eeV>MpBEsRtwNP{> zr6J_9bZt8-sr+=kWrBE!BZ9KADFmybHj=zb7SMKzJ-@eqcdjC@r>|tqM zX4-cZZydQR*U?2gW9IV&#x+t}9XR%E< z+6<(q^-Kx&sBScH8|n7MKv8TXU#SgZHXzCcH15C3D>?ema9EcUBQ|?PBF!`V8uh|& zN@RGYrDXssV1J|~!0qCVsw+E|n_dmj@KdiPSG=EXRBFO-;c9rs>71}f^20leSz%EM)Y1}3UT)-GKZ!Mc z6^)L_(G$1XZ-uWq?H#woP{gxHa5f|&2VE%Dat?)WqO^Z{=ot|s573*$01;}H=QQ@Y z`V(zp)$_wXv=nlplXP=J6N5^W#&?$eEy;YL5AFx>+Webm)r5?|-;N2%wOGzb!I;Faw6x~=$bKf|HY~OaiA~p_|vhV02 z_8rBJi|z{0vKH_k^*m6LE3(-E*@Uf(g@m4$aSjy!{6X@@{+eM(lg5cO*^$o|qc&-> zL+(X>zdu4iX2`I$Rlt#hU%Z2$o+${JC7gS_}S&cXF?b;{9kyg8?bKe@b^wG%s+h~jqwR|4PUW_J&NCCYJoLtvoy}{Dad4%HoK{dezRqwD%N0As9zKtzmK??fLt1l(xPVm_FXuctq zGt%HrryRvaH-g9x+5ue^S?%OP1vgtx%{a0gkZJq8OX(ZHd zZmdT0Dir!Na?4^Kc(}d1t7nEj0G;-m*1aP5+26G_W3qeUu^&*klKPfc9+7vjaL;dR zSb4NO8_Hi!)CNd?cDJ)?TSm64o0CT8P*Gy23j=e5shyiu7_FLccT_9BQdjnEK+A0J z^fMMR-KlxZ6lZq6yWKvX9p|IhM(TPy=YSYvOzR5zDh}lsudxY(8Kjsw^Fe{Ts`LOv zPuN3s1to5ryU{<}i#1<7+0Um+*EgFgiN@aLe+NHypub-5iA!&T#bmr7TcsrF8kOrc zpNaIHnY<(AwCPCxQA~{w2`!5Wln-%Ince}F;iD9rB%XbcABFv_Ef?eFJI&h>{keEM zh{!!BFq=iQL866JNyjTwG45`nEOR^9r8ATQ1FY+4IGNx>vZGSbWP5m5cSkx62;Ync z#Ei!KDSeY?o40T!_WghujB9VmK-!5@*-T1knUcS=m*hJoqTl zchGiR!t^FiBG!>DFb*cv>4`h@7>J*69O&llPnU#KjnPL!cyKvyB^47v!G{Z=%CC8x zgN@b2mLc{q`2`Nkd*BxuJ#qF3ieOPIk9x)Xi>-6MOlm@o1W}GfB-|DOSx4bBr*;;h zbK1OhHNAsL$My)Hl7!GR2zdiO5W`TGY3M^8W*B*H|>zoX7 zt>t;|##$)?Zo08uEo0@87g4`yL*{$X5_L?Bp7`j0ZZnq0dOpKV=&O6U;oM)AY=OdW zqwvCcuBE_c%hDm+=z_vL+5KAFXm>Ija{15p>w0l&GtiB;a!&v8IJB$#D({`@~0A?fsy2rW@NtEibHaJ>b>sjW%d9_z`|}OJef7# zIyd0QMYQ4FNu+8`^-R@5P6DTX;lNPhf>1N?#zHiJgs(OHeT+7#Jx00AGtJGVTU+mV z33lts?m_s-*dvHK@nRf>9U_(v67lYAI~LbG9xiD4#>p`%b4Xp$UY^qq9g4u6+lv@L z{P=h*yg>7lurSjlw0yP6W=xE_YfxFSXL%EKO=M`G84i2kJD z10%9GK}ZZn&U%B;HqvD7`awg^Z?}?n7OHeZGY@^d16-QHMd{La?ID`{;<7PBhAZW7 
z5GqT)K*G+BoG%<3j(v_h#3jk}h&$w6`Ua1`eul8?)T3+8Spjw7+xH-x@f<7-WVk_| z+DnTiv7c$?*>RJ6nR(_R;Ml)HdX~!WNFpiN!N+6cHKeqFQa1(7xb3?L2 zw-i|?)n!_dbt=fI?!wa~W?ZKp8>Pq^EM{{4^AKfE&1;!8Wr)=18p*kDPH(6W3+(uO+tczT#8C7dath16DK- z;p~BK%#G(;mID>D#HZ_3xr*f3vJru#n=%=PA4%vGyO2dBQJ9z|InyJp( zz2qw?RC0J5!EXguN}cQVVH@atTOU=O=zaUwqZsiWv zVAqs@U8yoR;!!Osh>X;6=~*-cl&z0lvf(Gmo;!mAEJ*uh1RCW(x4Mcw@44$NYW9%U zEV_D_sdMs$s2TR$&Vfx#Lexr=@NrFb_?r4lZ8PDXJ|R7ar5C$_u^IER-NIwI+uI%*}X4G znX6H6N0*2Z^5w1Fz|EzrojH~>iOz-lUFR6do~qaSBcV8hN6x6X>>v)$3G;#g*P{@2 z!pj!q#{)VIsppLgqvRDj6F3c}l?%n9YFflaitI?G{nNFNOWiT?=PSkuA`z zg4%SR@Ld!=TfW1iZEZR^Y>x<|8QQRDOcm*G8G3AD$z9r^Q7(Avg+5ltb#(VIc#rE` ziM&|hRcndVH*tJbovjc4vNsXd{L&?fb4*l$G=U#gCmg%I^uxIixV6k;km04muk&Hz zD(2jr&cJ zDy5(6N-Y0%5~bTSM1-$U63xXRK?^pSX`RDZySBYU|u6|5%z7DRg6z6>6MV!=wcHDaJ5Z&vyGspPDu`Hz9ev4v-d*y0jOGO zD5_-JCS;6e8SF0xT+{-H5}9y?Wy((~Alb9*6pm&%_+WO{VF{~2E?vG>$t7)M&~rn4 z-YlkA(7-7^w})A9VL87Ff1-(iqxXgFs*qtI=iGKfE(4pTs5s^Fr=BKTv*OCgdJtw} zEa>7UxF3@ekujAN-Gl3HSa*XM2!#A-Jm%86I9OV7}PgB@aa}yQlJ9UUyRKjt7@knW(T&Lv+K4syY& zFKQrpKplY&!4npMfhhOSczD(%eCWD-!7|E4X9w@OF*;#U1h0*}WfDZ8t(fi(&BCOC zFDsjiAbn@+4^G$QnATNnM354C4dl+kprK5S_X{E8@5@HC44>sSlZh_)v{r$@-zUT_ zW`v5)Mq!=F@+FS_EO_9aMnQz3RP@FVm_~HZ;d*j5w~PtGdEFqzFeZU=uSpW$f3`CW z>sFvPrw9m9y`}5DBQMu0Z;&)pPf)2C2`c_*4~4d|aLy4=QR9+<*}3-&u}hI|bvG@| zK>>1Q^Ki#+IsYNF+z%ljn!m~2RoY}52&>`8X$j=?7=1474jpmwWZ%bI%(--Ym$50TaAHPS&#lBR+7>rfJeVTy$*p zx@=JJT{_F#OyDno2TvUX8=oAm)vL?64x2_dsZ$svT)q9GbB|7Eco;k*0S!_4D>(HW z2}iN5=5kWMd~qbnhB`fK=8{p(Y@D{1@IQM);-7q5aMrFLBQE|yNJevKMRIiWZl>mn z4-{U)T)8=1ZMGz21$mC|ZZV5t7@N(2vKSf8Z2|hwZtnW5GV%_7i^9OKTeKi0O)G>~ z{dN0P@M}(j>Ru^!7WB$^1VwvsIij+E9ZDm>9IuG|`Yc~P6u&LOsi+aO`}0sxq;)1| zm2tI2XsQNw3tzfr3Ozwkk*D#H~U5N`Xdfp`Df^!G#)LMOA56wz0&(|FJ-y?S3^Dp2(Q_}q~H`tVDVRl-`I}0i07HTZfP}O&LdGWdnkr?(xAxgD1Wsrhj zJ4ZgzGWfFkEuSoW%eqfn_agSy)m4S_I;mRuSAP+}mDS0;uH7)U%K|KcjD5@bs$#w3 z>$<6$`P9NdL&t(PcQjd)woK{;bL{b11*-|5a|8G82*Out&CQW@##~t2a<>YvAPx3w zR8(cGsq*bcBhJ42a(BCm%$Zt|Mh&~>#klZ4lm*-U?;(zRq_J&qRwPKwDFuGQr`O;y 
z5O`nODVO^}jeBvf){JK9{<3l~nIGIR1dbEYs4-jOBkZI=?rylMRM@u~2(I3@YQ9KM zvcKi5;8-FaOzeEE7v;1)q0~C7(5BI3`E#O3?T|EA-O#;(C{N33h6Suys7>o0$Ic3g zj&v}zIzfT%XMss!(q_rKvv&hgWTI$Viygsy&w%-6}N|T}XYl zGg;|Me%Y;NKKn+_-0*X5s0IEaPs-l-^=!puRhpexR zYP0FqE>fg8ltOVzk>c*!7AWpe97+ieMS?p8iWk=a#XY!dai_SuI|L7bFZ6ld?>+0B zU-!z&N@ngYv-j+4_BH<>Y5a}Ye@nR&^Y-7M4{-f2|lP83apbG%9Ep&6mhssoGlnR11QmJ zP>noXFm$K%sXT06q2~13W)|4$+BhmZufwjFy-RZ{B6F8F6u9eKmYi-cZnV@}Yg*a< z;x1ylVRaVDqZxQtv)ft%>=<3AQJ$V(nk_8uE?a&_*99+-a-Do$O3Iq|<@sgFg# zBQa*8X-ok_#pRhR(_~ra5*{Asu!h=^p!-^N1E{Bj=IKWpwYmC}QwqmsTh@`ESjn>= z9gedu?TfUtDl1hI?TPxlA{%?{cRslN&FFQ|z7D(gT0`MHL~k=-c0cY~~wQe0N) z76ocewVu~|(&y6{qkX4fv3Nd1SDh4Y*4cyJ;S2oU|BioTx4-yMD655InY>aN@H&7?K8eGw^w9Bv z5@1MBakBcb3za=D2&pE>C{+;W(e0GYVm?2OsM>v&Wrl&eTn8pJ@8ol*m zX9g=AU|=FP%Ro@#AXCOjBG*aE+m#4CMpE7fmg`ntIqZf_C2eECG@Q5vGC}6qXbh)7=JI(3azVj-}hb(qc)VZG_SAwU%HANe2 z+y+{do{&7jx+1#M*0J(G_M{F`m+rM<*^{O}S?zbpb3w>^9grt;Ea$=NOF6@Kxs5N@ zjF;6kfZKVFffmw;hscx*uud!corWh|!Ig@ueN4T5Gi|I2M!svlBP_p_vJ(eiyC}wy zW~8#h;j=^-D>2Inf7LfOZ{d~nVz1ND%AJPn5-VOnxjX5HuUEhQlMROI*Ga}RysqCi z?1vOeNpT35%JhA40%lhnE->Nz6|eT&p*)QwW*sA9D|^p!|`@Qtdh!Rv)KO zk+#2ZSA>;|a?%%oPE?C+T143ETFOU;G|ATfIEGgM>PUNX;t)T6!ekO9ynz-&fftzzH) zDTeuNnDWfuNoXfsW?EcqB(7%RU0eV97U|{?4e6je*8%L{hVZJIiG!qB>*CttYx9nFF*>cCTU$^oVI}%O{V^s z4{jClx9zD_`q5YX1(BQ0sJD-BB?2@<8 z*^-=5PQrzTj8MiOMkkHcMQP-ezKA43=`K@PHGSR{_7*2 zq9Gq2PD6?ZB|koepYprwElLTciws&9R<;W$fRr_m%E{QT5VLhwv&cB8CrbMLJn?3L z$h_z=g0R*p<^<%pYGXC*v3ll_FY9zp$YEV}L9`rM{6KZ$JzcuS&*DKxIyl_lYiM|C zec(7e+XeG8!yE85!+Kx2gTDU~es@gxiL3KR8+zXC;m2y&T6(}pJ?dGS4`eW~He%zU zv!(qnvqXawyawh0XHzx3n6*FOZldgaY=s#--Bt!A3SRImlVG*F0|?$kTQ&OwnqdPS z2;^dnZzkYib+|Fs?obv|m@kCgaxLLuZ3(ow>vb?FiK9@#HT1de#m3P#mu}w;pG(Yv zq7Ai83gHly2>@S;=6A!jV$9KkdScY^-di^yW4{`p^fYwpdDZw6821nOHfPpXC$;VJ zJgDATUg==z6Ux$pObN2e1qWm|JZzmKj7Ka})jH$0m_uSV64>G2lf zD97;tG3r@;bT+2lawCYW&M^Jm%vW93ugY!Cl2Fm>3h>EW=|gQpTb`g_DD^Ur2M=wV zwN<(UTv~Op1CcsV3iIwfX z+@Yw~?3yIgrEd*IDqG0P9m%bBe<4NU<}9$7*;s+a94tt0zZwx0s#W=4h?M&cCADcP 
z3K#gn>&qR}ovff2%U8xU9uJqx+36QT)xXFZb@iD{daro!Q8==G*ZcI<(;NEeV7T2C z)t(=Gk9XMH_Dg@UHK>i9fqBTBAvW7R^3BHz1);IJjFpIDM*V8;S8cpo58m^^67GXc z!QbM2{d`J4P2`t6&yQ=FxiZbyi6^WBzSimQifQIw32i**sYY19`GyF6a`I!(|*_^BU2y9=hmqP$ZOED zOuffht|@DI#BJM&1m@^NSVT?PE3Sd7P zX2ZCab3IA*oU>i9R4tXgeFF^fz#Q%A9fDww{tj?1WU`vEi)rPHOw?2;o`;sYbu#+< z34PzjVp{O14|O}vbx+S^WzPhjoeghFI)d+c?E{yBX)J{*UJE^rJ)FV`=6kLj9{wn7 zjR)uVZ2&T&rP-{Qw65(~wURn!;!}N4)4mEnox}4}Re(E~(Bd4cFAO-2mgesl-#3TC^B$7`)ChYG;!wbPdL__Mv&ZJV0%S)DW zEjn;G9}ayD4$nMTO&fO>h`Fx_-}WwFZiRP}+EEkH(-9My5%018r1oh$28J&O4hG*3 z1Ul{3QvlA3;ds`eX}IFhj}Dg4NCQYnbs7_lSCi1Z39KbM;g-NlO=2OQSx6!LfI(%% zo*oSx*by*(J4>)l324>>fjP+azuoL7BC<*}@(eS@g^UP=Tb%X|EyvT_(U>J`yAG+- zW31{mNj=w%`l(XmaQlV=dDiGe-K6|ChytYxn9p5z(856zjcq)&vU6zo5r(ol1CaLh20DFV~A8BhGk7~WBN=pUp2jMH!8 z@UPmG0kOlqD)Vs~eLESHlyO4ZUN=A9ISib5V=;b+{w~wHD*9fe(^u@IfNnY#dU=Kaw!EHv`E!jOsg|?S1*eI zG{Sx`$!xU#cys?A(KN}I@r|no(*cQVBhojSikQuIw&FuXm(~y?SVzt1ZMI{m1Y&)Fr7uT_@7yL=Bm?)X z@#5X{5+$jJd1IFiUAI++NI%U50G@H zSXnYdMaez2hK*wm@o-xtL$Y+HCFIo?QFoo&b~+hjpARNQv>N4NNas7KfI^A0k_%YC zAxu@}_L2b{C14fTdj2wpicadt##+y?9uh!!ZQ#XiQq9Nq!vs=)%p@C9t%@r74-^Z_ z5w=mJGhk-5oma;sqE?VQce{_oW_^@Z+Xi6YOoxsw!h?e~#i-;mmF*gf&p7)iJnKu2 zI@mBLu%v(#>S8h1J~CIIu@j^fU;XkA`~qXwjat4`T*Wk%K60yCJoZ%VPI93sC2lze zc8?IAAzL)g9{>G<8f||BzAsqjAIyc4c&2?3klMzYRS4ZS5{?daRv^RpsKjbGq%-cw zV1)v`iwPxzS;u~-o^yi3ACbdkzbFlOf?>2V&!=R>V!JP!UUi|`2kdt$MS8I6>+8K< zHsLvL-r>4Um(9BtmC(H+-NYF007oqONLZ1=3Yiw>wO#mFP=xUo%}^5*jrO{m@@%+y zmfxIO`Yw|Yoskk}Qy7O@K+O$FPCY-~Of&PGx>uQ#dD3~Ln+ubpwO@NyEmI(+7UeG} zmP^nGr{l6y3H`1=r^kpFxi$+N4#==gN$(-uzfN?n&bL&@dMLG@I8AzjL}WvKeZVv* zHhJDcQ?a&9V>cR2`7nFVr9+x$cGM9&*TR3A%gxZzYh0I&A&nn+M;|;E(D#w5j&3?dD5cwt z0tc*{-TFU~IlK#aGLA}#r;LR+uUIQz@x57dqfLN7owH3x-n$Tt+(X|UuXn|`MQU_t zM%z1V6LVIcUF=O9I&4)I+DYY3-BJQZ5XDoQp~&A0DRUoZa!ELiW>+9!O=mU1KFXVI z=9S?LZZQ`_?D`oo3qW}p{5&%Ep@@76nZOXq;SY~~^x>Qe4u5A+lbl2E5r(~edlN0U zU865ZJoPCiiQ5Ui_iw&UD8AB3ddGtUH++z`Oq=}*BD`Or;5`%OZ+y~isF zVWp?fW96_lgL(049kWVBcTFm?e|^8^t6(QhZeWA-SVD!G$d%Pt@+aa;>0=E;XEBdB 
zFRp+A2=u0)~|K4fBVGK`6Uv-!IJ zl!NK89e3YsEe6YBw7BZ9gcEb?AR{^Gt?u_s-mA2^A7#xJH?H2RrHHOp$S4{9hz5lr z&T4bb4!E`#-fIn33v&{^BQS4*#SbSbesYtKMv%0d--k%!`?H2Q55^z1Qt0#NH$Tfo zlHYSAr5w6txdAP;^&{&oORP=ONe9Cbd=}=OlQOt3g!Cv~SCs=M;@h46*6u2i2a=Od zOXLIL0Gfe;>!rfpd_(pmuhmMvLWafktI}9F!aer9`a)3>F02Oo9 zl!Z=N&Z%3pf()*`e-j`$aHJ;K zwlhViv-hy@F3Hi0@66{Zl6mxm%H)xjm2y=D&&EEx--Y^_zQE%?da!eDdu=4J=P$H%_brhn zLHA~BOC(X=jM(Ax?_WrywmQ4dL4WcB5DlVd3k*!FOkDoJX#LkwK`sXy`}BD|ZwTBh zUW<}9@@MmI@0W6(CCjwbb6$#ezydoM4kLWArf^q50os5esSTcBpXTwU>(>O6Jyb~N z9AT7~$)VK}2+OZr6WkM@{Zd$a{p&h=y40PuK=>3Qs&pzVkE!_u(rhYvojmXxM&z_o zbZ_lFEx4QiX`Q1yE*IW!Bti@)Z8SP~gS%KYP@Wdgs1XwzC8h(v=Yp{cFd7d~Nd5dl z^h1TVQWp#uN9QB7Rk@9+0yf!uhh^jdgP>;uk>1ow;qBqCK6A<-fX}QCJY~MU2KloOB-6^ zmlvqBQ~U7Sy|-S_Eo$iwPV8|$#<|KB3zuXu=gy{?CR>?&i_)rPBrR5B0;zqqmr>8d zudoGVl2Qp9Oip#t$c-1vehl#kHfpv@mSmoc(${Ge8`{S%BA*iSHvV!l#?c}^h=dWN zG3!Jwztfg_&qp8RSJvZBcFvIPioUxVK|W-m^RsT+ zj4Xi7en6G8C~;sKa?ecV%X9KMF=PhZK`R_Pcc^C^%DGNxlgsh(%$*_P&PEznp5~_sEriX01|7gE^<)6`W>p&cW6%f!YVl8t~0Iu@CzA z&w{En=A} zxQs%OILp)tytQ9pm^?7=;Hy_EwErKrzkW1ND3tT|rnkF4>y+eadU3LdaEL6CS)UHv z25Sv}%_osJoMd6}M@{hox<^#$-5I2cjwoArRv4sPs1X+$E&dF_rfEI_DeH{;-?xi% zM?4rhU_E1Osqb{5NgP1v16}&sQlds$$M1A*x z7;+ap_RBgLRdV%wSZ%T5tazElKCDx(fKLQ}ex-@+g>?#Xy(=IZ==}kX#k5)dD)gxY z3<49$W?1#SdUx~G#Kc4aVuaZy8=3Vr!TU^vPq(c}&Bk!TzRpxh$sPsxB((B{C-zri z&<9cx%uwUZXn~jB8e;0cd1@5!UfH`?G3^gU#2M=z~$@SMvI#w03E_bq5B*LG4$9-)EwG;=>?K=3WlWV6(urtoFG4F){W>_!W!c(Qwd; zW1R1F?zBzhs{qS?y8y*22$C9%N*B-0F)mu&C&B?nlS~Z^s`Mxu zR@ufXgb9CxQF_cIQf3^}J$2)mfJy10$Dez>+YXvFRqFs92E&g$QrLddP_A=C zDBznf*M0XgLlQL{f&7Z7yQ`W8807612bF{Q>Xt!cg28(tSDIEM+QINu3>uD3~;!H*RkOhi@`t8 z?YY-CM+??W@D7YOr74=xB2Ck^C|<*sggI`V1H8i%ltZzREXDmiu+)VU^4d~5mZ)3! 
z**I<5r>aG|P`(m@0OUu{)lG8LwoKEu##SDe&g==i^TAf^49Be?4H9Yj#cb#CL08_8 z2QLuyZ-%gs_SI(F>ng3`yf?}Is^wTMUn$Y-0ag@F1~V%}T4jt=$mSZ+_R=z=_-~V@ zI4kb%V&j4M|Dr|#NnJN)8m{ITkq)+mOoyFuW31t4)rEzR z2HMrL$c@+Z7#TSn@oKxTx&ud!6z-QDm$lNgft!jrX=rF3!iNU>hKh88uFE9wl#yTh z2})&t8G*JUp<-&Tm>pv5dpj(#4M8#>%#MRLE=gNkMpkMp9BZ-`JYNffr)Dj83r!gi zLw(<5qWyjR#PQSr-A_?7ns$8+Od5&gg-k+%^mv;QD40%0p%GZrqB{+x)HZ;3Ck~_@H3ouJjoyz(Ma9U?tGm3Qy$+D=W`{>K7g61De+sim*&wJe=Qgs zR|*`h7TY^SNPH|76aEsZz>?h7c4y3oa5~H7&jM+s*vXiq95)H1WUJFH&&ZhgD*^HB zH$Oo|^4bC9y_9AT2c#>U5D3P$+vqKM-MaTz>&1&pK%IQ9-q$zIPMP?4Own(sa5E>C ze(>h7%@Gh5=Km{59*Na(g1e!U=38to3(&RW;nh`C%igAPB#}XVNAn}ZV_4VlmGEff zcSA9kG0ZuZqf~FGJ0wM}HtE{XKI_&jbw^3Ga>1ryrM_&?^B9!ha0H>Iu|LnLLLK%n z^ze5(T3I?6=#qV0GIR~kL{nT?s4BiJ#^-tej{&@g8-RzQ9oTKnF$0vn&t!15o_65g zWd3!hWVzfL#ZA`jHg#T^YqEL&U!^OfBcCSRur)*RoTofFJP(8hlWQ}8A8wZDakvKB z1=`lsqIk1=U%kIuX)@1t>$kJzmZovlgG5UQxw$!XJJzMl7*tM?RV-X^;Q4Ai)ESaA zw?}f*+ZmcfdO^{fC$s+=7cfkGe*_Bipx|~{sv1mlT1VfO7Hjg3UuW}LgUD-{?RS?k z<}bgg_tMr9KPUyH$WhEER@Idb=cZa_|6`kDH)uxDsTo!Qyf@s7NLR78KIb@1!@35I zBJ@~$u23jSla~QXmkb3b%Rbo!&4*IHtMK3NH{h0&S%LSdgmdqSu|4i~<{gr-iuf>XP1+?Kd*sOM`o(q)p! zYGi!!7-eq6LZOpEIk{txp!wMdc`$)$*9#pt3e|3E>0gZ}Eb>xf{Gj80j<{&b!J7=5 zwpL@vll`Ag`j4gRQ$785lHSnh)GAIqKa8n3;*E>QpbRrGzVWW+zD3A? 
zz572-j`B3~x&@7EBvhQM{ZS|Baa;KYugF(cQsJ0$GpKF5;LR~rIxA`_W;E5vg zIE3YVTT+f|Vd4FoY%;N**cR=?)qRcw)B7#W<}}&@KGlAz^pjkDFO*2w@h1|$WSmu%} zsE9g%8D|IWb{@F`F+y0$9|uS~!jEXx)46??tT_AK5H$wHRPqwoz59Mn1^ZkjfkEU` zDo$d7XMCv8wY^f|%Y)GKma{!VQ}7Vs)0*|^`;*xt;fQ{n+vHQ_haLXt?2T6Gb%{|HqF$!`*L z4Wd6}zoAa^o_|O9!QR4RZqe;}3Gl^XAnkg$N^rHAi;AGT>I zp*gpU^rHK7!h-++j+s*78W;z zsbTz%s)4?NKULh-%i5lSD}mBg5VNM2}$^1hSeF1|o%F@Y5nCZiD-2$&HePjJxeD2V-csZprrU z&V86>RJikS(Z0EGGHgFDFMs&j=R(b~o5^Q|jNhp*&0%~5x1YeK!4;)#+K=x`i}rLQ zUG=>sjrc+oX^EyT1&h)@0Wh-_kC=>%-Id$H(vlIJxwN!|QXPC`D~I*hMgP&6>(zb5 zPWpy7&dGwEZ4-nQ0JS24+%<>fWoXO*12Mb z15H`-zk{T%FC8Ah%xpN`=Z@EL>9Vcw zmmD%r-%yQ3?&t1~%Xa4CWJG~7YU_4&9rMNNDADsFiv#A?h@ z4&|)37X%kMSbUlKQ_p7xE84_dTixNe-sU}yITi6wt4i-nLVdkegcxEbc`S?q$_2hx zTPu8V@~_!*gZ5W#UD-~otYoMW?Ft=NTyjNKa`95;$8KsL3II(-+m<-Ijhv=yg!;aH zogN9B<^7EA_Ss-D`ZC;U5=gsGTXy(m<3K}mKoS~R4Eb_dD(ybc0u59JnhHsHTNks| zoa6|16{XmG+V>sp8pkf03i^60bEom;dAKj%dsS`iSN7NkEgpg;LOGIGCjNjWac~pr z_ggN^{tV}MFx#7vNgB!{bX~J0j5}AQ)4^QP8qro*shv|KH2>kq5df_%-F{7^Z@zrr zGm|=Vzm~E2<0^|tc;;#Xwmp9W8*`c+#TSEbkdoTGM}>waGR_Xk8mrl2=0lk4;)FTd z(wUw@_9~4s#}4`Eua%Yq1IIz|ae48I+_rs`TeSdeQccxyQS5AQG`Y!n$xQU@+K0%t zU+i)-cIX)ODhY;)-Dq|UzJ0(M4$E)}0aqC?A%@`j288W|-G!+pVZx%A7SG_zI5hTZr#WAN~jnebJ4X7SU@e9*cE znSpF6S~Vl$rI+VJQSmTk+u93Ashwwc>umkt>YqNJhnsxO!T?vnEP% zFxaOv4Z7<%L?Prn^-p31zhkQrBFZCTdRz~sm`Pr?pIJf?du;&RJBe?(TV83KKD?b` zqg^nKNIzFn6|&hxo*>n?GdS}7Cidysy!VQghY`~YY=)L5(R6K z^PcfoPW^zgb6@21m~}hu}>9^k7NAZMeSog#XYR6U-M$E>*t?&gaqeJCdOrVt4GD>P=CFdn>ybv zMmp+av`GFmPf(lIXmFbMKcb>MwGetooJ7iE@;(1dmon_*z~08L%tB@)}%v+aK50Y4x?1_ll7Y}+fL(Pr#?dOe1?e6F8wC-NkZdzo}HV{Y-e27Ft+Pu<>{n;?{uV=@BSrye7tC9lfh z2PGwO3ptaSmDS%9UKKyLTDo!*j>0yBr5ZxBI(CBDJh};Oc_?((&d6^zq84mLPnTVU za3A(eGMHb4_hvkPhE%S2_}z}Uw4I1@1s`45?f_Ku;y(Y~#qGbz9Pt32O^=fh`*{X% zVpAR1YXnwm5Zvj+X~VCP=a*57Y)#R^`-Fi|3*;XaRtj^Wy&anyodIiA-ZLzBu^mA_ zWnb7)-i%2PWjvf63>6!=6$YS=+>)-GV^7S@8S`2bb9h+=n5|LT>y(qUO}AFx>=8Xk zp>6R!?g(AkT(0E`t??{>Ve71H=?Ob7=XsoAb@i9~?XtCUTh 
zt}O+XOWDV!cX$tKON))r@yr9AQ*d9<%KZ@MIiW$9EJ9DddgSqI*?S@4>FN}(sA6CI zL-xmKJw5L0qt>?XX(^I_|7)H~IhtsCN{Wz01OD09R}kM4HB{V$s=amq^fH3FMpg#- zvrltbcl5#|aJ=Hxu2u5iEK8jd0k~Wo)sRqtHu(|(n}5ocsV{g8AisUaa&aKcieYRT9p-NOr4ezNBvN$|5!6_a8#z*XVsvB z_D!-O-33vZcgdDQBY-3k$Bzlz%xPGe_fKRYMAmswvloAjv}(E7mkRI9!W3{V4!zWD zz{S2R(zBN7t`LYkmek(|0XdCO)ys1i84n*UExP4>-L-*tarfEfEt%@@~W#|*31i(Iyv zto@=YxWp#$;0m5mF)d*DYauqmPq*+>HM4@!?^1zgeqryBKIv8Y9S}El)E=UF(zKGw*!O zDb-fFI}1H?FNKd55h6}=sSpwJO;D9jZDnCVcL#VW0=ml7dy)h<@}yf`J#!6F(PCj@AogCLTDFjc!dlV!jsgjnrX>j#+8x>Q3Zd5y3nxYAdXU&(|x><533 zVU?&N$>(^!1SM>o#Z>JBI&g)hAR;J=bB}i!t35LmM=y;+d`1}UIW9w=%r(N_=?Ob9 zWP6hs^S7MxlLGz{`N6pkmhii9eJ9sZGWX#*o45izIt0EAOk7an^5HP0iK}FDq zQ<65S$`JYiKEh2LP@*waXp6Vn4z_)+-tli`1^%FV1@~2xUw@FFQgXy6AEe5MIokrK zg?%q8w{XXJm6I7~;@bvRb(m+|i&1Utx~Gl2{%AEmFb=o1j+KPNm-a*Uopg_~+kLGz zon*wg;=kVEhO4|Ig2SJZ1D9wnjk2Ah!d}XxmZGMVRAb;Xi#D3>Q-Ong%DZcj0{#vxj23qBQHVN!Ygz1w^RF;Wa$XKc_}2Pum#qN)*xvRjrAo%fT+G(m zq&BSjV}yG{THfApKaCmpq!?4*eC@x*2xVkVnlVeE2u+%w!gMTVEz?aL7hLv%2g0R| zS8_$bCFptez$I;>Ue<_5v3l)(;s3R4xPdD_qNxwyf#bF<_-`;!eU#swk;Jgojj%B@ zbLZSMR>KG7rh^Nc571IL3H%|RUBb&dpIL=ntRt!VRGxmHnc}6BeE|WgizqM+M zO{8CO!!ufWugVN_$XNL*!~YGXz&O)MBg-i)Q7 znfpxWU1Ilo?Ikb`ErL#p<+_7{gNL~?ZiiB=L8x?>p>q0~2`_TdxY3_pUq-TsBvibG z+85qJtF&P7N2ADnLVHUEIaD$;CbqYGAnOAD?VEC8Z(WH`+d9DeD(eof;6=>0 zgZr+okAHHFx0>~uEnDueZ!3h}2f($Vj`|d`0;&?8794ckF$q`1pK%}Yeu!tjQgr-I zOcIak2iRX(*ZiFrbw63Husp50(141HibZ}SJVlv|3i-Zi z%>DZqYe!R`rv}quyX(oE36V*r-6%=LESF(d zbxE;`!qmu?C!NqCg}Oz6=wnD!iqC!fO3Fr^9oLNK397MsD}iSn;jiS{nym?~iN9X^ z6qg+E#ykKGXqFEE@k}rnyjcnv&J=Spj|)j~@6vT)pEN0*1j}O`G>dJgNhd!Q@WR5v zs2fqS$vN}Ws=1m)?R(F*{ZJ`ifFYpOSX)=FS`^7lb>w}og}0aE9lX)-`n0krqD6H+ zw`&|Y#NqFVxD1yK*A4^B^lW53b0@-;CD#PZ%h!J#K8`iNLYVv@;)AWiRQQ0qTOt-M zn`rLrJW^d(7l=8(AT%^S?%)~``{&P}G^@uiPEI{h;|IuS@?@#(v1$m;OH6Xl?G@WA z*FN>+0fsj>b7B{4K&gf{a;{wg+%;VxoZ_?ZZL(qrsX2;knBym_2wew7sYV`BpQFx2 zPMC{X7)d8v{sH$re7!f%e|=<7=&qp_^ma?GdEz&dg`t!Hg0i}hvB(>d3|7@(Pp8!Y z@zy|dAZq>KT2@M@gdVheZX-m)Rv1A|8Sf6E!SC^_l-`~ti7g#z(;+miQN`MKEM(|%uZb# 
zs8-&n>Dvtv?F``o>GOPzoz*E>>Iipp+GY|Btt)hkz7_Qm=sy)0X!9Rqj}88V@CnCn zvINa!JuKcF?1EPC!VB38q$}t$ONirj^`02_5&P#hZ?8=nViSl>o)6h4O9W?6u zE8U4k{3MCYlmYxVS@gM7R8U`jC|jVN$aeG*KUS&&lF)#9n78B=y-=N9eS&&^tQ`weYafY^3qTS%%_Sw@1vTTltH$(#VxkC5bL}+IL%2?I@-|2_aq@F-5(g{D5xm%T6nk7 z!LvP6wB|3KPxI}DPW{3?(UeijrqFrLG{q&JXU z=9<>BkWdw)le1HjOWV=#_8eR5jd&Dcvuk*sgLR*r&ulsYjnP77Cip=A?h;JSJ7e8YPdizR2+!+DVwdGNDX&(#VhYXf|HRC?DPUMza~1Dm`8L~veXQb&GR-(^OeBkXTE~T2Zr1zZt zM=OEmkleOy9x%ts3Mtn!ba@UB&qbPkLZ52v{Fl7Yj3UN$53AdjK}RCjP`^fG3O-c97%bEvh%+=RbjX%S7n;+Fl>0iC=b#&vHD> zezJzlV^!9t@am(sZ1U>6R|MK9K(+*Frm&lwOuX1>0wvD|Mb)Ly5Ll0nI6b=2IaYD-W|1P63w#zE>H*CvkW z!wfAHg+|J17futH3<07SGhmk;r^uy17?Y;mrZni6?T;GI&Nf4{2Vm=YUsNTn0KSkS z6&23LV#77Q_&h1O^KL)}8~u3<%FZQm)>lO}#p&QKE`XVdRV5dAC#?XVC&J^vo3dr-V|qTbZ}_B39oJ|{am zhk5+%FwI`*>u-r!Y8>~4(vxSS%cq=#rg zx23!>*A5joKnBoVHC-(GvAFGVSJdDZXk^UkFlZ?)05W{-6-~F34;#LhkK={iCAib3 zvF_j|oGn4VThhJ>_%Ke#uy#TF=QY!OyOr@x$T9S)eeA4qmgb|tvMuex_(oql`m?TI zdi)n<*j&gZO5<>ki47fdQ)NeQKPSUE{eogY^p1}v|5K!}q+5GVao64XSp;(qVsw0Y z#e4pjvWb>=v2SFo6}Q3LZX$!43S=<}xg#E`IdQBXpJDe6RkWkOp8XbDh>EH`fGk0@ zO_$cJFM^a;WmZ~9WcJQdncg(GImxvERlf7ZOsqQY&r8gp%P+Bkah*A{>-RZUkV~VC z_KNA*3i6o^M$2D!lNUV_ne(xNG9W7ccqS*Rz}O_5xv|2k5%bn(sD!#5a=^{=v$C%w zARHrxx=N_&@7r?_V@6LK-K&x4y4!{n?ELv$5KN_aV#=^b02POSp+)KD`X@OZf-*1Y zhP^|`kM-MPA;I8Ajr8$_er>cdITShhp#J`c^lN=396H)hs9ss`Y16g~9JpB@O>pgR zHdBrI61O38P3bDmtv~JjISgBMa`tL(butb7I62y#NBqcV70Fs>K)r-Wf@3=vInho( zgetG|#_XjZLOPrDv}?$^1Y5Xb(OUv^<=KC+uTBOKM1`P?P3rFIzBUY`-+Z1|@FB!2 z?r`iKy1x3dY}X$YjDG!)N~>;yM!G*U#<3MDeL7&?clKXBTx^7Cgbrhybl=+WjJ(4s z4_KNJ+~u3H(nsn}%*ZKyHCkiuc&fei1tep>XiP3uw)^V6IChHth*jAprx?oUw`Xg8 z(Lok6k-59nw><)3F?9WIq5?lP`}&8qP||>_*oV858)&`hm;LwOj!mgR;hhZ`IQCAN z_L08umW>h@GCZPQzP{HmO072gvp;&8y$Pv4Jkk;GP!Lw5uH>XYZ*L6}nWp(1FMu)( zb_ER=1`~-Kvi0+(Q2Q#W)aK#MmM8dYZ$>zNZtinwX5(5`Xy^`5clT0x*xSM{6fMgM zUPK~QOqlaWf426O3eRb^_ZjMICd19=WR2SuennAl3&}ZVLv;eBgtd^giMM|9Kzt)d zr00u)aR~fSe$~ACjpWV=!0+P^PXG1n*$4!Ymwfd%7HW}Mtyl`f{te6%PF 
z^)KbYc|IBP@hdis^%vI`i{%G;$c_+)p-ZgLAIgri>vD@syY9=C2X%-0gtVWa4Gxye zy5{+ZdKS%S_O_h5JZ=!|ZDV#TyS5~*jL+FYX}*8~yuVEzeB)N8$sasHH)B7)7PU1y zUztp+#2}o!i#a--GP@Lzk2=B`hSTh)C*kd5$~?ezDdA3tFprf1`4;UeH@C+#4bC~_ zgPd!)V@BV@#`;BkeV*JoscH+AQ+Pkhb$Gd8cvi z>LLwB86J|Kn-oC2*PoVCoI39%pohT^2j0leK_!p*2GM?A!O zImm`gaO`|X7j#I26G+EX$_5=c;Gtf~BXz^TlVuH<33Gh^ta)&94ai)8dNUCJcsagMT)4xFH08iQqt6U!rS3|>bkrWx&y96&}5n0=?4SRoG`C0R!j z7uGp+!vsY>9C8GTxR8APWRw%NmEXq!bgUcjqb`yk^CXd#w9HT5xQ??0(vl`hZb=h& z4*A)({5tZn%GEnNp+Xrbx39CaNn3Z)E}%2o9Rl-_58J6r6n~@#Kb#lRS(f2XI7-Z+ zSJWdXkc-$9YER(DKsk9W`>QBh0iBcw*T4{XaYD|;Rq*`*vaX%pdHN90BE;$967AnA zor;t{X*nzpN7e)D4RO9bBCehSWv1N#f42KVHcmZYCw_1k4A!6X4t)!?C+dQG!p2TJ z$=lzt>YiK3{>Y=8(Mww9aG(@o`Q#oLD$R7~D&zD2iV>GRNgD z);Wpe!+58Ib#~(dbqu$>TSBZzRx=QDmWh>f{1Et+deDRpI|;>+^dc(U!jCjxr#R=$ zmxa!sp1e2UB#mFgS?134=}!o>+ezq;qh>K`jO?jEfqqa9~0?$PY*8 z#X%zJ5HD$|3yvt0vV}Ckh5{=?{GgQhLC%40JoS_f!~+AaD93|?GwHk_H_D^TQH~Ky zfSd?XSd)h7&@I$Y43<%rzcPb7zQaa-ejQRiH^9u`1==M%BRp{|EE92%mw4eiUjQ9` zvW{Q_5*LYSoPK^8kl&X<;!R9BU`Bq_t*c-0egoPT(nEW(=ZpuYemS6J9pLE7=-G}+N;~B_Nhq#@82eVs`Q+1MttaoMFi=0{o_E~rz=3VEaCA#d6)ul*zCW<2XD zsy+JhNEz6FK)AR|JvtMC|{J!sn3Exqh zon8pD(h1_r&BBDti9d*!4i8tJLFx~{{d_*(N?fN^rkoHUPdKU67e^n{D1G8~C`8dB z4m|06^E!@}@x_+Gd~wGldI6ufa)}rC%k#MM{CHz7VxKSg3SKQ5>@otcvS+SBbK?dINS*9$* z72wLqZlPvek|TNI2wdm@5qrslEYp4u+%-Oe=H*FAi2o8zl$RFxZMJhl6m| zA!MSS$ctzH`&N#a&ePILJ+nzKW_NmdzZpp7WJK+w`}z6Ym^b5)%=$ z5jvT4Oi{jHN0ce*n61BHg&w$dTf+4{OJ;1Y!|>%yvR-oddy`413+f)0C*&p{lq2gX zq|I|89dzV{?!b*$_;o;5uo-7JRu1YLDW}Z<*@-1>6Z#^^fj;$c3Em&epgg3NIwMU^ z&VdEWK^fFj(&u3AmFMziY#1^F4?4X*DKVEiNgB2#VjyK8U7Ree_dtf#tE033Wgxcs+8FXi>4A6#@B{X+De@|`-JE5E2W6vtIQnfGbg&KZM_?UmQ0cXB?iZs=Eq7r9XP!Fj*f=E66<4-^tzgZ@FdO$9Cl80 zB-No7bj06(UcOja$R~*dM_N87&1+evKe;2*lGb%T9+F?u^Q7e_aeND<$K|V4e3HJlRauNW%{D1)k2NixAuIU_tXFj^9y!qmw2LcCZ zke5!P%*TN!b)#NUIQw`?K84Pqo5T?GvMgeacsRMBjl#nXt}G1(gb0+0yiVCVY>q4k zt2odB0et8uA}S?@x(OBCfQej4goyyLmg9Z$WHt8?MKRnJn}l=82yhjP*0Nguq9pJ zvu0I>IK9HMGR_KRX8#zMj|DKDHi>xDjUTAHpT=^CFYQz6B=M5vp*`>#<*^JnlGFon 
zER$t{R_4QXzQA@z8YvITVV@dRchpDb3HjLiLjZQ` zKT7^lG9w)}L%U&nl=*nh@=-SFgZ#my40cAzAp08RW!r&{_{GTsS_Yo?cV$?jmL;NKT&Y=tI9{imAnfmr=XXX~H^Im?Ko|-qk5p3XuA^3xS z|JtXCa^4)H0)O>90FQIr4;`?(77OT=b}}Xs__ct2d&f=h%7odtIgnzh?@@N^;F9S` zxzHqzv)fv1%k`hVPg#^z_Lw&RTJpmL2-<=C%7B!o7LGH@kE?&k(+clNkadv8c}qI) zyvYKz(s z`Dp2&aIgVP5jg0RgC)dcz{50LrxiHhA%5VCxVW+)mtW`1>&HjQDEUGs>8YRZSb&E1jB^Yiu|r^a9*;V#IJ^tl0CjP8od+}GLYz=uTtJyo1Mnny<9Pb< zY)?_x#o+~7%E0^_IHMj5VdcwE=$hEZ(GgRm`+9JqgOd#8LAjZSPJX_!WI5!M??{`Z zZ2>blE_mYuDWc}Qz(Zv3|Xc8$cy?w8NLpblkwCAw4B(IyhtyP(3z88GM?w5y~Xqu5;M@! zj!{14BCoh3b&tZ2HtDwu@CPo78T*Xl&H96kyyv=pFTw$)p5P} zeA+_ofX--I&(-_o7rMp-2zIp;lCuyWrb}C9T)v~Em-QN#C*L~`lH`x$TkCkp9`~M) zG-PYZPsQ=Z4gfhwmtf*X$9N1R>7O`>Jit_u}}!=J=g*cIq|Ym8A4|~v*lf; z;T-w-3uHk13d;9si@j!F!*ZhXueZ;L7Yc!n!95qWaG?D_J^gt9nv*_~{Fijp%bPdRaEIX#pCI>xQKgN6<`>5Dl&aNl8rpA2s0Tt*Z) zpX@j#@yJQzyh#mN!! zsKv1w?bX*S4y$5hNxKj5WlZC`C7pLV(FD-HS<<=ZjgNFmkIW|>HI^YAf~5Q;9S1r% z)@#730S74@v?P%ggOWmu6j&XVN3vk*r(xDd15{*`joa!!U}r z(c)Ma>>sFWsd|vlsoO@}3(rumT!qEGgmxOm-R z{ymoQX&$M&Y!l$m`*&J>2=PZbz5}0BPhw1ZthysWM)GHWBkjq(_|%q*0IbQ!MTkG< zArFyM)Ttcc%5$GdxA$_|VC6PTbztN}=GgHc2p#u@Q$B2_hn{aHJ9FQ{E*(WUxNDw% zo694g`8~4qD6`O;n=e~Z{t`Y3gk`jt|yKVc$ym%^_R^R%4v&H*oOnllT*Q(C<7p#r6&!@)tR70W>x^2;(x z3vH%ks8P-(om?me(zCU@Vtsvb`|d7Fzv{2Z^S~h2$Ih3Ze1PGx$wl*vTld%~{H#OZ*(GJ6d?=eX>C-v} z;*U5o3hjUUmh20p5Kmc&U!1Pwfg|fIN+wB1`AE++=JVxaI*w2NwH-Ni%X$s^vQbA0 z{=haLKhj9v{yFOz@w5k=lP@tRuYtAE^T7A+ddO_r8uV?*Prg2;j<@Jvag6|+q5{j0 zN@JYFnsNTI5FhGCxsjiCK|3MOBS(&!$*H66G9TY4o@gWBE8nfK(7w@p>zmh;eRxEE z+7IMnUj-h#f62)&)*tH*j38eWwutuw%aZZrCG{LUE_hiXom$96d*)H{@XuLaemvW@ zACGIcU-r|oY}OOY<}t_vJp&IE{mnxAQHx`Cg3K>#csSVUhQ5_$k!J7yVEef1SIz1d zHlo}Br+M?(M|Hc;h%zsapurEaooMe7_LZmxZ^B0aj=TrCh?qafvG3N~U4I!>XEE8r zXg-%7Mr2Oli2AFGNLu6J1?POrkp1@i#&g`Wo>P;vW@v-Y&A1i$fw51UBN^usj;DNP zn>i}Ia_u`6?QOvms&3$3;8SCik;ibbKX8^=+WJbV`Q|55~!3Qm0(XnUmj6rkI5GP1!Ihh0s2I zyTd!lq!M&+Uy;aD9{Ef83Z2sqh(lE0I5 zo)_W~(6*Ui5Q7&21r98(?|b;p(6K7SruX2bj_J2|h^! 
zY<$zY?$8NlJ~x=ZHNXz?qi(itY%{%>`N#ywiiwWkLqb8M)K#7{k37nBc`j*ijy4LN z_Z@=sZl5c)gK`XzWj){b^W?UhudKH2PP1YMBnYJA(M5WHv{^b=bdYx4y~j;7xQRgF z)3Prkf1fvWMIaxf`!tTAgg?m(wl_H3<5pi6<_?)V@13*#?7WHV^O6*oL{8!*`ANBw z@Q`_N4xZ$}{J@b|N_yC=bDR!k`vsW}UVdGIC*sdKJ$JGVyby5zx%uYRle|cyTsQ|0 zwjnz1h-Z-f594^GUc8PwCZ#j;!CTfT2PeoSU&!kR^5@Y{$8}sjwqNGqgckZ?Oj`Wn z`qdc9JbNFU&5Q-2>yCU;?>mmSo~aG!E3l#sJ#jw(KLqxxeDA{pd&aHp2NRG1g>iN= zvNgCrGEUYJ#v`zS;Q>3jIvY^%p29ww`7mKVuomN(K9`4b07r=>?+N~-1Nii~rgj83&AI<2Z#Et=UOhcW#|F1n0o_EJRFUxMcT-=j;!5 zJuX~vk8=|FoOtiy11{gnz&XZPnJ{R_Y8%tDoU^NGlt0d?;_98uYpmmKtaDQBux@bC zw71!J0eCO_Ddgv61f^nV*pRnm%F{tOb2|yBjn8tnWM|smGN}CI?c`# za%XqPmV)P~B`=Rf&Sl%$Kn4tqhU^O0QLJDd3ugPxLf~xVWWa?jcgW%}Ix(GDLAGyG zwX%=yKjJ1rOt6^YvIT>QY%k36a(HI&BX&7y_991TB--*_5O@U<(f<-lS(jXmLN1o= z%F0|rcInh{KuX(jxq>~j4_Om%ceux70w5v>gKjzUVJL3BvJQUc6 zO_$|Dc3k^%)21mmn4s+L4S3W1a^Q5hRc^=L{SgO4TSKf@PmV^U!-m%P_u7@&W6l;( z23E9DJ_pp;bs_`m=7?P|#^BP}PmJODPSh@O8riYgEOmC8qnTX=Ik=D!WDqvlDeYFC zXXuAE54*D!Yx^TxhI3@a;FjA=K}6f}bq#GW!_yry_84MjWm=@I3c64}S;t7vV7W8n zXdjudZOY!k(at(E?V5F)?XQA~NMwhML&t3nu!1atA9@A6;Tk%xJ8o0Z&i7m3^8{&I zAX{Y>LO#?bc(1c{;QDD>N3gG5cVj0^u=9hw{QJ-1&_LMNqD%+{BJO!j5JCvkGruooOH3>b}mlU+!vg%*??J48rg8TS z>mTjRba%OX3~7k*m|dNPO(Nvne@JMLxZlV+MiOLhvpV3u%1eE^>yqd0z=z6JF}PRUr;9d4Wz@yAiG@2 zhoiT$^#Zf`zTY4V14nuBn0@}-uAag9z>jZ(CsA)tX1u`&@3=;!`_IHnQm)*}IC(y4 zZDhTj^JyD=w2OvwgB3kY7eqPmGH=v52K@O}>)_n@&W!Ku;|=oe1%7r{f-ghdj)N#! 
z5a)0ThMSIrE0%HeGQKDC?twTAqgKM1AN7v>8wQuG3`_Q{_w#Nthv{)VnT}%;8cCnz zd~9sm^z<&9_Q7C61N9C3OAV;cs1P@?6Ge+SPC~?`BOY=lq07euX-Rqbb}A=9;>rMT zv>!OscpHD%uB}ZcooE1W=<2_aQkIE!F>4)nggAYY$SZkDUU58qdU@?0Yw119bdO#w zqr=@BIgw>vlpx=V5OEjscksBX$sv zz=5p05Puw~GX3Dpyg4|N;g#XRfwagLo-!YgzRYmeIG7-YVdhmQx%jP0j*H0P*fX`8`eWt~3s zE38{=uSTGpm<)70A&0Q-D1I)ZpmEm){-_Jch+$}x?H#5wBOe@Pgyiktk>~umdMBPd z=1>F|lp`W3#Nw{VK6`Zz^5QXv!eZt6k&MHQdjnT9K?hygzWlZ<`Gx0Jr%p`X+D|po z_Rzm@;)pBOvhNJ@qCio{Y(vg2tOEzA zJ7a$=9eCuG+m7KVPxoGE`ZM4DMmhrqz|N8OXH+r-YsAxYan2*1J~)qa^Z8eOHCsDcg+YYJOa~QSn^8_jdqD+F~5T^+Q%K@e+N9 zv)vZY;MWJ^Wu5Yzv}v!>+I0o9wASthIcj&P;2CTygp-rP*dv@%^o~h%$7364TiBZJ z2kIc>XMXk~=M=+E{Rt#V7k3Vxjz@-ovPy?qzBrP6kUnRB&~?fF7Hz#?S4EVLnB;Qp z&6XttC&+xfBn{3XBPJJ6mzZeq>rl$dywpp6CNQM4yK5!~E;GZzdG{ygz{knu^(~x0 z@^^|5BdI@WDN`km*8Ey4@Mg;!tGfZ(n=PFT+&DSt!3`a60HzZFq_|3fl>-h4B!av4 z9WnRayvwbs^w@h6R#o`+Uk>oFvUK9{SZ*vYVAVnfDbwV1u#FArq(gdtU+$d(gP?o& zjm7H4fs^DR%S1d@Oh(7W)UhDICwU%w-l-yH^{HP5D5NgTLAvRgV6ZrrSxM{b>o!=a zhzraISy`8|9>K3Kvv(U<9L-F?kRNS>*RUPf1!M%z`!cIJupK?%<7OoP{n|6us`cyV^GKe zuEd4<`ZC~}bT}stz!p;$9d=MjTj%N&WdL?qNkee78SHRP#~J4c#8~Qyc!>?JWyqo3 znoaTZ08==&kQG5XKaiF3V|5ukksovu-&6r<|b0%g+1|Wjj8Ne=W=Q({N2J(N@B1Iu$koJ1{cyA};;n0_{KK;iv<9 zFC>GXCzCbolUZNRCN0Lm(YIOTVSQ02;>By`>*?q)=x+`@d?+(Hi`?_}eJO#Hg)7*$ zuPgbmUO`72;`8jz_UwBoFv*Mauukp$2j!z)$p`t_UxDXt0B!pf8DPJ>hoNm_hZf6} zbl?dd4;>tr(c$^b;!=oD6m}qFe;H=7C$vq*5kp?%$bJ~-alHMy0WAaMX8UEEz|I+z z9Xzg)aop|11MnlSZ`)lWQT;G>Xbi2lonl0AINSakV}+bYS1fH8S9dv2z|;l0NI06+jqL_t*VLOLf5Q094$ zJ|(mlcTdRJ2e9Gf4kwEEd@`Ilf5q2-rXA(Tz!hdXf-)`NIUGDV?Q}fHk1a<&D;>pS zxz9RqGk1hZ7}&!>kOSv1G2#S+e+=&74B(S$ot{%6vtR%pJX{&XvK%^1q~&!usgWIe zk)+^}j|3DX6a+ouoda;5{grb;8ha`yMh+b~W;SjP?#;0oA`(#cy1^CuTQbLHqGxn~ zH-qm~BOVSA96at&o@B^@fepu=E11HgzPWda-_V87alNQW_GNS6h zm&K>&qy{I~kUlcfV^($rl;N&}Oh$l_^4^No=_6EIwG}@6K z9n0(y%#<@=9eMCP^SBMGoO;T`K@a5h9e>au{Ya19zPRTAv$z2iCP~(=8#0}Pg9dVK z*$}J0r|vat%sRX5 zhBDh0SIib$=0lnK#sF$$gi;xUEzqbV=y}}c5jXI+n~mIdbv{h6bIbbhv5ag^W5y2z 
zjuE!8x_Ztyf8d1xxgUHu7<989H*ec$_TLlSNnmf7NWg$CoUpR1U|q9t4(7~uJ9wg% zU_f}H#h7(1<={Ee_ICw2T%B3n86XRAKwH^=bjmpt7=+R(0Uc$)gxJYj*SoUdJlv3R z-h*$>cjeIe_RaDrC-lHG)sTmFG2RzAp0A3@An=g?Q$ z8uCIGv=P#hAMG3Te)|EfY^(EgJq|~o4!9a?OR!0V0N3uEJ_QpZu8u^TLYdNbC=<%1 zT(VC&@bF0*9%;XC(L_K2r{H~01(z`NYc4gXsx1A+IdB7fhCi*q- zMt{k=15b%3`63>xo!rXJ$CnU~0KCBifpzOH>>qfb?%HgZ*S@@D?S9S<2iqJk=#Dx> zSeRRKdD$NWYlpW>MZM0uI--ldcHN+R)ueN>>?9bTf2M5@Zy5>epZJ1JSPvPVz~kQW zQL}01ab{`^JwebLV3II&=xkd>ox<^iZ9z`(q@BZ7FnH@~Z+Dxs*;kF)j!fRe;PCnF z2=xjq!4rMhI@_m3g>^P^z}|oM?>%H5vbKSGkk92>Zu2jk;zX?G`J`c=4=1`EjDX*McYRiwK)+{T>kQ z-9_3z;7A)l9PJ9{z^H4`-qUTqpnmX3Q0^lu*q#oZWxwhkL$LS8VQY(ykDc5B9b}wp zv*QmNFyX`d#J7KVtBdphuzLX${wr43ar<*G`?U8B-gD7+9y<~|wt>eJP`|JZ+~1Dd zvBezOhpqW|JY)65iYyB+3d4X916Tw+(>|NQ4cGnF0mC4AW!pN=GKF)^*CEbd_8_VNXeS?t_xdumkhOY1Mr`$8ba2)3B8{YA(batc` z&|%O{CxuQUQYO}4Y7WnxZB8;@aD+~0>nuX5W&5}Qo;}8=?GP3R&v(bRm2>Sg_g3GX zpD-)qKQeuDzszweB`$dLdf(b$?B-+Sm}UN!7HvOdh2|pxh-N)tmqEU~k`Gq(VEArea0)DVl^lr!_1Lc=-ap&acr?27>F$n;BhD>2yWbC-} z*!3>n=ZWi(ujMzp;ZpY=j_&yY8|b*S<4;WQfsZ(hCvW*{`v~o=PCwK|Xglk#Faw#1 z8GMPsuMbzQCog418g&k35pgcB{bO5&Y-qa(pbxk0yBHg1>uhOZ*7$pUkiTu+InLge z_kKEf--pfbJL;4L?4*piW*y@k0kkth_aHXC|CV#!0Ud6bU3O5ic5uK7Zmx((|M+$uQm9B6ZZH#BT-y0)|O9cl7( z#|$5w&t(QSICld#QU0Dolg{A`({Pdrc$rO9kdp!6=ktupBjeyGGH@`3?g)5caK64R zzzYMJO_^C-I4Jn_*j@_*Ksg8{FCLvd_W9>d`&(;!I*jF@rwqPs&?(eK5@xzdOdueP z+CjtU;Zr_NChv>W@d2j+IsK4w>4%cnT<|94od&=E3a<?$hqmRhr?3f^iLdk z(7x0wj51Spz&gJi41hS`AGMPf43M|m)1PTPQVv`@=VJVrTZzHT@!-UxF5oP!TR-Ho z9Nhho9YkXHLePb+SSJUQ1-tLu<93X|Y3=Iib`uqa>dP^-e~^Kp&+0v8vJNq2K)b?( z83xmn9f7k;oF}c5wP59itPB{0VgQOjY;Z2i!nF)c<58x$rG6Vh8^(mgJ$7dVR^RFT zfSxi$*&g}wn8QEM1?XY7J?$%&f6!0R1`#i9nDY5$5GxkLKBdcUTOa9NlVfY3VSY~T zAWlvegfwBFlEr?)+MM6cK<~nyQ9DVMZ3l>3YbRx3W8qu2>=`B@yE@u~GBahM&zx97 zd(LgsQdXO2{5F6%W6MQA*`#+7_zQIi>X-R}E80JXmviq|i=4 zjCR-2(Y1;{(kLtHf#-;4pzR%a_#qdCc7iS3YnGQ+)hY9k5Bi?@wS8uM{~_1bxdICw zaNKuTC;j@Hb~9Ue^!t>kx~5IiPJ!XB2L;d>Zg$A*^q=N?I&3^_0r?;&e$XA0vY7lt zAa8e(@Q*sny6)@iv$x%#zr;jJFcEK8-L1pB*5Wc}xAgDNZ0mQ$;J#w(3eM~!hL_E< 
z?GyX#eQMO+b4RCE>qgNq!7*j?P1(Mc^+l{VTCW)Rp$-^O#^_{V>%4#A-cOqTE<4F# ziQ&w*+5QKURD7>Uz<+<(524S5qYj?9cinQ!Zu^Ck?0p#TE_pc9Z-Q?2*i6p-4fO!I zITpe3QQLhN3JZvrnG^eNwhibkrv@CcBcs7CH$=?Xx5KaNxzV;KyV__29LD{gv*!}T z9~^b*xHCzv_@dj`e<+#`^jkJEm+dvyUuMQTUTGV%jT`u|+0y+<=Xk&&F!wd4=dCSASL8q2P9iNDI1Jbkki{ieAKxK% z1!w-?s1646IAJvE0h0=Fj`HOlvMb&ulQ#~-IQ&@{@-g(-e}gGe5qMEc9ZSM{@fKnn}DN*z!%23ivoY)gv;sgx9@0aGlP5I=;{n| zxUeZW_V?Z3Yz%R9EO8E*sSB=MyR-JUWF5@Wv34h~i?@H1op#~~N4qOwhq*E=XAAVr zOcHRy0Cco}oD&jdYz@*q2~LOI|~`M?jhazAtvHUmCe=7LE- z*9Ki7*7xyjx5Mg+us>tl88SEV;c&El3MS4byU#b6On@9GnX4Vyk-nFi{_Y#Y2?po` zJT}d|&84I6Ld+rCfSqWxI>?uBVuHm@u4Vczw0qbC_&`^L6Gt+A9VYQ6er1+daK{UD z3HjS=)F<*H4qw**{*;MjLZ&^|W`U0try`+k#fFM-u2ZJis)!r=#*8C_l@Hw5@C+kv znh4!nH-xJZxGCd?$$$YV0tO#4U~mFj3^0(kU?&B}_5_X_cw{?2w`vG^Fwv1DP$t?1@*+?l$s{GhT4Yy5(?Vovk96(FF3h7QB{t-Nq$d5W?pNiH0`Po^w3xIa(^TjpfrHp7#vSapT-8W((?QFLcK@+33AlAY8c4JJpVPKX~rHfeTlusd$-8{9hwH@S4+ z;nR_aJQ6p?5kFqz$a?ni^!sVFiM1Pt?I){(6>sWEyuk~39`T4CbKq#u-?RN94T0rz zw+AME5Wov{5Oq&y9gNPdHJDV13ZQA5KWd+y?+ABDz^)LmOJ(ZdXxMiLFTD#UhNSG| z3z;z)ux-2Dv9xy~v`?nPPFYWs4dW2tBfp#g=ZLG`YrC=&7z@k2W_~HakUTq{dZ9aqlZNfM9mD6DV|E1$bO`w7JVTT`QF@a;?uZri!QSlqBB;+MJK2A5fBbl? 
zyJOn!`p8Ul@BOr$$hpHz+7;0x!N*X_jk=Ka8pRK~x6SVhlVpD45+=r+_> z2jAqjX-l2BQVW)-FVqdfz^zYov@<8XH?%RtFWJx9&dz1mO4qgB7=AY%Tiux-v&X72{ zSf|+Cf;ySEyDnrqlVw2O&i$WsE7h22!-N^bO83?f6Vx-d+_P?AAJSgH2lXt=^6i#1 zV*|gRQ(wMKp?ul~&Wo+jI9Om(}|^Eu)`ZYdCLJFXyJg-IbXG{ z5Cky+I|U%a9_zIE10xPXF!-QT44DxyXhlBg!oUF$ul*zVKu!dd7d41r9+bU#=$PHY zXLmAm1kNHbL;zND!U43@pLmjwPtQRJj*y+09dG60;0g{b`9NRN5LdZX7;>C)d@!hB zI?4p^1BZi&1K`TR5eI53-9w@NNCP(b(b0q*VK6^#wmDT${|N4p0lVUZauK(E{iJ;v z+sLZAbHovU&>Q8k-Z1c@?U5Xg3GIP)2e~njmo^Q0q+_5zx7_Up+2j#-METuqcrg-i z?QxR@(vJMWG|Rvsm$naw1S?cBKquO7=e&K%;aKpzFWZTvmFI19vthl{wwX>F=QT0o zbv{`^LwT{KnE^Hod!l_s*)nLSt-fqe`Pzxk7nq_=KlspSIQd5!|H!(foU(3#=b5Jk zwugM+hd{>z=b-Te+c(<`(xEGLrme~L%XSU>cWu&6h9VvOH}#I@OkiPx0`)BO)23M_ z?TPh+^n8IU>rphcDcOHV(IF1~0@@7&+cd7Ry2p_HJn|ihKgyx5 zY>#x_IY~f1J{Gv&u-}6G*i8T)a0(G{A3Oe}<8ta<@<3k!+|gFn+T(rqW_Jf6KR;_H zx5y)22HCF0?3=Dow@Wi)W`kXc-2H$F`@|g^gULGBDOT;J9NQ09m{hf~==$RMgAOJ7CseN`Sm{`I6024I81OfdD?=86g@z3rI`+n9J z%94E*;(Iz~&2raJSRVAH0}Vb1yiY_;@w{XL*|v|J}%U!v)jH;WY90}1Iq-h#5yW3FTD1T$XAHQ&r^wO=zYq@qi&VDZ}ah$C}I`QG;}BKQ5d5B zFet`=*dHAE!jP7_^DzNwL%^ta=} zxXo&kI{CS*4-8TnqGVtijvbQ^t=gR;(<^<>c8YTR!GAer)5-M*-No>fm?EE)wGghw zlv66Lv|HG>bWBU-nN~i?hJM@+{sfGlp5z+!09{MnH6Y7CTZwYmB#k_m6BB$i2Rz`M z$sIy6Puw}{FD^YU-tR9__XwE8;)JGeyK%hAjVFGi6Y;xt==?%%)ExpRP<%Ow2`3jY zDUXQ~PRgECPHnL4hEh zcQ{}8l^Za4&gU5T>A1$^KpJJ2D|@B#uN~QGrbc%cWDiF{4)P0T5Cm{Ud<=c81$lT% zny7k+;u&U)I4MW|2zG8VAj&o%D?&ayi{e+QxKj0w(}Q(VuDnt%^5n5x-f8EDtkRK3 z-NxaQuk0jwhyjm2Kg37boljTlb(+pQ7V}#wL~Uoo4H?e690=nE?+;Go4PAo9fjQ!k zN6zw-x7?nIn{m{tmS3JML!Ltp4oLkM!O55ZP%iL|lbtJHl0VP6@#L3{x=F0=UBxVv9$}H0<2l!%ZK0*`^na9KR*S8{%@no#!36!qkN};j}7r)-63C8h_ffq`mz!; z)H5701e`Mv4`M*xvfV*0DHCX9IpoEouZNGRi7>fCfJqfFm)a3`qCopsYwEuB4+(xQ+^xB`OVZ;7!?l`D7l-M}F*+ zK$9;Y($U^w-|XwK(#dv!^r%4mDIaOjR`4A5rkz>aLmK3xY^22sHtiO+0vcR{2b?qC zp`?z?&$`1kF=AS|M^0vtkB?#0eF*YI(a|QTKd>!?pX@&<1N3K`!VWD2*M_VE&%V(w zpV#haH?R!YcVqPT*&S~-4{clC(^(eq-)tT8Ir{*otUK5h>kM{-$FwrL<(V}+y7Y0o zgJpAYFJV5)1VZ4*Yixz5EQmkZt+)q$5^0bJeCfQeWWEl8fifLG4t_Cs#b7!vARR%{ 
zfDaui@{##vI^sctl}a2XADKs<7m`WR;yejG<&}A(WS8mr&ZBSveIcI62O2ocNwAPI zATRPEFt5Zqu53wzyp#dwNb_w1@o|B9;e=s@GmQYA?qi(sq$vN_mCype+2B7WRkyeO*Z7$4i+@ zIR{_rSW5n=JT2$EOAt8dK^z7?ypfYe-t-w)=!n6E|56`0DS(?R2NM23j{_6G44=+V zm)E4_QKl7po`gq8V+T0E4==;%TbEr?@G%VOtuXRDiEL5yz$_}h5I^RTm@|%BQON^? zHLfZl&L5=Hv4Z~NCE^I zSZ>*n-Nw)SID&`I$4{5n#h#NlWQ&^!psh;Y%qQdIIr!1>hOV4IfvptE3qGX5QT7RF zubez$d4Au?c=SKgfoHt52gb$mU^&jXNZGmIB|YJIV=n5a&R0)@8_@NHD!ZLm5?PV&vNjWd8d!uAHo$^HgyIe zCs8`Of_h{AV(|byd4vL#c_R5iW@%&6hKemF4wqv2fL>ysB!3}2(vppGp*$H%;m`J6 ztWC&r^PNLhUrx{?o_gfVUkKBvy!mvIb{Cbm9^zWod6zeK-Y{i_$aEQB=o~l9M?Yel zxz`S4d2^L{cypC;JV)L}3{q~4NjB}w>Jc?SPE$u&b}4+L@>6E6cz`blYQX?1s~^%3 zM_R6cxOnRv779`27s?Zt4qhDCgN6<~>HRc1s=-Qeu1?HT?oqA+!ok6+5Ch~z9NNj; z(i-asWnYYyubnV|8b_2P+bQcAysITc5jmMebXv@hC9|Ks>H<9NM`mnY zkTQFWx=$d#>=S7ZnDA$a(uH!N&M|?+(A%{V4uY}Lv)(2RZk#s{j(3`wh4ye)2ArS; z@kJ1<43!gt6|L-cSD%+0oR{Nt`Fzv3hHSv9Tny5XB@G_LItk98@#&bqm}j`tkh>5M z%2kN2-qWQXKK7NCz0~s9zAI@H#08TJ+!>LyO)_s1{773#U6ROAE=@~1@A78o51;_3 zOfMB++>q(Ob5O+_eW^SfA)g#bK;K3UQFZ}+q6UPLR@SxDUD9yiC)3k9$DmFQ63G{F zei~)PYEGJg7;zv<-i4fD>AY}o%Dk02`k1gB;>GKFI7Zr@&auk`$xKT;K`pnk~;3pSI*J(AEUv`-}=@CeLh}rS&abPIr%30 zRkoEV8Kk2fl^zube$bPg{V4ui(Vj7Rq9>O@A4*?GCon=^C zUAL}rDPF8N6fN%V6qi6tf#MV|?iSpNx5d2_FYZooifeIqC&2>&PTqIF``hQ+XJ6;% z`E~LqSCT6;bFGXy*P3HK<9@tm=TvVp2P+IP&^ty)%uVKN-PU|0_+N<&hIhM^BKuzS z5Fl^KwsW%p^4UK+>hBkpD-?b3TmpWdm6s$hpqn)}s>wOhYW#Hc&6g;#m`MNQ#DSDb zzxu@+`FI8&3$niAS3Ze*rPdD`A%wQI>n5gZi8T3WZOvx9rjv>11AtZr#kB?HV-vp4 z^my08hNfzwvq3GxhW2$Y&1Jju1a@?Y&zw_Kj20%GMNPm{;h%Rw0bFY0e zi9^7)Fi9;?JG2QkUUKM!v5bJwtJfe_zM`jnLAKwEO-PQ(k?mlR5rF#nmfT8nU!G9&6jCqtzA{Z;N|QJ)>R<-F{rtSp z_rRg#F~s!CA*03VnQdcri7%uHg`c$?pBQ#)u_=9W7-(#whu&A+WNYhAHe_8=3oKlt z_w^&BCFrcRkC1KbvgI33!(^2C4zSvO7;Aab*WBt>iLfN{cV>TUDo{Q)6h-Ykc{0^q zvSl1b2bg$)C~>9jEQQ`ls%`vnGkyH?qKsi7Hr68dQC)W3hQHST;j~Q8sNj^YYCUyl zJF=0&vMSkT2DrezeP&W0js*Ad$a&6T zo0%4O2KUj;tlNc(6MFxTmpOE=(~g1CITy0!_b+WIl=T zpi&IIW8jBgkS0dRRGhyeN(33|I~H3mtMpW*cp@`naFwXE;VJ#};^8lOZ)z~%I#){_ 
z(_L$9Gjd33ie7EhSzfy|p6(RcP~w*~bb6_LL=-RBRLz{1%Flw8C{*hd?zSdqEv5RY zR*toEM|rP*E$LgQC!Z+Q)F)8;$qjJ*-B3!^6?g?WT?yN>TC8b5#_)G|&a+v3*I1*36=rl(!Krcuyy& z{J;WSL3I8zy!0I!HuWQ-QF3%;=BMb75@n5()rz=VtRj<gpkmOVcy z?ulk_X$^vntSH9sV88FdZ$%5|NJ$Tg29?n=(DA4RjHphF^tn8uzZYgDcd`W7_BH%k zEeJM}fRyZnX?7fZ#8hNCt$z-}mi_;mC(fEV|cQ~1Dr+^1kB2L&L!uP3n1 zUvG*_ZN!~z)ZvNsu;eUnbdi>s!98=Scp~w`Jb7C~6lhg7$w|BB7LzXhtH1As$Miq$ zJJN98TJmMU#w4BFu(Ft(g$CR+!)K#Avd1n9xNA4RV|XktiedUA*lFU%-6Lc5cA+qo z1m_Y<6%Dj{yCYdiw<+vvzeJgtA&@DAoqm@2;{3%opBZr)F;R2yr&0sQ%53aO^v2My zczlXK0zKjoPM&^G+470c>bhnAvip&t+1&9ksHSLbE%(RYd*X9%J}<=I)WDBhZHwT~ z{M?l75+9^ibf~VbuGd~~o;lHVd~sjGecea|^3K@9^Nts%ntBoZud%k?Kf(EPW;+sgbAww~pX9%Z^%IX;xXVJKC>CKY zz-I7#Z{wsUNkoXc?Jx?)d#hJn`P@Hd^|%qV*qoQabvyhRHsf7Q&kw5ppu!RMF}cqM z`e|UGc(fBae{WRrgoPaI3UOQUB(hQfPIfkGKII(y(0&%^aw)N=ReWB5UaM?jWtMm`4H|Msb_Ybgt^@=2(rTQ}7Na_f)z^*~kROpQhvH5ZrWLYQ-W96w#Y_#kd z45*+w!P|yMS_u=98{8JFQH;9!{8_XxqyO{A57M;RVy46>>hJpghA0wo#bNerPHDNp zZ7eRQskj3BRb4BuWeG}%k)B~(&$i21t2HCQZ38LZRM6I~ zPN8ksYfV*@GvG1$sHzE!5>Cjx)9~T2oT?>8d<{;n@+a+V^BF%EACrH)vX&B}c~}@+ zg}UWFs==KBu(y<};Fwf$3`%M$p}>bc0Nf&o+*G#(TWv*9UHE^qbjWCF@o|HjvDl1D z9>*3<0f_&m&HiOt_VN8qUt}0(DnE>tou|>$$_`a zYxX&hTdHGxIS;aKLv=anm)Jo4A8%;>gTnon2hiW`1!1sra%MDvwk7s{e>=gQED@gm z2W$Qg-{zOYCBnwvb_AUVvJaz+-w)5`Qk#=L5oIX;XK(&vbc-Oll${vHJs(H`xkdO2 z|DLvK(SijZ9IjS`^!nkBgnzTw{u6-<$D3lJbUtR=jb*OilO~&1h>yJ8m{J8PMT?X>1Dr*e#k55?R-5{{jkr&cb?mV1RIv`|!J5HiahTdxJ{s=q$6 z&xQCaBsO9i#Js4UKYs)dTG<1|nMH<;-7v0X36z4m{_+Tux`P15YRp^K=VNPihd3*p zva&K`I4e-;6uUJj?pqG{iw8X6t5r&l zkBH6meERE|8}N9#p8@c{`K1{OuRi``2(=D$>;(67wXLtVong&5hsYcyJNZZYz+EqF zDqHVI_G;%lyMsv4>Oha}sc91?kBo5Ktp6lzRSM>&A=#mc^8IyCm2*2xAkZSX?MO!$@KT6xm#%8kIs7;VpU4i5$T+u1OoYUM}-!yM!6MM@~)#Pu` zKRgkc*QY#jOd4F`$D4JoYXVicqxd!39G+M?4sH-nKb>&*GS%Q8bpg%)MjqN%8Ay}gWX^3jLZt(Mng5O!ez@oMTwIzpYV%AFyz2_g@R)Oi*p9)GqG@m{yj|tpOjFu%B{TxM{(R+f+3G3cYxKXoaRu_;ut4JkIg=E)!e_}AmZ1g`LzYL}6p`Q?51#U0Tr#-K) zC3h`rdq6CQzw*@arbcTg`7pWdiF{}I{#%nM&*rOa7*Y6d%~B%SbUrp57L~jz<=1k* 
z-9gtTRsJV_Ya+L6x9x*%YxU#od+-#0s7AFas%uz_KfqhDqAmJpG18L~h^{9EaH z+NoT!H$UgRR#n&Q{;Y&+-jvl&U6m#t907f@El4=^dvde<27Ex1=u+5itASh+lhU?v-tr*bX`3aYU9)@`)|P3wJKm7Vcvu5|Ud!WC&*F`v9SvRLcakl1dNYJH z6VO^|W5%q}jReIkDgZq_p%ecOjB74d`W6yR%pnVc-DDDK&=3VH5lBn18CD}?&SVS4 zDF*s)eUtnNv0u4y{5*B1!RqOt#oCimY`l7wU)PSHupYL7%lh^C^S$ka@%j_F?MMEs z*BjwouSr(eQn{FojUNRZmg;A#ti6U4#ZtMPVAoCSZ=V0?s*>yOkEyPoKWT;WRKaFB zMDc@6jS5{+eVfnCwJUVwdJONjE_(~W6WD$smBT5?M}jsoROkLdDny`QQLm#7?NOUa zxo|e4BP(CeJ5&8mpumSxtz4DV3WbrBD%a_*-fC-wciVfXJLh*0!uq|s>+y%PRVmc) zxCzzVH3=4{oR!%+ufSiLg}4&DrraG;fo8z(7qNsddb3382nFBj*Pk80{2$|}XD(4L zUW)a+p0U$VsQKYf^Du1jc@ZLcG+D&BJDxt1KEh+w?oA&*1}*f8D*Lq-9!VnP`D!MhIRt?uF`1> z-dz+Ck>CY93b0gnvLLxgo9?Z69WK-amhd+Ht?)qp$H&BoD+(uzz(&tdO2$kHFKS|IIx6kTlhgVnft|@7x#Q05+Hxex6E=!`&wn-`Lj}kYf0K!x8we?c6hX`t zr}wT$Y)2x9`OQ{h;vc@tU)qeuOibSks51Ju_nxe&=W&n1-8#YF=&!Z`epyn$Oyf6) zbQ?m<`2~o^6kB>IfY$!NYW z!{{RordmHOXrK_By3!q(x>DwPKga&@Ku+)ve-fvW{_yX8B|x<_N(id5vGJ?BYh6M> zAaQUrZIIj9`}GXD#DUlX?Ptuj&8-K>z)Fj&u=wS9Z=@xiU47N&wDxmBl=L{I$U&;f zxIvbT5};p4jyoKXD@k>_0_}JY&J9>nU97cAzCG_;W<(M+mJM%%M0~2Zo{;ptjMKI0 zhPAjm72mu@5s8ZM`JExaXSpe|rrGL#*lcgRL%a_*t9&AzI83M&q)LsBqj?CyJc5ny zw7J$=jH{h@NQ64Fy5w)iM+PI`zYD*xw1xEzT`eUY!VDvPC1%xoEZTYLKa}Z5tv#C} zc8L09*tf@1Tmm(L?O{FK6b78q$Imw>w*2-7(yZ^A@qe-~%AmmZEQp9@{qQ7_czjk^ zL`j7QQ#~Cqeg%^{^-W=0$IbuITQFtg?NXG^oz7G zD1L17_GFp4?OC`gtlwTar)y+ICpx8C2XRRwZHi{A-{#CvB)k4fhM*ZAg}1g5-GV4l z@0IjqaCN7pUF=-3O8*5geL)nn>279MJ9_N=@xBaYIxt$A5?@-1LeiCgEaT$ECU1>% zeZUM!NNF(*ykm4DX@MarRZp?Q>(mA#WSn@{0DTOL;iPXq)0oAYiU|Y2&?v&Ug@`0WXxS-B!&J5aAH|8+c#XAh~9{7>2BT8Gq(;9QX` zMc~2YI`B!#3Mcrj=W}zULOl6k&x5vp<0k=P_B1IL2lzjLuZYsQjq?>5{psh?AJ&=n zruBk%O>2~FI37ym7~W$;&0UwXrSIQK8)`?!Sn|0%*X+AWtsNvAPnVRYye2~OH8Wyy zp9K&oA)pj>3S>H68mkD`eJs$F@oje+Ei~)(o>~gx>TW4EqG(B~)LL$PBXPbYg2P#6GINE zF`bQ0hUy7mGIPWG>z?K)pp~RlgZ=auXC3NP7T;^##KtF9q;Is~*)LjQnY+hL_WLN$ zz(35cm1MY{f&BwknSO24-*5M)*2SA3USWUe%c^D$ua9ooNfjx{pswsaRclwybz(Mv$M$vU%vHhelu*3=HfC#rVNRP53qnWej0M1 z+>NDArcj`ahuwSWp@_r^(>J{u(i#f+VI)@2Y+{yrZkUem-+R-FY2*a-(7~- 
z&!7uqgEZuq;<#r1$MefsEh`B^zJ;m$pMMo%ga@f0fUJBfi?7rYlKh30NC#XSpm3El>A+ zU*bF|ynOq5L)>qPZMroP>@VfF$6jc8+L34A#9>Q)_(0=1;xJECVY3bu}{1{*tlbd>T3UHuix z__IP)dc!&_gBXMSE;3ZVb4DJ$^L2ewJ5%&SoH0rkfx=<4CVxe8HMl;Uh9S!9A(ojx zqjAHrHvD6**}#B0d^Qe~?J8T}q|8!3>%@|(^7f?k*Fkwnj!55}>Hb$^fdVd9Laq4T z581c!?~Tf=1bUz+Ivg3(b6lWrD&^Z^VjJ;*^3AzB_z+g588x8|pc1Pu%nL;i!)Y?x zCYjP9-50Y3>wc?P)d5@c(HD;8Ixzz2ij+2GUCE5ppJwy&K9tQzb3gWcF-yt?%^@Z} z6~$4IJMA|aEvLA%(I|)bNR8KTzwYc{uMX|}bZUz!sW4ykflp-4ooYQdw3WOy1*ARm zRiL8f&J)sDkvRepgBQuknRy4M#=kD7Ki^ZQAP_{h8uwj(`)8dYIw*6{j03Zb=vJTS zdkf_U_u|yIWflGRc)>*Ns>QlBpS}KBRR8nTY=N<{eDp{|L(UesQWAf;Kx;E>&~VI< zE7Tiqa6X#O*~6Sti|>BkQ56~1yX&NTId;6u`sb(SCjOoJ(?Dwri{pMJP00j3@}XDf z=30!Y4!n>-fn3--&3l_VL&T|rZtv8&3kspA1jVZ)M-gm6(i@8-#5qI6U}mVArTRwV zqG4`lOo4ht8?g^^cv7snw;`GmZue!{C#`l=-S!v9PSbtL?`=+hF}jC16%X!T(AIAj z-8>&sT-cq~<_EPPF`9%w^~B1-LUNzUJoR z%b?1S|Ki5~y)4gC>;iZzV(1mhUD1JfI%?s=49uCMI zf0;jG4kTtrNHPB8;*D4vwx>7-)cPa$;9e+co@0yfXs7>k45Ki;c-Wm?<&Yxi!qQ@Z zox11YbthoI<(qk*C?eeKO+MfiQdxO=9^gxMg2?%xzfO=3?T?Jxk$kEK zlK;wA{FQGQLJ1bdxx)74wf$UC^cp`bD8~mcM9$=&H12mi6D?|L2u59wPBtqH(58)PHOFpVUn-EsD1H|NZyjOus7i`nUgM z^U>1yNcW}9w8B?^8_)eKAM{UJh_;m0dFMcRspA}hO58nbJVz|mv|lBem(eJh1DTlP zYbdX^N~-B0la(yIk-oROGk@Dc{VPZH&v5!eiiE!+6xfguO+2Iwb+yjZE`2%f3_DqL zyE>+cw4F}~e3x$U;%zuWRgYhL>o*0s6Vre12;8b#4W-fXnP&I5C*$r!D#uhU6gn^G zzA-7K{i{zPDn${FICnN6#r*JE{oj*P|8!=fR+<(c1OI7-C`wFiH_rv2CVT&znOq6Z zA(msaCHpUrf`4`Xi!92!0;SE5M@lO$0}^!VyB#k=t_UyKJ9McrcZ)J^!(`vh{Ci67 zpTotF?E>`1OS??{MrS179EnO~J{(=FBZ$zr@8+Z!d#FDhES-q%bw6>pPsE^s`7p8FjJ}nV$;-eeA7m$L|p2;%E=S*(F*T4Kx6Z;Il6CiJa59}{A(>o zRGL<_Wupl{S8VU@?~=^>Qc1doQJ>ttI=ypDecREdu6pz^iR_}zG!zc9=Re;>@uJ1+ zrrv}dz!#D>cfY3s)}r=0-TPUW?yno)sy=^WT4|kmtalh}GOIHL*~Zji2?kDmVl}9v z{@}Eb0I=u3-#sf+buxq`{0*_whv9~ zsQpmzHluch%=^!iwNw(&6|60$7Y+kWM9!&^D$wKjE1|@TNVcAgYl>{zfCXX1a`bw;iUE#Nwd+>xN*(`< z$1}fpCrIsw0*Ohcy9bvE~1={vq#4yEaZ0Co<0K%h{e4%Vf z|D~w?fHYxCRx0p;^#dN|4@ly=Gx{wM-Rhv~_WFcy2k-X5?>e7X;7GH~?`$8_()w#) zGbuZ4nSO1nUB$nuif6>M5Ui+R>$zR&4< 
zo*!N_{kT=;6t>>U4{4wU!3kW^ZJCOwsBm&wRlDR^I-iPeM=0qga>$*D;ay(37kK%q z((hnbxA^y7n=Y|XV9kd;p1al6Q@Ud8v}H70zm%@QZ8_DHim!K&C_g$EH5%zv9z1hL z{pOt+Z)P$RN%R%nYH6?1H=VnHFRqHi#gB7zm|AEc{+FSEKxVw zU2LuxjP_N;h2krz7sOmiCIyG`-v@q$)AZHSf}`iRzYQK-mA*neI%@Z^xj9Zei20K{ z9nxey&BxWrkVwzJv5>dc?uzN(5x{8V(ocLvhjkxdu^)#MRBZA}j;yNvJ4Bqud}iN3 z-Zao3_Uhh#NRi)l0Uu&m3uL6)9}D4NOP&5DPcxZx$b&4dcvDtFb3T2#P_Hn2yDPM8 za~cu%GaFA=u*x^{d)Nh^8JNPhS*gc3n~UporIvH9h0crGFYqasTlzh&lzlAdF{VBO z=ZMQ&-N%RDDI$(wjQ#$)Ch#fm9Ha;EYx3s|96;)-Qa^D&-S?mny4)%wG)kwFo7@`V za?+z{=W!FPc($N(xO4x;eUac}&RlbFD|PdJ+mq^J9kpL@WI*n7Mp|6zgB`{x!JC%lxp>my3EcgFfu08W$nn{Z5eZ%L@C2=a~c{e)Q1CUIHLRLiVLilqCo(7ui37ELL zO%Lmx9!ReaVO|G&FZ(TL+-?DWeO#3=e7mSs6NfF4&j@C?-kR5fOQQC2!t9=INnZSZ8Hh$E&9&}c)p<8qk9?K{{4#+^qUTF z*fy1}1!8O97jONry75xii*VxjHZi%Z?`Y!AvYj_n4?z}JnDooX$eu9Cr9*hc61LL) z*lh5^WLC>zT{5%zVm<1kSR2ZO*jBsH&7oDvDiexb^bAww8mZ)1q4vA5Rh+?ea99tP zW3&&|@gfK#5xS=04@2YQLnAvS@l+uQIG$hsB6i{JcqKd~-h+3}1-fZ1FlhQnq2J-z z5%%D|D_P!Qf`CA~s{B?)`}0th(LQ>g(?K15Xg!Uwhin#s`2Yd`D5_Jdx@U>$CyQxm ze>q#tv$MHS&j64?PyNY4c#gMk*bWZ+yyFVPZqFGs7=-(-gT%qMYqI$8=3Udfsh;q@ za}{bE+BD(bt4_bvS9CZNO{>0q{=n45dd@Lc$}=oP~VQ(GkJXltQ~`_Ids zo3>0Gy)jxsNyP1bli>ZVaPo_Q{m=vkle7)Qz**ujRQ`L6rMW#B8z-=O?E54ysRU#Y zBw<^>fxO}C71(LCPdB@fW?;_Ls{4y;JGW=ZotcJSJ->qA-awZ!%P8`6X!5u2&Bp{1 zt*R&Fa4#9LNYQ3YJ;D7k3b5l9vEKKtXe1zp|J>vR2*wILY_R1+X$uIAc;N^h^j0A6 zLIZW-?g%`4Z+@xHz4S%^NRv$#SIeZ$Vw}!BK)T1RY#1J~?9euXe30i%(-gb*9UQ*R zuQ<%FuI3J}=|tCB4#`q90XI=HuW$TjYlpJO#i-k-`a*K&kOyy0U$+SQ!cxzB|h_|=uk6=tl~#s(ARNH0^D zow?!=EM!|#(7*1{012S023EGD;SdIZeOfFv2m6MaK- zVISK-v%;*A&eV?RO*Vo7k_-Vm_5jfg$r~L3U3lV5l$&>E*%^Nhp=O~}SaN+XO%g}R zlfX)=Kf@?7c;IE5{h7S`jEloGDDLA6U@NM@elS<8wa9K3Uhd7xm(6S8Tpb+MFKxc7 zzEHVm{Wh~iak=1kW%?g|Dr&r6O^kDqRp5K)ly) zOdAM!2Oq)I*t7Ptue)ShQ7kWyrT^TkBZ1D>X-qkC!psO8f-s+FFY}SuPf*9RL@2yr zrPfHvM=NA^j`Mhk#;{>NK-%~5M0#Uvao7SOqTnG0yB>7V5QVU9M{3O!QFQ}!Jq$sOngI2+M)c|Jp#_&7F|eF| z7p8EKMvgdHbCvaaS9XOM%J6Ing|rV2M3xV{HA2{r=MYpa*fJ6!O3zXMY)v7kV)T*w zJ4@cZwBZtw|IRrGPgP3VROu^c93p*J6E7n7!p<6i$b 
zg=6Ac8a2y@LY*JVZSlc536j(s{rQ;W$MaY8SAdNnMGPc%?2$W}b%e!$b^_uZ>zA-Q zIBy@GWfrVLl;ol*S+C6wlc#>Asx%+tJ@dM&r#V(?r$2@l^9G(N4c6K=arp@jF}_Ev zerwFP-^mt`cTamCGw-cnv{?Igi=uPe!a@Q&z&C6*nAyVC#P|wzJV%0!&ifm+bXyQ9 zorK)Mt;D?q)2kmHV+(jfjcPy`65my})Xg*5!qcioaa9`>b8U)i(y3bx0z8_1Blbo) z5*5`2xse6Wu8Kk3G^i$=2O&%PH^p^7B|`aB{Kh7&U?@=z;*--o?ur(mo^&|Kn-&DY zGj{YwxnrL0rtC$k-VrVUCXr$LaHLLcnXg;^z>Wg)qs1Oq<5{riT~FyiXekdyysd+|p|$LUVdW zCO>eC)R<(-ky9?0dYMM~Haa5oM$TeAwgqE-Zr$<+hs#K%c5(1Zi!l=#(DuaZ%WUb_ zya_uY*Ns%Qb3LsyrU(6@YWl{Y4a8SXT|PUi+m?oln?b1e1?n4gT`28KRi}g*6Lf)B zmPrCjQ1_Bfv~sQ_06oqqDy_I8TW;l4^~a5eRVfI}nPC!Mx6#z*shqua?=h-Di?^!`^Bb&-=m(Mt>p%KPIt>Czu3iS+xemW<~;!xcP%}Zn}ba~&Y!k? zCSumFxzR=j+WdNB6hZ}~>3mv$qKOOInO4;Mo$zrOIR{$HK45(cmTi~)gPE$zUumK{ zI-5r~i2DXllE>>leI&G2QKf70-0=uHT-PivtC z<|2>a83Y=xz-!d%el@j1#$UbO+m}gQ8}o!!?+&!WUs|!d*fq~eA>`ynDO65W@N9Bw ze-gDtHZ}WQX0MuB^|O&9L*+pZ%ip}_Sk3vxAM~y1q67M7hH_x~^5M8|111w>o|xmc zzga>VKqTEy{Oz=4UNuDNUDglf(89qmE&8|1M%>Q=^{{*&eoL55PdFVr5x&!ck{(EK zXo8eRR&27_L6Ho=L5^-0^?HnAFOF@#Jm0k`HRY)xxB=jIb9NbX*6g%!Az}v8<}^v+ z-aCakfKGHRu`b+RI!l<6dQYt7mU`Vf*57rA^dY^U%tr8Z1|!BoBhj&6p4P1Q>mE)Q z7G7sIaRbs>=Gfbw+Z4O1@R004kd;Dnzp0-ZnF6V`R^#@h%m-w1WPvAA(usp{Kbx2U z1wJ&K_#6unW4SXK9ux;Pk{uyE+wihTJmpTjt3yDdv2S+Yyb ze-)PkH}a#)M2=o}ms9JCf2y8O*Anu?@4=!XoZ}clA{~WhY(mmwM5nN4=c$*U%+gl@ zXshzm+fU=JQFP@&qVBOY}b-vQ}CV+A~*IoIN*W@vcB_8JXwQkZqC4ng#Np}r}eNJ1g_EnH{y~H zGXo)t@L2RYTVmWtSJxM={d7>7GFIqF1=q!^FQxMpAD%TIoOQ1oxi8p3w7+F z{PN`E^w5~}#$2IiB|M=0aXHOrTH>$!AO6+Llg8(*w#zo{b3nYe3_G@h#ATA+=l;UL zyiuFLa+*mYyANfu)g*}F8EuRR&vg%pE^T#=4mCktt;qg2l{#Z@eF zwT;D}JbbH3Ww^UOz1UlB1jl>H>4JNRHl<62r838JM4oNK)X1um!dYRDNA>Arg>L)9 zn+(Ccp|oliJ9o6~5%i4gyfQbhyV*EN(C(0R8ZsM6=$a0*uPsAPg=|;`0m>7&R6E21 z8jkW;gI`c4?8fSZ7D7jAsSV+gO>YjAIY0o$n7tDu`i1v)sR-IU8c}WLPQ1Qni{alLgGnBE>%?%%=Q(X*V!v{@nJuvU zX%BjT$K&4XF)u1Js)fp4P8;UmR}|L#ZLvyyl19&*6#}S^;yps;>(<;!77F9RALj@$ z%@O{toH#D-2o5`%J z;UnM{ImE_zNdh*9rPrURGXVXOlO&DQ7EK!ibVsaQEif)o_tK&*N0eSYtYfYbA=3HF zmA)_b6m&m<4koR}(QiS=>X^nn_5q 
zJz&by!+Q)ewN==sZ+h>&W>0{kzMy%CmLPU6vTw5Y=ce6yy1!~u%zGdL$F9@jtRD?~ ze2r^w?Bb@Kjs(-pxU*qLJx8XS@I6Go8~>g^UfpNPN^n)d+b>>LxLjcTQ7E8e%j^=_ z3ap2m=G!u__^M7=H{P6O(Px`9b7$dIJ*LTFmlj%P&ox1qM}otO%6LkMc978TZq-4_ zXSD^NmxtB6nxg}+69Pev=v-Yq@nJh|8-#C+jKbUI0<)V;tE`WE5W6sPVvBl3RgFRNh_xMWJ|=l0SP zG*m1xnriI}G91oh0zXfi4xKm@khbNmih2EH zB_0`6UUo>uvfacceKiCm`BeRY8(pt~&jM-6^immF)6pj}562>|uKRLXm%gzWroQ#* zeNUEdAEy9+sEBtS-99Q5#?B;b!^Qn(8JlOE%q>i1cjWgc3TRDKj^S0v>SuP1ADI0J zkzE+w9CG*AVQwbE#V>lunDw-FhO+rA7M*o)L*9fD%?>sSe<{FIvPuP;C>R@_E;dcr z5=z;(M}OgqUG|zIZb1VRY6j z2zw1`ce~s|!Qy03(Im0aUZrp}KGb3H%glOIl#Z-qQO$>OLs=4ZrCmQ>fzR#bBxecCoQxPG$C4N{RPC z(I%KwA*LEO5SDt--HH5`ZC9wV{Q`S;Z;x3xCca#dvzV-jH#T7~RnG-|4Qua~ z10M@LX;Hgj@d;LtTzvGb=Nf)Sv?DY#GRMdeupbXR3E>wKeQ~L;;orOy+$;<#5G&X z21#@CTn|Kig(o$S!t=!p-vI}+Nx9;@^#$v?p0}n!vVWMrw=AClJoPIw1@aK(BCwnq z1-3B=m0Laa&-TCfJ0*F1Zvi3XKJteNqM>bcA51LRqjbNpTN)KG>>grBSXqEnW)4Uo z^GjFB_fkstPBRfW77ja2lAp&Z@?Jfk^H$massEOpB%Uv2%AVir^yhf$%&HuLe3jhP45c@?Y<&FjqBC^c!96Y~U$( z-Egn6L;Ipz@7x@q*&-%SOAw}yo1kr21R81Otj%l|ZnaJ?t`j5S$f}`+K()yz^Dh;h za?V3(wq2%`@9{A;wADQ@B63q;D?8TtWY3spfsgs85GHI=?#Ce{#4yCKCfOUQA*5A; zV?c+Hw1KVn21@(aLe`6GITHc9a;CG$$iK|WZd{{A*qqezzZ3t$WV$w}PYnNrN)j&! 
z=^+}W3PIw`?&+7f<6F|3h&76zJXjdkPh&sdAEJPISkrB_Fr!C(ojt5Z17FPuh%uORAB#S}omw=2=oJQx+?K&5`{1qEKQrWOPsN8c}L=k^tew(?F$c^vf!f0cr9hF7!rSl2 zs%k9viOWo?oy!|I(Ot^eOV&uSM~XF*=lR30f~T*eD1W1)EbJcx_z1 z0`0vPt8zs!x8JBYcc>#ZL&j=4%cs(s2nF(_tltePj0ffQGLofDBbgY2QFD+T!a0Jv zR)}-QQJl!ce^gz`3!xeBM)i%YsVO`+45o@b6|>D_&}5IA?7dn6Rc+(gG3xO(*tiyN zqiv!p<^~C&2-|OsW<6Az_U>=l-u*xvZ$I2~G8TSqc6;~YgHACmDHJioG1g_4$0SGG z<;cX5Qf*3ZY?Uc?H8++bpiH-tn-uYNWb?Yg~xBb*NtoQ zz5tR#m_HeA|4yN6-o_&VaI^q=SbwVvt!b+5Il0kUs&FFDX*Q#rw@hvCuwZfa_Pd~c zrIA9VuHNb+M*&WL?S46E!3z1t4mQ{ja2cj>*^@cAe1s~!^wt&bDdOTBu$${7gc!Zx zE#CTq$fCW>%#howASAe0Gemj{6g4z?FX^vzPcmnV3jsP97>b<1Hum|R9q{3 zbG0+xoW{39Nctmzn&YkW$eYtj(NAx@t$VF$7~YV~gVYuWo|)ENZt)1V6M58Mh+b)~ zId_Xc=2b}ep0IGG@P-@CdMh+i^1R8~y9~z1T|vQ7SCNd2Fldp%$P`sS^7atK=)9|6 zN=hC4QZ$ezx8yTWs3F{&%;Z5XlKW-#n6Es$@eQe6Y5OR}DfbPZXU5FM{)VRREMbi3 z$yEfs&BMyRu*=#y764QqRF@7s<#rxqK04`+GkVTfOc-`FHYEAN!aN)M@N#p0- zAv^stL73s0P)|d*Ci28bZrp~wptSXHXR~W zrKfbNfnpvZ0>Q)xvHOu{2~)F+yKlXdR_jAgBk!uEYoI}=G^y70GQN#z)h@0Hf%VrA z!og7QS76kHh7t75+Yg<52s#D}8?LDn1GJo$v8sADFX)^%>fTo)I8U%me&+ndcR*GZ zkEcT7A?bFb_VsJTMGtRp**Tj09w`^)su|~1hks%M{k@?fTq>J1Vw+rO1paSR~06*mnsH*>VB4m+U#meL*8_O7r0bb z^(GfL=UnZXcdICKU2&_^{n|^r_y~uo^zB239@NkVbqjRVI+;Z%{FFxiu>`LAphNY> zIKv<{3BmjUS4s)t^!4r}$BzR{J@s#7$HE#Ow;xW7P1D`C-Z2{pM<@)XooCdeU7eBD zk2tJ62AmG5aMv(-fP|d~y()Ffq3kReoan(K9}9K{7W*z;2a!ZLi;C}nGxN({_>#$u z;bVwqOvd*q8{x_&l5o<)-%)hd>1cj=UK)6quEm8)OwjX3Pmu7Mo_z;~;?-9(bE~>7 zyD}ADI+NRE992&f71Zal8U~QwJYkfi6F$5?y!a^R^{UEC&Ct_Gt|7T&QouG(+;e%s z8ls-2bpJylhX8N4_CZ8)1VfcJ)0BL$|c4Ogz;TvGif6)x3Td{wNzHNRv$9sq3Z1`IufIcq9XU5K)m+oGvz><5D zv*5I2pg42FzxVhj^Bv!>2sNtwbU@^h-EgtG>L9E&+;f{EN-TctlN+W#e~r@gKJaHS zT-)QvALL&%@}CQU3oS znftqA3@?$UTzI9vu~rTJyMpmwLb|D-pcm&;lPdZO|5@euFKriGsTFJy2mg4e720s| rKOX4+|MvesYySxP{vSk3*E1qHAiLjHjGzJm{!o_Jcv~rJ8uGsYgHd0n literal 0 HcmV?d00001 diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index 6f0bb1afa71..6bfa3f90bc1 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -1,19 +1,6 @@ 
+ AArch -AnyCallable -autoreload -autoreloading -CPython -Fargate -Firehose -Gunicorn -HTTPPropagator -INfo -IPv -MySQL -OpenTracing -Runtimes -RuntimeErrors -SpanContext +agentless aiobotocore aiohttp aiomysql @@ -21,22 +8,26 @@ aiopg aioredis algolia algoliasearch -agentless analytics +AnyCallable api app appsec +AppSec aredis args ascii asgi asm +assertIn async asyncio asyncpg attrs autodetected autopatching +autoreload +autoreloading aws backend backends @@ -45,15 +36,18 @@ backported backporting bdd bikeshedding +Blowfish booleans boto botocore -CGroup cassandra +cattrs +CGroup cgroups cherrypy ciapp client_ip +CMake codepath collect committer @@ -64,15 +58,20 @@ contextvar contextvars coroutine coroutines +CPU +CPython +CUPTI Cython datadog datadoghq +dataset datastore datastores dbapi ddtrace deallocating deprecations +DES deserializing django docstring @@ -80,60 +79,75 @@ doctest dogpile dogpile.cache dogstatsd -dunder dsn +dunder elasticsearch elasticsearch1 elasticsearch7 embeddings +Enablement enqueue +enqueuer entrypoint entrypoints env -enqueuer eol eventbridge exec +Fargate fastapi +Firehose formatter -gRPC generativeai gevent -graphql +Gitlab +GPU graphene +graphql greenlet greenlets grpc +gRPC gunicorn +Gunicorn hostname hostnames +hotspot http httplib +HTTPPropagator https httpx -iPython iast +IAST +importlib +INfo ini InitContainer initializer integration integrations ip +IPv +iPython iterable -JSON jinja js +JSON kafka kinesis +Kinesis kombu kubernetes kwarg kwargs -LLM langchain langchain_community +libdatadog +libddwaf lifecycle linters +LLM lockfiles logbook loguru @@ -150,30 +164,37 @@ middlewares misencoded moderations mongoengine +msgpack multiline multiprocess multithreaded mypy mysql +MySQL mysqlclient mysqldb -msgpack +# tests/contrib/openai/test_openai_v1.py +Nam namespace NeedsAppKey +NSight obfuscator +ObjectProxy +oce openai opensearch opentelemetry opentracer opentracing +OpenTracing otel -ObjectProxy packfile packfiles 
parameterized parsers patcher perf +Perfetto pid plugin posix @@ -183,6 +204,7 @@ preconfigured prepend prepended profiler +programmatically protobuf proxying psutil @@ -199,28 +221,34 @@ pyodbc pyston pytest pytest-bdd +PyTorch quickstart ratelimit redis rediscluster renderer renderers -resolvers repo +resolvers respawn riotfile -rq +RLock rowcount +rq runnable runtime -runtimes runtime-id -RLock +RuntimeErrors +runtimes +Runtimes sanic screenshots serializable +serverless +Serverless sha sns +SpanContext sql sqlalchemy sqlite @@ -239,23 +267,27 @@ subdomains submodule submodules substring +suitespec +TensorBoard testagent TestCase testrunner +Timeseries timestamp tokenizer tracecontext tracestate tweens -uWSGI +# docs/configuration.rst +uest unbuffered unicode uninstrumented unittest unix +unobfuscated unpatch unpatched -unobfuscated unregister unshallow unvendored @@ -264,6 +296,7 @@ url urls username uvicorn +uWSGI vendored versioned vertexai @@ -278,26 +311,3 @@ Wrapt wsgi xfail yaaredis -Kinesis -AppSec -libddwaf -Serverless -serverless -cattrs -IAST -programmatically -DES -Blowfish -Gitlab -Enablement -hotspot -CMake -libdatadog -importlib -oce -assertIn -# tests/contrib/openai/test_openai_v1.py -Nam -# docs/configuration.rst -uest -suitespec diff --git a/hatch.toml b/hatch.toml index 21610b6c776..f3a3c2cee36 100644 --- a/hatch.toml +++ b/hatch.toml @@ -368,6 +368,34 @@ python = ["3.10", "3.11", "3.12"] +## pytorch profiling test + +[envs.profiling_pytorch] +dependencies = [ + "pytest", + "pytest-cov", + "requests", + "hypothesis", + "torch>=1.8.1", + "torchvision", + "lz4", +] + +[envs.profiling_pytorch.env-vars] +DD_PROFILING_ENABLED = "true" +DD_PROFILING_PYTORCH_ENABLED = "true" +CMAKE_BUILD_PARALLEL_LEVEL = "12" + +[envs.profiling_pytorch.scripts] +test = [ + "uname -a", + "pip freeze", + "python -m pytest tests/profiling_v2/test_pytorch.py -vvv --capture=tee-sys", +] + +[[envs.profiling_pytorch.matrix]] +python = ["3.12"] + ## Unit Tests 
[envs.ddtrace_unit_tests] diff --git a/releasenotes/notes/profiling-add-pytorch-integration-0683123b7bb83f99.yaml b/releasenotes/notes/profiling-add-pytorch-integration-0683123b7bb83f99.yaml new file mode 100644 index 00000000000..891e039a204 --- /dev/null +++ b/releasenotes/notes/profiling-add-pytorch-integration-0683123b7bb83f99.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + profiling: Adds an experimental integration with the PyTorch profiler which can be enabled + by setting ``DD_PROFILING_PYTORCH_ENABLED=true``. This feature instruments the PyTorch + profiler API (https://pytorch.org/docs/stable/_modules/torch/profiler/profiler.html) + so that GPU profiling data can be sent to Datadog for visualization. + This feature supports torch version >= 1.8.1. diff --git a/tests/profiling_v2/simple_program_pytorch_gpu.py b/tests/profiling_v2/simple_program_pytorch_gpu.py new file mode 100644 index 00000000000..8d846c52de4 --- /dev/null +++ b/tests/profiling_v2/simple_program_pytorch_gpu.py @@ -0,0 +1,42 @@ +import torch +import torch.nn +import torch.optim +from torch.profiler import ProfilerActivity +import torch.utils.data +import torchvision.datasets +import torchvision.models +from torchvision.models import ResNet18_Weights +from torchvision.models import resnet18 +import torchvision.transforms as T + + +def cifar(): + transform = T.Compose([T.Resize(224), T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + train_set = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform) + train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True) + device = torch.device("cuda") + model = resnet18(weights=ResNet18_Weights.DEFAULT).cuda() + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + model.train() + + def train(data): + inputs, labels = data[0].to(device=device), data[1].to(device=device) + outputs = model(inputs) + loss = criterion(outputs, 
labels) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.profiler.profile( + activities=[ProfilerActivity.CUDA], + ): + for step, batch_data in enumerate(train_loader): + print("step #%d" % step) + if step >= (1 + 1 + 3) * 2: + break + train(batch_data) + + +if __name__ == "__main__": + cifar() diff --git a/tests/profiling_v2/test_pytorch.py b/tests/profiling_v2/test_pytorch.py new file mode 100644 index 00000000000..e50d5b46d55 --- /dev/null +++ b/tests/profiling_v2/test_pytorch.py @@ -0,0 +1,44 @@ +import os +import sys + +import pytest + +from tests.profiling.collector import pprof_utils +from tests.utils import call_program + + +@pytest.mark.skipif(not os.getenv("DD_PROFILING_PYTORCH_ENABLED", False), reason="Not testing pytorch GPU") +def test_call_script_pytorch_gpu(tmp_path, monkeypatch): + filename = str(tmp_path / "pprof") + monkeypatch.setenv("DD_PROFILING_OUTPUT_PPROF", filename) + monkeypatch.setenv("DD_PROFILING_ENABLED", "1") + monkeypatch.setenv("DD_PROFILING_PYTORCH_ENABLED", "1") + stdout, stderr, exitcode, pid = call_program( + "ddtrace-run", sys.executable, os.path.join(os.path.dirname(__file__), "simple_program_pytorch_gpu.py") + ) + assert exitcode == 0, f"Profiler exited with code {exitcode}. 
Stderr: {stderr}" + + profile = pprof_utils.parse_profile(filename) + samples = pprof_utils.get_samples_with_value_type(profile, "gpu-time") + assert len(samples) > 0 + print("number of gpu time samples: ", len(samples)) + print("first sample: ", samples[0]) + + expected_sample = pprof_utils.StackEvent( + locations=[ + pprof_utils.StackLocation( + function_name="Memset (Device)", + filename="unknown-file", + line_no=0, + ), + pprof_utils.StackLocation( + function_name="PYTORCH_DeviceType.CUDA", + filename="unknown-file", + line_no=0, + ), + ], + ) + pprof_utils.assert_profile_has_sample(profile, samples=samples, expected_sample=expected_sample) + + gpu_device_label_samples = pprof_utils.get_samples_with_label_key(profile, "gpu device name") + assert len(gpu_device_label_samples) > 0 From fa4aa3deebd3e5260f64e82e409a2574bccc456c Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Mon, 16 Dec 2024 08:44:47 +0100 Subject: [PATCH 35/78] chore(iast): attach to root span if running inside pytest (#11604) Co-authored-by: Alberto Vara --- ddtrace/appsec/_iast/__init__.py | 22 ++++ ddtrace/appsec/_iast/_iast_request_context.py | 80 ++++++++----- ddtrace/appsec/_iast/_pytest_plugin.py | 110 ++++++++++++++++++ ddtrace/appsec/_iast/reporter.py | 68 +++++++++++ ddtrace/appsec/iast/__init__.py | 1 + ddtrace/contrib/pytest/_plugin_v2.py | 7 ++ ddtrace/contrib/pytest/plugin.py | 14 +++ tests/appsec/iast/aspects/test_add_aspect.py | 5 +- tests/appsec/iast/conftest.py | 4 +- .../taint_tracking/test_native_taint_range.py | 5 +- .../taint_tracking/test_taint_tracking.py | 4 +- .../contrib/django/test_django_appsec_iast.py | 4 +- .../fastapi/test_fastapi_appsec_iast.py | 5 +- tests/contrib/flask/app.py | 7 ++ ...st_appsec_flask_pytest_iast_no_snapshot.py | 59 ++++++++++ tests/contrib/flask/test_flask_appsec_iast.py | 3 +- tests/contrib/flask/test_flask_pytest_iast.py | 28 +++++ 17 files changed, 391 insertions(+), 35 deletions(-) create mode 100644 ddtrace/appsec/_iast/_pytest_plugin.py 
create mode 100644 tests/contrib/flask/test_appsec_flask_pytest_iast_no_snapshot.py create mode 100644 tests/contrib/flask/test_flask_pytest_iast.py diff --git a/ddtrace/appsec/_iast/__init__.py b/ddtrace/appsec/_iast/__init__.py index 3e4b04a0b6a..fe488c87e46 100644 --- a/ddtrace/appsec/_iast/__init__.py +++ b/ddtrace/appsec/_iast/__init__.py @@ -29,10 +29,12 @@ def wrapped_function(wrapped, instance, args, kwargs): """ # noqa: RST201, RST213, RST210 import inspect +import os import sys from ddtrace.internal.logger import get_logger from ddtrace.internal.module import ModuleWatchdog +from ddtrace.settings.asm import config as asm_config from ._overhead_control_engine import OverheadControl from ._utils import _is_iast_enabled @@ -91,6 +93,26 @@ def enable_iast_propagation(): _iast_propagation_enabled = True +def _iast_pytest_activation(): + global _iast_propagation_enabled + global oce + if _iast_propagation_enabled: + return + os.environ["DD_IAST_ENABLED"] = os.environ.get("DD_IAST_ENABLED") or "1" + os.environ["_DD_IAST_USE_ROOT_SPAN"] = os.environ.get("_DD_IAST_USE_ROOT_SPAN") or "true" + os.environ["DD_IAST_REQUEST_SAMPLING"] = os.environ.get("DD_IAST_REQUEST_SAMPLING") or "100.0" + os.environ["_DD_APPSEC_DEDUPLICATION_ENABLED"] = os.environ.get("_DD_APPSEC_DEDUPLICATION_ENABLED") or "false" + os.environ["DD_IAST_VULNERABILITIES_PER_REQUEST"] = os.environ.get("DD_IAST_VULNERABILITIES_PER_REQUEST") or "1000" + os.environ["DD_IAST_MAX_CONCURRENT_REQUESTS"] = os.environ.get("DD_IAST_MAX_CONCURRENT_REQUESTS") or "1000" + + asm_config._iast_request_sampling = 100.0 + asm_config._deduplication_enabled = False + asm_config._iast_max_vulnerabilities_per_requests = 1000 + asm_config._iast_max_concurrent_requests = 1000 + enable_iast_propagation() + oce.reconfigure() + + def disable_iast_propagation(): """Remove IAST AST patching from the ModuleWatchdog. 
Only for testing proposes""" # DEV: These imports are here to avoid _ast.ast_patching import in the top level diff --git a/ddtrace/appsec/_iast/_iast_request_context.py b/ddtrace/appsec/_iast/_iast_request_context.py index f49d2bc59bd..b711ae61195 100644 --- a/ddtrace/appsec/_iast/_iast_request_context.py +++ b/ddtrace/appsec/_iast/_iast_request_context.py @@ -1,3 +1,4 @@ +import os import sys from typing import Dict from typing import Optional @@ -21,6 +22,7 @@ from ddtrace.constants import ORIGIN_KEY from ddtrace.internal import core from ddtrace.internal.logger import get_logger +from ddtrace.internal.utils.formats import asbool log = get_logger(__name__) @@ -109,39 +111,61 @@ def is_iast_request_enabled(): return False +def _move_iast_data_to_root_span(): + return asbool(os.getenv("_DD_IAST_USE_ROOT_SPAN")) + + +def _create_and_attach_iast_report_to_span(req_span: Span, existing_data: Optional[str], merge: bool = False): + report_data: Optional[IastSpanReporter] = get_iast_reporter() + if merge and existing_data is not None and report_data is not None: + previous_data = IastSpanReporter() + previous_data._from_json(existing_data) + + report_data._merge(previous_data) + + if report_data is not None: + report_data.build_and_scrub_value_parts() + req_span.set_tag_str(IAST.JSON, report_data._to_str()) + _set_metric_iast_request_tainted() + _set_span_tag_iast_request_tainted(req_span) + _set_span_tag_iast_executed_sink(req_span) + + set_iast_request_enabled(False) + end_iast_context(req_span) + + if req_span.get_tag(ORIGIN_KEY) is None: + req_span.set_tag_str(ORIGIN_KEY, APPSEC.ORIGIN_VALUE) + + oce.release_request() + + def _iast_end_request(ctx=None, span=None, *args, **kwargs): try: - if span: - req_span = span + move_to_root = _move_iast_data_to_root_span() + if move_to_root: + req_span = core.get_root_span() else: - req_span = ctx.get_item("req_span") + if span: + req_span = span + else: + req_span = ctx.get_item("req_span") if _is_iast_enabled(): - exist_data 
= req_span.get_tag(IAST.JSON) - if exist_data is None and req_span.get_metric(IAST.ENABLED) is None: - if not is_iast_request_enabled(): - req_span.set_metric(IAST.ENABLED, 0.0) - end_iast_context(req_span) - oce.release_request() - return - - req_span.set_metric(IAST.ENABLED, 1.0) - report_data: Optional[IastSpanReporter] = get_iast_reporter() - - if report_data: - report_data.build_and_scrub_value_parts() - req_span.set_tag_str(IAST.JSON, report_data._to_str()) - _set_metric_iast_request_tainted() - _set_span_tag_iast_request_tainted(req_span) - _set_span_tag_iast_executed_sink(req_span) - - set_iast_request_enabled(False) - end_iast_context(req_span) - - if req_span.get_tag(ORIGIN_KEY) is None: - req_span.set_tag_str(ORIGIN_KEY, APPSEC.ORIGIN_VALUE) - - oce.release_request() + existing_data = req_span.get_tag(IAST.JSON) + if existing_data is None: + if req_span.get_metric(IAST.ENABLED) is None: + if not is_iast_request_enabled(): + req_span.set_metric(IAST.ENABLED, 0.0) + end_iast_context(req_span) + oce.release_request() + return + + req_span.set_metric(IAST.ENABLED, 1.0) + _create_and_attach_iast_report_to_span(req_span, existing_data, merge=False) + + elif move_to_root: + # Data exists from a previous request, we will merge both reports + _create_and_attach_iast_report_to_span(req_span, existing_data, merge=True) except Exception: log.debug("[IAST] Error finishing IAST context", exc_info=True) diff --git a/ddtrace/appsec/_iast/_pytest_plugin.py b/ddtrace/appsec/_iast/_pytest_plugin.py new file mode 100644 index 00000000000..672acc4a031 --- /dev/null +++ b/ddtrace/appsec/_iast/_pytest_plugin.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +import dataclasses +import json +from typing import List + +from ddtrace.appsec._constants import IAST +from ddtrace.appsec._iast._utils import _is_iast_enabled +from ddtrace.appsec._iast.reporter import Vulnerability +from ddtrace.internal.logger import get_logger + + +log = get_logger(__name__) + + 
+@dataclasses.dataclass(unsafe_hash=True) +class VulnerabilityFoundInTest(Vulnerability): + test: str + + +try: + import pytest + + @pytest.fixture(autouse=_is_iast_enabled()) + def ddtrace_iast(request, ddspan): + """ + Extract the vulnerabilities discovered in tests. + Optionally output the test as failed if vulnerabilities are found. + """ + yield + data = ddspan.get_tag(IAST.JSON) + if not data: + return + + json_data = json.loads(data) + + if json_data["vulnerabilities"]: + for vuln in json_data["vulnerabilities"]: + vuln_data.append( + VulnerabilityFoundInTest( + test=request.node.nodeid, + type=vuln["type"], + evidence=vuln["evidence"], + location=vuln["location"], + ) + ) + + if request.config.getoption("ddtrace-iast-fail-tests"): + vulns = ", ".join([vuln["type"] for vuln in json_data["vulnerabilities"]]) + pytest.fail(f"There are vulnerabilities in the code: {vulns}") + +except ImportError: + log.debug("pytest not imported") + + +vuln_data: List[VulnerabilityFoundInTest] = [] + + +def extract_code_snippet(filepath, line_number, context=3): + """Extracts code snippet around the given line number.""" + try: + with open(filepath, "r") as file: + lines = file.readlines() + start = max(0, line_number - context - 1) + end = min(len(lines), line_number + context) + code = lines[start:end] + return code, start # Return lines and starting line number + except Exception: + log.debug("Error reading file %s", filepath, exc_info=True) + return "", 0 + + +def print_iast_report(terminalreporter): + if not _is_iast_enabled(): + return + + if not vuln_data: + terminalreporter.write_sep("=", "Datadog Code Security Report", purple=True, bold=True) + terminalreporter.write_line("No vulnerabilities found.") + return + + terminalreporter.write_sep("=", "Datadog Code Security Report", purple=True, bold=True) + + for entry in vuln_data: + terminalreporter.write_line(f"Test: {entry.test}", bold=True) + high_severity = entry.type.endswith("INJECTION") + 
terminalreporter.write_line( + f"Vulnerability: {entry.type}", + # TODO(@gnufede): Add remediation links, where remediation is a dict with the vulnerability as key + # f" - \033]8;;{remediation[entry.type]}\033\\Remediation\033]8;;\033\\ \n", + bold=True, + red=high_severity, + yellow=not high_severity, + ) + terminalreporter.write_line(f"Location: {entry.location['path']}:{entry.location['line']}") + code_snippet, start_line = extract_code_snippet(entry.location["path"], entry.location["line"]) + + if code_snippet: + terminalreporter.write_line("Code:") + + if start_line is not None: + for i, line in enumerate(code_snippet, start=start_line + 1): + if i == entry.location["line"]: + terminalreporter.write(f"{i:4d}: {line}", bold=True, purple=True) + else: + terminalreporter.write(f"{i:4d}: {line}") + else: + # If there's an error extracting the code snippet + terminalreporter.write_line(code_snippet[0], bold=True) + + terminalreporter.write_sep("=") diff --git a/ddtrace/appsec/_iast/reporter.py b/ddtrace/appsec/_iast/reporter.py index 249d8e21278..c7004909cc9 100644 --- a/ddtrace/appsec/_iast/reporter.py +++ b/ddtrace/appsec/_iast/reporter.py @@ -121,6 +121,74 @@ def __hash__(self) -> int: """ return reduce(operator.xor, (hash(obj) for obj in set(self.sources) | self.vulnerabilities)) + def _merge(self, other: "IastSpanReporter") -> None: + """ + Merges the current IAST span reporter with another IAST span reporter. + + Args: + - other (IastSpanReporter): IAST span reporter to merge. 
+ """ + len_previous_sources = len(self.sources) + self.sources = self.sources + other.sources + self._update_vulnerabilities(other, len_previous_sources) + + def _update_vulnerabilities(self, other: "IastSpanReporter", offset: int): + for vuln in other.vulnerabilities: + if ( + hasattr(vuln, "evidence") + and hasattr(vuln.evidence, "valueParts") + and vuln.evidence.valueParts is not None + ): + for part in vuln.evidence.valueParts: + if "source" in part: + part["source"] = part["source"] + offset + self.vulnerabilities.add(vuln) + + def _from_json(self, json_str: str): + """ + Initializes the IAST span reporter from a JSON string. + + Args: + - json_str (str): JSON string. + """ + from ._taint_tracking import str_to_origin + + data = json.loads(json_str) + self.sources = [] + for i in data["sources"]: + source = Source( + origin=str_to_origin(i["origin"]), + name=i["name"], + ) + if "value" in i: + source.value = i["value"] + if "redacted" in i: + source.redacted = i["redacted"] + if "pattern" in i: + source.pattern = i["pattern"] + self.sources.append(source) + + self.vulnerabilities = set() + for i in data["vulnerabilities"]: + evidence = Evidence() + if "ranges" in i["evidence"]: + evidence._ranges = i["evidence"]["ranges"] + if "value" in i["evidence"]: + evidence.value = i["evidence"]["value"] + if "valueParts" in i["evidence"]: + evidence.valueParts = i["evidence"]["valueParts"] + if "dialect" in i["evidence"]: + evidence.dialect = i["evidence"]["dialect"] + self.vulnerabilities.add( + Vulnerability( + type=i["type"], + evidence=evidence, + location=Location( + spanId=i["location"]["spanId"], path=i["location"]["path"], line=i["location"]["line"] + ), + ) + ) + def _to_dict(self): return { "sources": [i._to_dict() for i in self.sources], diff --git a/ddtrace/appsec/iast/__init__.py b/ddtrace/appsec/iast/__init__.py index c72c2be9167..ece53d092cb 100644 --- a/ddtrace/appsec/iast/__init__.py +++ b/ddtrace/appsec/iast/__init__.py @@ -1,2 +1,3 @@ +from 
ddtrace.appsec._iast import _iast_pytest_activation # noqa: F401 from ddtrace.appsec._iast import ddtrace_iast_flask_patch # noqa: F401 from ddtrace.appsec._iast import enable_iast_propagation # noqa: F401 diff --git a/ddtrace/contrib/pytest/_plugin_v2.py b/ddtrace/contrib/pytest/_plugin_v2.py index d3825578d7a..e9420f62527 100644 --- a/ddtrace/contrib/pytest/_plugin_v2.py +++ b/ddtrace/contrib/pytest/_plugin_v2.py @@ -533,6 +533,13 @@ def _pytest_terminal_summary_post_yield(terminalreporter, failed_reports_initial @pytest.hookimpl(hookwrapper=True, tryfirst=True) def pytest_terminal_summary(terminalreporter, exitstatus, config): """Report flaky or failed tests""" + try: + from ddtrace.appsec._iast._pytest_plugin import print_iast_report + + print_iast_report(terminalreporter) + except Exception: # noqa: E722 + log.debug("Encountered error during code security summary", exc_info=True) + if not is_test_visibility_enabled(): yield return diff --git a/ddtrace/contrib/pytest/plugin.py b/ddtrace/contrib/pytest/plugin.py index a8da8c3a5ca..a09a81be49a 100644 --- a/ddtrace/contrib/pytest/plugin.py +++ b/ddtrace/contrib/pytest/plugin.py @@ -15,6 +15,8 @@ import pytest +from ddtrace.appsec._iast._pytest_plugin import ddtrace_iast # noqa:F401 +from ddtrace.appsec._iast._utils import _is_iast_enabled from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 from ddtrace.contrib.pytest._utils import _extract_span from ddtrace.contrib.pytest._utils import _pytest_version_supports_itr @@ -67,10 +69,22 @@ def pytest_addoption(parser): help=DDTRACE_INCLUDE_CLASS_HELP_MSG, ) + group._addoption( + "--ddtrace-iast-fail-tests", + action="store_true", + dest="ddtrace-iast-fail-tests", + default=False, + help=DDTRACE_INCLUDE_CLASS_HELP_MSG, + ) + parser.addini("ddtrace", DDTRACE_HELP_MSG, type="bool") parser.addini("no-ddtrace", DDTRACE_HELP_MSG, type="bool") parser.addini("ddtrace-patch-all", PATCH_ALL_HELP_MSG, type="bool") parser.addini("ddtrace-include-class-name", 
DDTRACE_INCLUDE_CLASS_HELP_MSG, type="bool") + if _is_iast_enabled(): + from ddtrace.appsec._iast import _iast_pytest_activation + + _iast_pytest_activation() # Version-specific pytest hooks diff --git a/tests/appsec/iast/aspects/test_add_aspect.py b/tests/appsec/iast/aspects/test_add_aspect.py index db8f9b212c8..f9f86a4413c 100644 --- a/tests/appsec/iast/aspects/test_add_aspect.py +++ b/tests/appsec/iast/aspects/test_add_aspect.py @@ -15,6 +15,7 @@ from ddtrace.appsec._iast._taint_tracking.aspects import add_aspect from tests.appsec.iast.conftest import _end_iast_context_and_oce from tests.appsec.iast.conftest import _start_iast_context_and_oce +from tests.utils import override_env from tests.utils import override_global_config @@ -319,7 +320,9 @@ def test_propagate_ranges_with_no_context(caplog): ) reset_context() - with override_global_config(dict(_iast_debug=True)), caplog.at_level(logging.DEBUG): + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( + dict(_iast_debug=True) + ), caplog.at_level(logging.DEBUG): result_2 = add_aspect(result, "another_string") create_context() diff --git a/tests/appsec/iast/conftest.py b/tests/appsec/iast/conftest.py index a277e912829..3daa3611f51 100644 --- a/tests/appsec/iast/conftest.py +++ b/tests/appsec/iast/conftest.py @@ -142,7 +142,9 @@ def check_native_code_exception_in_each_python_aspect_test(request, caplog): if "skip_iast_check_logs" in request.keywords: yield else: - with override_global_config(dict(_iast_debug=True)), caplog.at_level(logging.DEBUG): + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( + dict(_iast_debug=True) + ), caplog.at_level(logging.DEBUG): yield log_messages = [record.message for record in caplog.get_records("call")] diff --git a/tests/appsec/iast/taint_tracking/test_native_taint_range.py b/tests/appsec/iast/taint_tracking/test_native_taint_range.py index d1683b5ffb4..00079d7772b 100644 --- 
a/tests/appsec/iast/taint_tracking/test_native_taint_range.py +++ b/tests/appsec/iast/taint_tracking/test_native_taint_range.py @@ -32,6 +32,7 @@ from ddtrace.appsec._iast._taint_tracking.aspects import format_aspect from ddtrace.appsec._iast._taint_tracking.aspects import join_aspect from tests.appsec.iast.conftest import IAST_VALID_LOG +from tests.utils import override_env from tests.utils import override_global_config @@ -499,7 +500,9 @@ def test_race_conditions_reset_contexts_threads(caplog, telemetry_writer): """we want to validate context is working correctly among multiple request and no race condition creating and destroying contexts """ - with override_global_config(dict(_iast_debug=True)), caplog.at_level(logging.DEBUG): + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( + dict(_iast_debug=True) + ), caplog.at_level(logging.DEBUG): pool = ThreadPool(processes=3) results_async = [pool.apply_async(reset_contexts_loop) for _ in range(70)] _ = [res.get() for res in results_async] diff --git a/tests/appsec/iast/taint_tracking/test_taint_tracking.py b/tests/appsec/iast/taint_tracking/test_taint_tracking.py index 90d9b0c064a..ac3d009633f 100644 --- a/tests/appsec/iast/taint_tracking/test_taint_tracking.py +++ b/tests/appsec/iast/taint_tracking/test_taint_tracking.py @@ -47,7 +47,9 @@ def test_taint_object_with_no_context_should_be_noop(): @pytest.mark.skip_iast_check_logs def test_propagate_ranges_with_no_context(caplog): reset_context() - with override_global_config(dict(_iast_debug=True)), caplog.at_level(logging.DEBUG): + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( + dict(_iast_debug=True) + ), caplog.at_level(logging.DEBUG): string_input = taint_pyobject( pyobject="abcde", source_name="abcde", source_value="abcde", source_origin=OriginType.PARAMETER ) diff --git a/tests/contrib/django/test_django_appsec_iast.py b/tests/contrib/django/test_django_appsec_iast.py index 89495dcac80..efe0fa9acd0 
100644 --- a/tests/contrib/django/test_django_appsec_iast.py +++ b/tests/contrib/django/test_django_appsec_iast.py @@ -41,7 +41,9 @@ def check_native_code_exception_in_each_django_test(request, caplog, telemetry_w yield else: caplog.set_level(logging.DEBUG) - with override_global_config(dict(_iast_debug=True)), caplog.at_level(logging.DEBUG): + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( + dict(_iast_debug=True) + ), caplog.at_level(logging.DEBUG): yield log_messages = [record.message for record in caplog.get_records("call")] diff --git a/tests/contrib/fastapi/test_fastapi_appsec_iast.py b/tests/contrib/fastapi/test_fastapi_appsec_iast.py index 9688c7d06b7..7f1a140ffc2 100644 --- a/tests/contrib/fastapi/test_fastapi_appsec_iast.py +++ b/tests/contrib/fastapi/test_fastapi_appsec_iast.py @@ -24,6 +24,7 @@ from ddtrace.contrib.internal.fastapi.patch import patch as patch_fastapi from ddtrace.contrib.sqlite3.patch import patch as patch_sqlite_sqli from tests.appsec.iast.iast_utils import get_line_and_hash +from tests.utils import override_env from tests.utils import override_global_config @@ -57,7 +58,9 @@ def check_native_code_exception_in_each_fastapi_test(request, caplog, telemetry_ yield else: caplog.set_level(logging.DEBUG) - with override_global_config(dict(_iast_debug=True)), caplog.at_level(logging.DEBUG): + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( + dict(_iast_debug=True) + ), caplog.at_level(logging.DEBUG): yield log_messages = [record.msg for record in caplog.get_records("call")] diff --git a/tests/contrib/flask/app.py b/tests/contrib/flask/app.py index fbd06dd6990..82059ce0eaa 100644 --- a/tests/contrib/flask/app.py +++ b/tests/contrib/flask/app.py @@ -1,3 +1,4 @@ +import hashlib import os import subprocess import sys @@ -100,3 +101,9 @@ def run_subcommunicatenoshell(): subp.wait() ret = subp.returncode return str(ret), 200 + + +@app.route("/md5sum") +def md5sum(): + data = 
request.args.get("q").encode() + return hashlib.md5(data).hexdigest() diff --git a/tests/contrib/flask/test_appsec_flask_pytest_iast_no_snapshot.py b/tests/contrib/flask/test_appsec_flask_pytest_iast_no_snapshot.py new file mode 100644 index 00000000000..801cffa4b8a --- /dev/null +++ b/tests/contrib/flask/test_appsec_flask_pytest_iast_no_snapshot.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 + +import os +import subprocess +import time + +import pytest + + +@pytest.mark.parametrize("iast_enabled", ["true", "false"]) +@pytest.mark.parametrize("iast_request_sampling", ["100.0", "0.0"]) +@pytest.mark.parametrize("pytest_use_new_plugin", ["true", "false"]) +def test_flask_pytest_iast(iast_enabled, iast_request_sampling, pytest_use_new_plugin): + from tests.utils import _build_env + + env = _build_env() + env.update( + { + # Avoid noisy database spans being output on app startup/teardown. + "DD_TRACE_SQLITE3_ENABLED": "0", + "DD_TRACE_SQLITE_ENABLED": "0", + "DD_IAST_ENABLED": iast_enabled, + "DD_TRACE_DEBUG": "true", + "DD_PYTEST_USE_NEW_PLUGIN_BETA": pytest_use_new_plugin, + "DD_IAST_REQUEST_SAMPLING": iast_request_sampling, + # "DD_API_KEY": "invalidapikey", + # "DD_CIVISIBILITY_AGENTLESS_ENABLED": "1", + } + ) + proc = subprocess.Popen( + "pytest --ddtrace --ddtrace-patch-all --no-cov tests/contrib/flask/test_flask_pytest_iast.py".split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + env=env, + preexec_fn=os.setsid, + cwd=str(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))), + ) + try: + time.sleep(0.2) + finally: + proc.wait() + # DEV uncomment this line if you need more info locally + # stdout = proc.stdout.read() + + stderr = proc.stderr.read() + split_stderr = stderr.decode("utf-8").split("\n") + + found = False + for line in split_stderr: + if "WEAK_HASH" in line: + assert line.startswith("finishing span name='pytest.test'") + found = True + break + + if iast_enabled == "true" and iast_request_sampling == 
"100": + assert found + else: + assert not found diff --git a/tests/contrib/flask/test_flask_appsec_iast.py b/tests/contrib/flask/test_flask_appsec_iast.py index 020cbe27a98..f1bed61cb9d 100644 --- a/tests/contrib/flask/test_flask_appsec_iast.py +++ b/tests/contrib/flask/test_flask_appsec_iast.py @@ -19,6 +19,7 @@ from ddtrace.contrib.sqlite3.patch import patch as patch_sqlite_sqli from tests.appsec.iast.iast_utils import get_line_and_hash from tests.contrib.flask import BaseFlaskTestCase +from tests.utils import override_env from tests.utils import override_global_config @@ -35,7 +36,7 @@ def inject_fixtures(self, caplog, telemetry_writer): # noqa: F811 self._caplog = caplog def setUp(self): - with override_global_config( + with override_env({"_DD_IAST_USE_ROOT_SPAN": "false"}), override_global_config( dict( _iast_enabled=True, _deduplication_enabled=False, diff --git a/tests/contrib/flask/test_flask_pytest_iast.py b/tests/contrib/flask/test_flask_pytest_iast.py new file mode 100644 index 00000000000..fcacf6b36c9 --- /dev/null +++ b/tests/contrib/flask/test_flask_pytest_iast.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +""" +This test suite is actually used as fixture in end-to-end test +for pytest IAST integration. +""" + +import urllib.parse + +import pytest + +from .app import app as real_app + + +@pytest.fixture() +def app(): + return real_app + + +@pytest.fixture() +def client(app): + return app.test_client() + + +def test_md5_request(client): + data = b"foobar" + urlencoded_data = urllib.parse.urlencode({"q": data}) + response = client.get("/md5sum?%s" % urlencoded_data) + assert response.status_code == 200 From fb82c64f516048e7146ce6a5bcd8961a6cded29a Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Mon, 16 Dec 2024 11:10:18 +0000 Subject: [PATCH 36/78] ci: pass correct suite names in CircleCI gen script (#11668) We make sure to pass the correct suite names where required so that the right jobs can be included/excluded. 
The current behaviour failed to resolve paths for suite names, causing all the Circle CI jobs to run. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- scripts/gen_circleci_config.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/scripts/gen_circleci_config.py b/scripts/gen_circleci_config.py index 0c7c8344e58..7225ea38d22 100644 --- a/scripts/gen_circleci_config.py +++ b/scripts/gen_circleci_config.py @@ -17,10 +17,9 @@ def gen_required_suites(template: dict) -> None: required_suites = template["requires_tests"]["requires"] = [] for_each_testrun_needed( suites=sorted( - set(n.rpartition("::")[-1] for n, s in 
get_suites().items() if not s.get("skip", False)) - & set(template["jobs"].keys()) + set(n for n, s in get_suites().items() if not s.get("skip", False)) & set(template["jobs"].keys()) ), - action=lambda suite: required_suites.append(suite), + action=lambda suite: required_suites.append(suite.rpartition("::")[-1]), git_selections=extract_git_commit_selections(os.getenv("GIT_COMMIT_DESC", "")), ) From cc03ddc643360c272cf1ca8fd541760af170fca0 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Mon, 16 Dec 2024 11:18:04 +0000 Subject: [PATCH 37/78] chore(di): accurate function line mapping (#11607) We make the line-to-function mapping more accurate by capturing the code objects exported by a module before the module is executed. This gives us the full code object content of the model, which matches the source more closely than the executed module. As a result, we are able to get hold of decorated functions before they "disappear" inside an arbitrary data structure of a custom decorator. ## Performance Considerations The extra accuracy comes with memory and computing costs. The captured code objects have to be stored on the module. We expect the memory cost to be < 1% the overall memory footprint of the target application. When a function location is looked up, the code object needs to be resolved to a function. We query the GC to find what we are looking for. This is generally an expensive operation, so we perform it on demand, when an instrumentation needs to be applied, instead of looking up every code object after a module import. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_debugger.py | 4 + ddtrace/debugging/_function/discovery.py | 155 ++++++++++++++++++--- ddtrace/internal/utils/inspection.py | 10 ++ tests/debugging/function/test_discovery.py | 26 +++- tests/debugging/test_debugger.py | 104 ++++++-------- tests/submod/custom_decorated_stuff.py | 17 +++ 6 files changed, 235 insertions(+), 81 deletions(-) create mode 100644 tests/submod/custom_decorated_stuff.py diff --git a/ddtrace/debugging/_debugger.py b/ddtrace/debugging/_debugger.py index 65b9ecfec5e..7d4a283b26d 100644 --- a/ddtrace/debugging/_debugger.py +++ 
b/ddtrace/debugging/_debugger.py @@ -6,6 +6,7 @@ from pathlib import Path import sys import threading +from types import CodeType from types import FunctionType from types import ModuleType from types import TracebackType @@ -73,6 +74,9 @@ class DebuggerError(Exception): class DebuggerModuleWatchdog(ModuleWatchdog): _locations: Set[str] = set() + def transform(self, code: CodeType, module: ModuleType) -> CodeType: + return FunctionDiscovery.transformer(code, module) + @classmethod def register_origin_hook(cls, origin: Path, hook: ModuleHookType) -> None: if origin in cls._locations: diff --git a/ddtrace/debugging/_function/discovery.py b/ddtrace/debugging/_function/discovery.py index 9cabb4b3a04..e7d37246f5f 100644 --- a/ddtrace/debugging/_function/discovery.py +++ b/ddtrace/debugging/_function/discovery.py @@ -4,6 +4,7 @@ from wrapt import FunctionWrapper +from ddtrace.internal.compat import PYTHON_VERSION_INFO from ddtrace.internal.utils.inspection import undecorated @@ -12,6 +13,7 @@ except ImportError: from typing_extensions import Protocol # type: ignore[assignment] +from types import CodeType from types import FunctionType from types import ModuleType from typing import Any @@ -27,6 +29,7 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.module import origin from ddtrace.internal.safety import _isinstance +from ddtrace.internal.utils.inspection import collect_code_objects from ddtrace.internal.utils.inspection import linenos @@ -49,6 +52,8 @@ class FullyNamed(Protocol): class FullyNamedFunction(FullyNamed): """A fully named function object.""" + __qualname__: str + def __call__(self, *args, **kwargs): pass @@ -119,7 +124,48 @@ def _local_name(name: str, f: FunctionType) -> str: return func_name -def _collect_functions(module: ModuleType) -> Dict[str, FullyNamedFunction]: +class _FunctionCodePair: + """Function-Code Pair + + This class allows us to resolve a code object to a function object by + querying the GC on-demand. 
+ """ + + __slots__ = ("function", "code") + + def __init__(self, code: Optional[CodeType] = None, function: Optional[FunctionType] = None) -> None: + if code is not None and function is not None and function.__code__ is not code: + raise ValueError("Function and code objects do not match") + + self.function = function + self.code = function.__code__ if function is not None else code + + def resolve(self) -> FullyNamedFunction: + import gc + + if self.function is not None: + return cast(FullyNamedFunction, self.function) + + code = self.code + functions = [_ for _ in gc.get_referrers(code) if isinstance(_, FunctionType) and _.__code__ is code] + n = len(functions) + if n == 0: + msg = f"Cannot resolve code object to function: {code}" + raise ValueError(msg) + if n > 1: + # This can happen for functions that are created at runtime rather + # than compile time. We do not support this case deliberately for + # now. + msg = f"Multiple functions found for code object {code}" + raise ValueError(msg) + + f = cast(FullyNamedFunction, functions[0]) + f.__fullname__ = f"{f.__module__}.{f.__qualname__}" + + return f + + +def _collect_functions(module: ModuleType) -> Dict[str, _FunctionCodePair]: """Collect functions from a given module. All the collected functions are augmented with a ``__fullname__`` attribute @@ -160,7 +206,9 @@ def _collect_functions(module: ModuleType) -> Dict[str, FullyNamedFunction]: # try to retrieve any potentially decorated function so # that we don't end up returning the decorator function # instead of the original function. 
- functions[fullname] = undecorated(f, name, path) if name == k else o + functions[fullname] = _FunctionCodePair( + function=cast(FunctionType, undecorated(f, name, path) if name == k else o) + ) try: if f.__closure__: @@ -189,28 +237,46 @@ class FunctionDiscovery(defaultdict): def __init__(self, module: ModuleType) -> None: super().__init__(list) - self._module = module - self._fullname_index = {} - functions = _collect_functions(module) - seen_functions = set() module_path = origin(module) if module_path is None: # We are not going to collect anything because no code objects will # match the origin. return - for fname, function in functions.items(): - if ( - function not in seen_functions - and Path(cast(FunctionType, function).__code__.co_filename).resolve() == module_path - ): - # We only map line numbers for functions that actually belong to - # the module. - for lineno in linenos(cast(FunctionType, function)): - self[lineno].append(function) - self._fullname_index[fname] = function - seen_functions.add(function) + self._module = module + self._fullname_index = _collect_functions(module) + if PYTHON_VERSION_INFO < (3, 11): + self._name_index: Dict[str, List[_FunctionCodePair]] = defaultdict(list) + self._cached: Dict[int, List[FullyNamedFunction]] = {} + + # Create the line to function mapping + if hasattr(module, "__dd_code__"): + for code in module.__dd_code__: + fcp = _FunctionCodePair(code=code) + if PYTHON_VERSION_INFO >= (3, 11): + # From this version of Python we can derive the qualified + # name of the function directly from the code object. 
+ fullname = f"{module.__name__}.{code.co_qualname}" + self._fullname_index[fullname] = fcp + else: + self._name_index[code.co_name].append(fcp) + for lineno in linenos(code): + self[lineno].append(_FunctionCodePair(code=code)) + else: + # If the module was already loaded we don't have its code object + seen_functions = set() + for _, fcp in self._fullname_index.items(): + function = fcp.resolve() + if ( + function not in seen_functions + and Path(cast(FunctionType, function).__code__.co_filename).resolve() == module_path + ): + # We only map line numbers for functions that actually belong to + # the module. + for lineno in linenos(cast(FunctionType, function)): + self[lineno].append(_FunctionCodePair(function=cast(FunctionType, function))) + seen_functions.add(function) def at_line(self, line: int) -> List[FullyNamedFunction]: """Get the functions at the given line. @@ -218,14 +284,55 @@ def at_line(self, line: int) -> List[FullyNamedFunction]: Note that, in general, there can be multiple copies of the same functions. This can happen as a result, e.g., of using decorators. """ - return self[line] + if line in self._cached: + return self._cached[line] + + if line in self: + functions = [] + for fcp in self[line]: + try: + functions.append(fcp.resolve()) + except ValueError: + pass + + if not functions: + del self[line] + else: + self._cached[line] = functions + + return functions + + return [] def by_name(self, qualname: str) -> FullyNamedFunction: """Get the function by its qualified name.""" - fullname = ".".join((self._module.__name__, qualname)) + fullname = f"{self._module.__name__}.{qualname}" try: - return self._fullname_index[fullname] + return self._fullname_index[fullname].resolve() except KeyError: + if PYTHON_VERSION_INFO < (3, 11): + # Check if any code objects whose names match the last part of + # the qualified name have a function with the same qualified + # name. 
+ for name, fcps in self._name_index.items(): + if qualname == name or qualname.endswith(f".{name}"): + for fcp in list(fcps): + try: + f = fcp.resolve() + + # We have resolved the function so we can now + # get its full name + self._fullname_index[f"{self._module.__name__}.{f.__qualname__}"] = fcp + + # We can remove the entry from the name index + fcps.pop(0) + + # If this is the function we are looking for, + # return it + if f.__qualname__ == qualname: + return f + except ValueError: + pass raise ValueError("Function '%s' not found" % fullname) @classmethod @@ -241,4 +348,12 @@ def from_module(cls, module: ModuleType) -> "FunctionDiscovery": return module.__function_discovery__ except AttributeError: fd = module.__function_discovery__ = cls(module) # type: ignore[attr-defined] + if hasattr(module, "__dd_code__"): + # We no longer need to keep this collection around + del module.__dd_code__ return fd + + @classmethod + def transformer(cls, code: CodeType, module: ModuleType) -> CodeType: + module.__dd_code__ = collect_code_objects(code) # type: ignore[attr-defined] # type: ignore[attr-defined] + return code diff --git a/ddtrace/internal/utils/inspection.py b/ddtrace/internal/utils/inspection.py index 7f739d4bf65..bb24a3ae80d 100644 --- a/ddtrace/internal/utils/inspection.py +++ b/ddtrace/internal/utils/inspection.py @@ -5,6 +5,7 @@ from pathlib import Path from types import CodeType from types import FunctionType +from typing import Iterator from typing import Set from typing import cast @@ -112,3 +113,12 @@ def match(g): pass return f + + +def collect_code_objects(code: CodeType) -> Iterator[CodeType]: + q = deque([code]) + while q: + c = q.popleft() + for new_code in (_ for _ in c.co_consts if isinstance(_, CodeType)): + yield new_code + q.append(new_code) diff --git a/tests/debugging/function/test_discovery.py b/tests/debugging/function/test_discovery.py index 1cd977d06af..6f247bc7fd0 100644 --- a/tests/debugging/function/test_discovery.py +++ 
b/tests/debugging/function/test_discovery.py @@ -1,6 +1,7 @@ import pytest from ddtrace.debugging._function.discovery import FunctionDiscovery +from ddtrace.internal.module import ModuleWatchdog import tests.submod.stuff as stuff @@ -12,7 +13,7 @@ def stuff_discovery(): def test_abs_stuff(): import tests.submod.absstuff as absstuff - assert sorted(FunctionDiscovery.from_module(absstuff).keys()) == [7, 11, 16, 19] + assert set(FunctionDiscovery.from_module(absstuff).keys()) >= {7, 11, 16, 19} def test_function_discovery(stuff_discovery): @@ -106,16 +107,35 @@ def test_discovery_after_external_wrapping(stuff): def wrapper(wrapped, inst, args, kwargs): pass + original_function = stuff.Stuff.instancestuff + wrapt.wrap_function_wrapper(stuff, "Stuff.instancestuff", wrapper) assert isinstance(stuff.Stuff.instancestuff, (wrapt.BoundFunctionWrapper, wrapt.FunctionWrapper)) code = stuff.Stuff.instancestuff.__code__ - f = FunctionDiscovery(stuff)[36][0] + f, *_ = FunctionDiscovery(stuff).at_line(36) - assert isinstance(f, (wrapt.BoundFunctionWrapper, wrapt.FunctionWrapper)) + assert f is original_function or isinstance(f, (wrapt.BoundFunctionWrapper, wrapt.FunctionWrapper)), f assert f.__code__ is code def test_property_non_function_getter(stuff_discovery): with pytest.raises(ValueError): stuff_discovery.by_name("PropertyStuff.foo") + + +def test_custom_decorated_stuff(): + class DiscoveryModuleWatchdog(ModuleWatchdog): + def transform(self, code, module): + return FunctionDiscovery.transformer(code, module) + + DiscoveryModuleWatchdog.install() + + import tests.submod.custom_decorated_stuff as custom_decorated_stuff + + fd = FunctionDiscovery.from_module(custom_decorated_stuff) + + (home,) = fd.at_line(17) + assert home.__qualname__ == "home" + + DiscoveryModuleWatchdog.uninstall() diff --git a/tests/debugging/test_debugger.py b/tests/debugging/test_debugger.py index 2a0bdaf13d8..ed337c27f1e 100644 --- a/tests/debugging/test_debugger.py +++ 
b/tests/debugging/test_debugger.py @@ -39,8 +39,6 @@ from tests.debugging.utils import ddexpr from tests.debugging.utils import ddstrtempl from tests.internal.remoteconfig import rcm_endpoint -from tests.submod.stuff import Stuff -from tests.submod.stuff import modulestuff as imported_modulestuff from tests.utils import TracerTestCase from tests.utils import call_program @@ -71,7 +69,7 @@ def simple_debugger_test(probe, func): return snapshots -def test_debugger_line_probe_on_instance_method(): +def test_debugger_line_probe_on_instance_method(stuff): snapshots = simple_debugger_test( create_snapshot_line_probe( probe_id="probe-instance-method", @@ -79,7 +77,7 @@ def test_debugger_line_probe_on_instance_method(): line=36, condition=None, ), - lambda: Stuff().instancestuff(), + stuff.Stuff().instancestuff, ) (snapshot,) = snapshots @@ -89,15 +87,15 @@ def test_debugger_line_probe_on_instance_method(): assert snapshot["debugger"]["snapshot"]["duration"] is None -def test_debugger_line_probe_on_imported_module_function(): - lineno = min(linenos(imported_modulestuff)) +def test_debugger_line_probe_on_imported_module_function(stuff): + lineno = min(linenos(stuff.modulestuff)) snapshots = simple_debugger_test( create_snapshot_line_probe( probe_id="probe-instance-method", source_file="tests/submod/stuff.py", line=lineno, ), - lambda: imported_modulestuff(42), + lambda: stuff.modulestuff(42), ) (snapshot,) = snapshots @@ -107,7 +105,7 @@ def test_debugger_line_probe_on_imported_module_function(): @pytest.mark.parametrize( - "probe, trigger", + "probe", [ ( create_snapshot_function_probe( @@ -115,8 +113,7 @@ def test_debugger_line_probe_on_imported_module_function(): module="tests.submod.stuff", func_qname="Stuff.instancestuff", rate=1000, - ), - lambda: Stuff().instancestuff(42), + ) ), ( create_snapshot_line_probe( @@ -124,14 +121,11 @@ def test_debugger_line_probe_on_imported_module_function(): source_file="tests/submod/stuff.py", line=36, rate=1000, - ), - lambda: 
Stuff().instancestuff(42), + ) ), ], ) -def test_debugger_probe_new_delete(probe, trigger): - global Stuff - +def test_debugger_probe_new_delete(probe, stuff): with debugger() as d: probe_id = probe.probe_id d.add_probes(probe) @@ -139,7 +133,7 @@ def test_debugger_probe_new_delete(probe, trigger): assert probe in d._probe_registry assert _get_probe_location(probe) in d.__watchdog__._instance._locations - trigger() + stuff.Stuff().instancestuff(42) d.remove_probes(probe) @@ -148,7 +142,7 @@ def test_debugger_probe_new_delete(probe, trigger): assert _get_probe_location(probe) not in d.__watchdog__._instance._locations - trigger() + stuff.Stuff().instancestuff(42) # Unload and reload the module to ensure that the injection hook # has actually been removed. @@ -158,15 +152,15 @@ def test_debugger_probe_new_delete(probe, trigger): __import__("tests.submod.stuff") # Make Stuff refer to the reloaded class - Stuff = sys.modules["tests.submod.stuff"].Stuff + stuff.Stuff = sys.modules["tests.submod.stuff"].Stuff - trigger() + stuff.Stuff().instancestuff(42) (snapshot,) = d.uploader.wait_for_payloads() assert snapshot["debugger"]["snapshot"]["probe"]["id"] == probe_id -def test_debugger_function_probe_on_instance_method(): +def test_debugger_function_probe_on_instance_method(stuff): snapshots = simple_debugger_test( create_snapshot_function_probe( probe_id="probe-instance-method", @@ -174,7 +168,7 @@ def test_debugger_function_probe_on_instance_method(): func_qname="Stuff.instancestuff", condition=None, ), - lambda: Stuff().instancestuff(42), + lambda: stuff.Stuff().instancestuff(42), ) (snapshot,) = snapshots @@ -221,7 +215,7 @@ def test_debugger_function_probe_on_function_with_exception(): assert return_capture["throwable"]["type"] == "Exception" -def test_debugger_invalid_condition(): +def test_debugger_invalid_condition(stuff): with debugger() as d: d.add_probes( create_snapshot_line_probe( @@ -232,12 +226,12 @@ def test_debugger_invalid_condition(): ), good_probe(), ) - 
Stuff().instancestuff() + stuff.Stuff().instancestuff() assert all(s["debugger"]["snapshot"]["probe"]["id"] != "foo" for s in d.uploader.wait_for_payloads()) -def test_debugger_conditional_line_probe_on_instance_method(): +def test_debugger_conditional_line_probe_on_instance_method(stuff): snapshots = simple_debugger_test( create_snapshot_line_probe( probe_id="probe-instance-method", @@ -245,7 +239,7 @@ def test_debugger_conditional_line_probe_on_instance_method(): line=36, condition=DDExpression(dsl="True", callable=dd_compile(True)), ), - lambda: Stuff().instancestuff(), + lambda: stuff.Stuff().instancestuff(), ) (snapshot,) = snapshots @@ -258,7 +252,7 @@ def test_debugger_conditional_line_probe_on_instance_method(): assert captures["locals"] == {} -def test_debugger_invalid_line(): +def test_debugger_invalid_line(stuff): with debugger() as d: d.add_probes( create_snapshot_line_probe( @@ -268,13 +262,13 @@ def test_debugger_invalid_line(): ), good_probe(), ) - Stuff().instancestuff() + stuff.Stuff().instancestuff() assert all(s["debugger"]["snapshot"]["probe"]["id"] != "invalidline" for s in d.uploader.wait_for_payloads()) @mock.patch("ddtrace.debugging._debugger.log") -def test_debugger_invalid_source_file(log): +def test_debugger_invalid_source_file(log, stuff): with debugger() as d: d.add_probes( create_snapshot_line_probe( @@ -284,7 +278,7 @@ def test_debugger_invalid_source_file(log): ), good_probe(), ) - Stuff().instancestuff() + stuff.Stuff().instancestuff() log.error.assert_called_once_with( "Cannot inject probe %s: source file %s cannot be resolved", "invalidsource", "tests/submod/bonkers.py" @@ -293,7 +287,7 @@ def test_debugger_invalid_source_file(log): assert all(s["debugger"]["snapshot"]["probe"]["id"] != "invalidsource" for s in d.uploader.wait_for_payloads()) -def test_debugger_decorated_method(): +def test_debugger_decorated_method(stuff): simple_debugger_test( create_snapshot_line_probe( probe_id="probe-decorated-method", @@ -301,7 +295,7 @@ def 
test_debugger_decorated_method(): line=48, condition=None, ), - Stuff().decoratedstuff, + stuff.Stuff().decoratedstuff, ) @@ -324,7 +318,7 @@ def test_debugger_max_probes(mock_log): mock_log.warning.assert_called_once_with("Too many active probes. Ignoring new ones.") -def test_debugger_tracer_correlation(): +def test_debugger_tracer_correlation(stuff): with debugger() as d: d.add_probes( create_snapshot_line_probe( @@ -338,16 +332,14 @@ def test_debugger_tracer_correlation(): with d._tracer.trace("test-span") as span: trace_id = format_trace_id(span.trace_id) span_id = str(span.span_id) - Stuff().instancestuff() + stuff.Stuff().instancestuff() snapshots = d.uploader.wait_for_payloads() assert all(snapshot["dd"]["trace_id"] == trace_id for snapshot in snapshots) assert all(snapshot["dd"]["span_id"] == span_id for snapshot in snapshots) -def test_debugger_captured_exception(): - from tests.submod import stuff - +def test_debugger_captured_exception(stuff): snapshots = simple_debugger_test( create_snapshot_line_probe( probe_id="captured-exception-test", @@ -364,7 +356,7 @@ def test_debugger_captured_exception(): assert captures["throwable"]["type"] == "Exception" -def test_debugger_multiple_threads(): +def test_debugger_multiple_threads(stuff): with debugger() as d: probes = [ good_probe(), @@ -372,7 +364,7 @@ def test_debugger_multiple_threads(): ] d.add_probes(*probes) - callables = [Stuff().instancestuff, lambda: Stuff().propertystuff] + callables = [stuff.Stuff().instancestuff, lambda: stuff.Stuff().propertystuff] threads = [Thread(target=callables[_ % len(callables)]) for _ in range(10)] for t in threads: @@ -409,59 +401,57 @@ def create_stuff_line_metric_probe(kind, value=None): ) -def test_debugger_metric_probe_simple_count(mock_metrics): +def test_debugger_metric_probe_simple_count(mock_metrics, stuff): with debugger() as d: d.add_probes(create_stuff_line_metric_probe(MetricProbeKind.COUNTER)) - Stuff().instancestuff() + stuff.Stuff().instancestuff() assert ( 
call("probe.test.counter", 1.0, ["foo:bar", "debugger.probeid:metric-probe-test"]) in mock_metrics.increment.mock_calls ) -def test_debugger_metric_probe_count_value(mock_metrics): +def test_debugger_metric_probe_count_value(mock_metrics, stuff): with debugger() as d: d.add_probes(create_stuff_line_metric_probe(MetricProbeKind.COUNTER, {"ref": "bar"})) - Stuff().instancestuff(40) + stuff.Stuff().instancestuff(40) assert ( call("probe.test.counter", 40.0, ["foo:bar", "debugger.probeid:metric-probe-test"]) in mock_metrics.increment.mock_calls ) -def test_debugger_metric_probe_guage_value(mock_metrics): +def test_debugger_metric_probe_guage_value(mock_metrics, stuff): with debugger() as d: d.add_probes(create_stuff_line_metric_probe(MetricProbeKind.GAUGE, {"ref": "bar"})) - Stuff().instancestuff(41) + stuff.Stuff().instancestuff(41) assert ( call("probe.test.counter", 41.0, ["foo:bar", "debugger.probeid:metric-probe-test"]) in mock_metrics.gauge.mock_calls ) -def test_debugger_metric_probe_histogram_value(mock_metrics): +def test_debugger_metric_probe_histogram_value(mock_metrics, stuff): with debugger() as d: d.add_probes(create_stuff_line_metric_probe(MetricProbeKind.HISTOGRAM, {"ref": "bar"})) - Stuff().instancestuff(42) + stuff.Stuff().instancestuff(42) assert ( call("probe.test.counter", 42.0, ["foo:bar", "debugger.probeid:metric-probe-test"]) in mock_metrics.histogram.mock_calls ) -def test_debugger_metric_probe_distribution_value(mock_metrics): +def test_debugger_metric_probe_distribution_value(mock_metrics, stuff): with debugger() as d: d.add_probes(create_stuff_line_metric_probe(MetricProbeKind.DISTRIBUTION, {"ref": "bar"})) - Stuff().instancestuff(43) + stuff.Stuff().instancestuff(43) assert ( call("probe.test.counter", 43.0, ["foo:bar", "debugger.probeid:metric-probe-test"]) in mock_metrics.distribution.mock_calls ) -def test_debugger_multiple_function_probes_on_same_function(): - global Stuff - +def 
test_debugger_multiple_function_probes_on_same_function(stuff): probes = [ create_snapshot_function_probe( probe_id="probe-instance-method-%d" % i, @@ -475,9 +465,9 @@ def test_debugger_multiple_function_probes_on_same_function(): with debugger() as d: d.add_probes(*probes) - wrapping_context = DebuggerWrappingContext.extract(Stuff.instancestuff) + wrapping_context = DebuggerWrappingContext.extract(stuff.Stuff.instancestuff) assert wrapping_context.probes == {probe.probe_id: probe for probe in probes} - Stuff().instancestuff(42) + stuff.Stuff().instancestuff(42) d.collector.wait( lambda q: Counter(s.probe.probe_id for s in q) @@ -492,7 +482,7 @@ def test_debugger_multiple_function_probes_on_same_function(): assert "probe-instance-method-1" not in wrapping_context.probes - Stuff().instancestuff(42) + stuff.Stuff().instancestuff(42) d.collector.wait( lambda q: Counter(s.probe.probe_id for s in q) @@ -505,7 +495,7 @@ def test_debugger_multiple_function_probes_on_same_function(): d.remove_probes(probes[0], probes[2]) - Stuff().instancestuff(42) + stuff.Stuff().instancestuff(42) assert Counter(s.probe.probe_id for s in d.test_queue) == { "probe-instance-method-0": 2, @@ -514,12 +504,10 @@ def test_debugger_multiple_function_probes_on_same_function(): } with pytest.raises(AttributeError): - Stuff.instancestuff.__dd_wrappers__ + stuff.Stuff.instancestuff.__dd_wrappers__ def test_debugger_multiple_function_probes_on_same_lazy_module(): - sys.modules.pop("tests.submod.stuff", None) - probes = [ create_snapshot_function_probe( probe_id="probe-instance-method-%d" % i, diff --git a/tests/submod/custom_decorated_stuff.py b/tests/submod/custom_decorated_stuff.py new file mode 100644 index 00000000000..1150859b6dd --- /dev/null +++ b/tests/submod/custom_decorated_stuff.py @@ -0,0 +1,17 @@ +class App: + def __init__(self): + self.views = {} + + def route(self, path): + def wrapper(view): + self.views[path] = view + + return wrapper + + +app = App() + + +@app.route("/home") +def 
home(): + pass From 073f7f5e0dcb620d10e85e63c2dbc0974a697280 Mon Sep 17 00:00:00 2001 From: Christophe Papazian <114495376+christophe-papazian@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:42:57 +0100 Subject: [PATCH 38/78] chore(ci): upgrade python for build action (#11735) Python 3.7 is no longer supported on Ubuntu. This PR updates the Python version for the build action to 3.12. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .github/workflows/build_deploy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_deploy.yml b/.github/workflows/build_deploy.yml index 
77d52c757f5..df5184f83e5 100644 --- a/.github/workflows/build_deploy.yml +++ b/.github/workflows/build_deploy.yml @@ -40,7 +40,7 @@ jobs: - uses: actions/setup-python@v5 name: Install Python with: - python-version: '3.7' + python-version: '3.12' - name: Build sdist run: | pip install "setuptools_scm[toml]>=4" "cython" "cmake>=3.24.2,<3.28" "setuptools-rust" From 1e3bcc778f54631f3dbc9e1010262a2e1ba878e9 Mon Sep 17 00:00:00 2001 From: Romain Komorn <136473744+romainkomorndatadog@users.noreply.github.com> Date: Mon, 16 Dec 2024 15:24:38 +0000 Subject: [PATCH 39/78] chore(ci_visibility): support pytest-benchmark plugin in v2 of pytest plugin (#11736) This updates the v2 pytest plugin to support adding benchmark data from the `pytest-benchmark` plugin. In the past, this was provided through the `ddtrace.pytest_benchmark` plugin, but this moves the logic into the main `ddtrace` plugin in order to be able to deprecate the smaller plugin. A new `_set_item_tags()` method is added to the TestVisibility item classes to be able to modify/add tags that would otherwise be set by the parent class. This is slightly different from the existing `_set_span_tags()` that is mostly intended to manipulate tags on the span directly. The constants in the `pytest_benchmark` plugin directory are somewhat clumsy and should be cleaned up in `ddtrace==3.0.0`. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .riot/requirements/498209d.txt | 22 ++++ .riot/requirements/e5cd460.txt | 22 ++++ ddtrace/contrib/pytest/_benchmark_utils.py | 35 ++++++ ddtrace/contrib/pytest/_plugin_v2.py | 5 + ddtrace/contrib/pytest_benchmark/constants.py | 21 ++++ ddtrace/contrib/pytest_benchmark/plugin.py | 14 ++- ddtrace/internal/ci_visibility/api/_base.py | 7 +- ddtrace/internal/ci_visibility/api/_test.py | 28 ++++- ddtrace/internal/ci_visibility/constants.py | 1 + ddtrace/internal/ci_visibility/recorder.py | 11 ++ .../test_visibility/_benchmark_mixin.py | 75 ++++++++++++ ddtrace/internal/test_visibility/api.py 
| 3 +- riotfile.py | 32 ++--- .../pytest_benchmark/test_pytest_benchmark.py | 110 +++--------------- 14 files changed, 274 insertions(+), 112 deletions(-) create mode 100644 .riot/requirements/498209d.txt create mode 100644 .riot/requirements/e5cd460.txt create mode 100644 ddtrace/contrib/pytest/_benchmark_utils.py create mode 100644 ddtrace/internal/test_visibility/_benchmark_mixin.py diff --git a/.riot/requirements/498209d.txt b/.riot/requirements/498209d.txt new file mode 100644 index 00000000000..b975548628c --- /dev/null +++ b/.riot/requirements/498209d.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/498209d.in +# +attrs==24.3.0 +coverage[toml]==7.6.9 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +py-cpuinfo==9.0.0 +pytest==8.3.4 +pytest-benchmark==4.0.0 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/e5cd460.txt b/.riot/requirements/e5cd460.txt new file mode 100644 index 00000000000..d14867cc689 --- /dev/null +++ b/.riot/requirements/e5cd460.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e5cd460.in +# +attrs==24.3.0 +coverage[toml]==7.6.9 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +py-cpuinfo==9.0.0 +pytest==8.3.4 +pytest-benchmark==4.0.0 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 +sortedcontainers==2.4.0 diff --git a/ddtrace/contrib/pytest/_benchmark_utils.py b/ddtrace/contrib/pytest/_benchmark_utils.py new file mode 100644 index 00000000000..77dd6061b13 --- /dev/null +++ b/ddtrace/contrib/pytest/_benchmark_utils.py @@ -0,0 +1,35 @@ +import pytest + +from 
ddtrace.contrib.pytest._utils import _get_test_id_from_item +from ddtrace.contrib.pytest_benchmark.constants import PLUGIN_METRICS_V2 +from ddtrace.internal.logger import get_logger +from ddtrace.internal.test_visibility._benchmark_mixin import BenchmarkDurationData +from ddtrace.internal.test_visibility.api import InternalTest + + +log = get_logger(__name__) + + +def _set_benchmark_data_from_item(item: pytest.Item) -> None: + try: + fixture = hasattr(item, "funcargs") and item.funcargs.get("benchmark") + + if not fixture or not fixture.stats: + return + + stat_object = item.funcargs.get("benchmark").stats.stats + + data_kwargs = {} + + for data_attr, stats_attr in PLUGIN_METRICS_V2.items(): + if hasattr(stat_object, stats_attr): + data_kwargs[data_attr] = getattr(stat_object, stats_attr) + + test_id = _get_test_id_from_item(item) + benchmark_data = BenchmarkDurationData(**data_kwargs) + + InternalTest.set_benchmark_data(test_id, benchmark_data, is_benchmark=True) + + except Exception: # noqa: E722 + log.debug("Unable to set benchmark data for item %s", item, exc_info=True) + return None diff --git a/ddtrace/contrib/pytest/_plugin_v2.py b/ddtrace/contrib/pytest/_plugin_v2.py index e9420f62527..e51739ccee9 100644 --- a/ddtrace/contrib/pytest/_plugin_v2.py +++ b/ddtrace/contrib/pytest/_plugin_v2.py @@ -13,6 +13,7 @@ from ddtrace.contrib.internal.coverage.patch import run_coverage_report from ddtrace.contrib.internal.coverage.utils import _is_coverage_invoked_by_coverage_run from ddtrace.contrib.internal.coverage.utils import _is_coverage_patched +from ddtrace.contrib.pytest._benchmark_utils import _set_benchmark_data_from_item from ddtrace.contrib.pytest._plugin_v1 import _extract_reason from ddtrace.contrib.pytest._plugin_v1 import _is_pytest_cov_enabled from ddtrace.contrib.pytest._types import _pytest_report_teststatus_return_type @@ -457,6 +458,10 @@ def _pytest_runtest_makereport(item: pytest.Item, call: pytest_CallInfo, outcome if test_outcome.status is None 
and call.when != "teardown": return + # Support for pytest-benchmark plugin + if item.config.pluginmanager.hasplugin("benchmark"): + _set_benchmark_data_from_item(item) + # Record a result if we haven't already recorded it: if not InternalTest.is_finished(test_id): InternalTest.finish(test_id, test_outcome.status, test_outcome.skip_reason, test_outcome.exc_info) diff --git a/ddtrace/contrib/pytest_benchmark/constants.py b/ddtrace/contrib/pytest_benchmark/constants.py index 974208509f7..b4c4f7f5b27 100644 --- a/ddtrace/contrib/pytest_benchmark/constants.py +++ b/ddtrace/contrib/pytest_benchmark/constants.py @@ -56,3 +56,24 @@ STATISTICS_STDDEV_OUTLIERS: PLUGIN_STDDEV_OUTLIERS, STATISTICS_TOTAL: PLUGIN_TOTAL, } + +PLUGIN_METRICS_V2 = { + "duration_mean": PLUGIN_MEAN, + "duration_runs": PLUGIN_ROUNDS, + "statistics_hd15iqr": PLUGIN_HD15IQR, + "statistics_iqr": PLUGIN_IQR, + "statistics_iqr_outliers": PLUGIN_IQR_OUTLIERS, + "statistics_ld15iqr": PLUGIN_LD15IQR, + "statistics_max": PLUGIN_MAX, + "statistics_mean": PLUGIN_MEAN, + "statistics_median": PLUGIN_MEDIAN, + "statistics_min": PLUGIN_MIN, + "statistics_n": PLUGIN_ROUNDS, + "statistics_ops": PLUGIN_OPS, + "statistics_outliers": PLUGIN_OUTLIERS, + "statistics_q1": PLUGIN_Q1, + "statistics_q3": PLUGIN_Q3, + "statistics_std_dev": PLUGIN_STDDEV, + "statistics_std_dev_outliers": PLUGIN_STDDEV_OUTLIERS, + "statistics_total": PLUGIN_TOTAL, +} diff --git a/ddtrace/contrib/pytest_benchmark/plugin.py b/ddtrace/contrib/pytest_benchmark/plugin.py index 461b5f931ac..4cb76148dbc 100644 --- a/ddtrace/contrib/pytest_benchmark/plugin.py +++ b/ddtrace/contrib/pytest_benchmark/plugin.py @@ -1,9 +1,19 @@ +from ddtrace import DDTraceDeprecationWarning +from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 from ddtrace.contrib.pytest.plugin import is_enabled as is_ddtrace_enabled +from ddtrace.vendor.debtcollector import deprecate def pytest_configure(config): if config.pluginmanager.hasplugin("benchmark") and 
config.pluginmanager.hasplugin("ddtrace"): if is_ddtrace_enabled(config): - from ._plugin import _PytestBenchmarkPlugin + deprecate( + "this version of the ddtrace.pytest_benchmark plugin is deprecated", + message="it will be integrated with the main pytest ddtrace plugin", + removal_version="3.0.0", + category=DDTraceDeprecationWarning, + ) + if not _USE_PLUGIN_V2: + from ._plugin import _PytestBenchmarkPlugin - config.pluginmanager.register(_PytestBenchmarkPlugin(), "_datadog-pytest-benchmark") + config.pluginmanager.register(_PytestBenchmarkPlugin(), "_datadog-pytest-benchmark") diff --git a/ddtrace/internal/ci_visibility/api/_base.py b/ddtrace/internal/ci_visibility/api/_base.py index dbaa48d1af3..f1e2cd2b3b0 100644 --- a/ddtrace/internal/ci_visibility/api/_base.py +++ b/ddtrace/internal/ci_visibility/api/_base.py @@ -207,7 +207,8 @@ def _finish_span(self, override_finish_time: Optional[float] = None) -> None: if self._session_settings.atr_settings is not None and self._session_settings.atr_settings.enabled: self._set_atr_tags() - # Allow item-level _set_span_tags() to potentially overwrite default and hierarchy tags. + # Allow items to potentially overwrite default and hierarchy tags. 
+ self._set_item_tags() self._set_span_tags() self._add_all_tags_to_span() @@ -247,6 +248,10 @@ def _set_default_tags(self) -> None: if self._source_file_info.end_line is not None: self.set_tag(test.SOURCE_END, self._source_file_info.end_line) + def _set_item_tags(self) -> None: + """Overridable by subclasses to set tags specific to the item type""" + pass + def _set_itr_tags(self, itr_enabled: bool) -> None: """Note: some tags are also added in the parent class as well as some individual item classes""" if not itr_enabled: diff --git a/ddtrace/internal/ci_visibility/api/_test.py b/ddtrace/internal/ci_visibility/api/_test.py index c0eb615cd03..7a61473ff92 100644 --- a/ddtrace/internal/ci_visibility/api/_test.py +++ b/ddtrace/internal/ci_visibility/api/_test.py @@ -5,6 +5,7 @@ from typing import Optional from typing import Union +from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_INFO from ddtrace.ext import SpanTypes from ddtrace.ext import test from ddtrace.ext.test_visibility import ITR_SKIPPING_LEVEL @@ -17,6 +18,7 @@ from ddtrace.internal.ci_visibility.api._base import TestVisibilityItemBase from ddtrace.internal.ci_visibility.api._base import TestVisibilitySessionSettings from ddtrace.internal.ci_visibility.api._coverage_data import TestVisibilityCoverageData +from ddtrace.internal.ci_visibility.constants import BENCHMARK from ddtrace.internal.ci_visibility.constants import TEST from ddtrace.internal.ci_visibility.constants import TEST_EFD_ABORT_REASON from ddtrace.internal.ci_visibility.constants import TEST_IS_NEW @@ -25,6 +27,8 @@ from ddtrace.internal.ci_visibility.telemetry.events import record_event_created_test from ddtrace.internal.ci_visibility.telemetry.events import record_event_finished_test from ddtrace.internal.logger import get_logger +from ddtrace.internal.test_visibility._benchmark_mixin import BENCHMARK_TAG_MAP +from ddtrace.internal.test_visibility._benchmark_mixin import BenchmarkDurationData from 
ddtrace.internal.test_visibility._efd_mixins import EFDTestStatus from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId from ddtrace.internal.test_visibility.coverage_lines import CoverageLines @@ -78,8 +82,8 @@ def __init__( self._atr_is_retry = is_atr_retry self._atr_retries: List[TestVisibilityTest] = [] - # Currently unsupported - self._is_benchmark = None + self._is_benchmark = False + self._benchmark_duration_data: Optional[BenchmarkDurationData] = None def __repr__(self) -> str: suite_name = self.parent.name if self.parent is not None else "none" @@ -93,6 +97,11 @@ def _get_hierarchy_tags(self) -> Dict[str, str]: test.NAME: self.name, } + def _set_item_tags(self) -> None: + """Overrides parent tags for cases where they need to be modified""" + if self._is_benchmark: + self.set_tag(test.TYPE, BENCHMARK) + def _set_efd_tags(self) -> None: if self._efd_is_retry: self.set_tag(TEST_IS_RETRY, self._efd_is_retry) @@ -398,3 +407,18 @@ def _get_browser_driver(self): if self._span is None: return None return self._span.get_tag("test.browser.driver") + + # + # Benchmark test functionality + # + def set_benchmark_data(self, duration_data: Optional[BenchmarkDurationData], is_benchmark: bool = True): + self._benchmark_duration_data = duration_data + self._is_benchmark = is_benchmark + + if self._benchmark_duration_data is not None: + self.set_tag(BENCHMARK_INFO, "Time") + + for tag, attr in BENCHMARK_TAG_MAP.items(): + value = getattr(self._benchmark_duration_data, tag) + if value is not None: + self.set_tag(attr, value) diff --git a/ddtrace/internal/ci_visibility/constants.py b/ddtrace/internal/ci_visibility/constants.py index f30b6743f5a..7ace37b9424 100644 --- a/ddtrace/internal/ci_visibility/constants.py +++ b/ddtrace/internal/ci_visibility/constants.py @@ -4,6 +4,7 @@ SUITE = "suite" TEST = "test" +BENCHMARK = "benchmark" EVENT_TYPE = "type" diff --git a/ddtrace/internal/ci_visibility/recorder.py b/ddtrace/internal/ci_visibility/recorder.py 
index 225221a4a7d..b306a1e7912 100644 --- a/ddtrace/internal/ci_visibility/recorder.py +++ b/ddtrace/internal/ci_visibility/recorder.py @@ -74,6 +74,7 @@ from ddtrace.internal.service import Service from ddtrace.internal.test_visibility._atr_mixins import ATRTestMixin from ddtrace.internal.test_visibility._atr_mixins import AutoTestRetriesSettings +from ddtrace.internal.test_visibility._benchmark_mixin import BenchmarkTestMixin from ddtrace.internal.test_visibility._efd_mixins import EFDTestMixin from ddtrace.internal.test_visibility._efd_mixins import EFDTestStatus from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId @@ -1181,6 +1182,15 @@ def _on_set_test_parameters(item_id: TestId, parameters: str): CIVisibility.get_test_by_id(item_id).set_parameters(parameters) +@_requires_civisibility_enabled +def _on_set_benchmark_data(set_benchmark_data_args: BenchmarkTestMixin.SetBenchmarkDataArgs): + item_id = set_benchmark_data_args.test_id + data = set_benchmark_data_args.benchmark_data + is_benchmark = set_benchmark_data_args.is_benchmark + log.debug("Handling set benchmark data for test id %s, data %s, is_benchmark %s", item_id, data, is_benchmark) + CIVisibility.get_test_by_id(item_id).set_benchmark_data(data, is_benchmark) + + def _register_test_handlers(): log.debug("Registering test handlers") core.on("test_visibility.test.discover", _on_discover_test) @@ -1188,6 +1198,7 @@ def _register_test_handlers(): core.on("test_visibility.test.start", _on_start_test) core.on("test_visibility.test.finish", _on_finish_test) core.on("test_visibility.test.set_parameters", _on_set_test_parameters) + core.on("test_visibility.test.set_benchmark_data", _on_set_benchmark_data) @_requires_civisibility_enabled diff --git a/ddtrace/internal/test_visibility/_benchmark_mixin.py b/ddtrace/internal/test_visibility/_benchmark_mixin.py new file mode 100644 index 00000000000..c41d45b10a5 --- /dev/null +++ b/ddtrace/internal/test_visibility/_benchmark_mixin.py @@ -0,0 
+1,75 @@ +import typing as t + +from ddtrace.ext.test_visibility._utils import _catch_and_log_exceptions +from ddtrace.internal import core +from ddtrace.internal.logger import get_logger +from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId + + +log = get_logger(__name__) + + +class BenchmarkDurationData(t.NamedTuple): + duration_info: t.Optional[str] = None + duration_mean: t.Optional[float] = None + duration_runs: t.Optional[int] = None + statistics_hd15iqr: t.Optional[float] = None + statistics_iqr: t.Optional[float] = None + statistics_iqr_outliers: t.Optional[float] = None + statistics_ld15iqr: t.Optional[float] = None + statistics_max: t.Optional[float] = None + statistics_mean: t.Optional[float] = None + statistics_median: t.Optional[float] = None + statistics_min: t.Optional[float] = None + statistics_n: t.Optional[float] = None + statistics_ops: t.Optional[float] = None + statistics_outliers: t.Optional[float] = None + statistics_q1: t.Optional[float] = None + statistics_q3: t.Optional[float] = None + statistics_std_dev: t.Optional[float] = None + statistics_std_dev_outliers: t.Optional[float] = None + statistics_total: t.Optional[float] = None + + +class BenchmarkTestMixin: + class SetBenchmarkDataArgs(t.NamedTuple): + test_id: InternalTestId + benchmark_data: t.Optional[BenchmarkDurationData] + is_benchmark: bool = True + + @classmethod + @_catch_and_log_exceptions + def set_benchmark_data( + cls, + item_id: InternalTestId, + benchmark_data: t.Optional[BenchmarkDurationData] = None, + is_benchmark: bool = True, + ): + log.debug("Setting benchmark data for test %s: %s", item_id, benchmark_data) + core.dispatch( + "test_visibility.test.set_benchmark_data", + (BenchmarkTestMixin.SetBenchmarkDataArgs(item_id, benchmark_data, is_benchmark),), + ) + + +BENCHMARK_TAG_MAP = { + "duration_info": "benchmark.duration.info", + "duration_mean": "benchmark.duration.mean", + "duration_runs": "benchmark.duration.runs", + "statistics_hd15iqr": 
"benchmark.duration.statistics.hd15iqr", + "statistics_iqr": "benchmark.duration.statistics.iqr", + "statistics_iqr_outliers": "benchmark.duration.statistics.iqr_outliers", + "statistics_ld15iqr": "benchmark.duration.statistics.ld15iqr", + "statistics_max": "benchmark.duration.statistics.max", + "statistics_mean": "benchmark.duration.statistics.mean", + "statistics_median": "benchmark.duration.statistics.median", + "statistics_min": "benchmark.duration.statistics.min", + "statistics_n": "benchmark.duration.statistics.n", + "statistics_ops": "benchmark.duration.statistics.ops", + "statistics_outliers": "benchmark.duration.statistics.outliers", + "statistics_q1": "benchmark.duration.statistics.q1", + "statistics_q3": "benchmark.duration.statistics.q3", + "statistics_std_dev": "benchmark.duration.statistics.std_dev", + "statistics_std_dev_outliers": "benchmark.duration.statistics.std_dev_outliers", + "statistics_total": "benchmark.duration.statistics.total", +} diff --git a/ddtrace/internal/test_visibility/api.py b/ddtrace/internal/test_visibility/api.py index c5084d320cb..d66dbcc32c7 100644 --- a/ddtrace/internal/test_visibility/api.py +++ b/ddtrace/internal/test_visibility/api.py @@ -14,6 +14,7 @@ from ddtrace.internal.logger import get_logger from ddtrace.internal.test_visibility._atr_mixins import ATRSessionMixin from ddtrace.internal.test_visibility._atr_mixins import ATRTestMixin +from ddtrace.internal.test_visibility._benchmark_mixin import BenchmarkTestMixin from ddtrace.internal.test_visibility._efd_mixins import EFDSessionMixin from ddtrace.internal.test_visibility._efd_mixins import EFDTestMixin from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId @@ -132,7 +133,7 @@ class InternalTestSuite(ext_api.TestSuite, InternalTestBase, ITRMixin): pass -class InternalTest(ext_api.Test, InternalTestBase, ITRMixin, EFDTestMixin, ATRTestMixin): +class InternalTest(ext_api.Test, InternalTestBase, ITRMixin, EFDTestMixin, ATRTestMixin, 
BenchmarkTestMixin): class FinishArgs(NamedTuple): """InternalTest allows finishing with an overridden finish time (for EFD and other retry purposes)""" diff --git a/riotfile.py b/riotfile.py index b12bfcc1181..653023da524 100644 --- a/riotfile.py +++ b/riotfile.py @@ -1723,26 +1723,32 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="pytest-benchmark", + pys=select_pys(min_version="3.7", max_version="3.12"), command="pytest {cmdargs} --no-ddtrace --no-cov tests/contrib/pytest_benchmark/", pkgs={ "msgpack": latest, "pytest-randomly": latest, }, - env={ - "DD_PYTEST_USE_NEW_PLUGIN_BETA": "0", - }, venvs=[ Venv( - venvs=[ - Venv( - pys=select_pys(min_version="3.7", max_version="3.10"), - pkgs={ - "pytest-benchmark": [ - ">=3.1.0,<=4.0.0", - ] - }, - ) - ], + pkgs={ + "pytest-benchmark": [ + ">=3.1.0,<=4.0.0", + ] + }, + env={ + "DD_PYTEST_USE_NEW_PLUGIN_BETA": "0", + }, + ), + Venv( + pkgs={ + "pytest-benchmark": [ + ">=3.1.0,<=4.0.0", + ] + }, + env={ + "DD_PYTEST_USE_NEW_PLUGIN_BETA": "1", + }, ), ], ), diff --git a/tests/contrib/pytest_benchmark/test_pytest_benchmark.py b/tests/contrib/pytest_benchmark/test_pytest_benchmark.py index eedb634ccce..ba55659b8f8 100644 --- a/tests/contrib/pytest_benchmark/test_pytest_benchmark.py +++ b/tests/contrib/pytest_benchmark/test_pytest_benchmark.py @@ -1,10 +1,5 @@ import os -from unittest import mock -import pytest - -import ddtrace -from ddtrace.contrib.pytest.plugin import is_enabled from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_INFO from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_MEAN from ddtrace.contrib.pytest_benchmark.constants import BENCHMARK_RUN @@ -25,51 +20,10 @@ from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_STDDEV_OUTLIERS from ddtrace.contrib.pytest_benchmark.constants import STATISTICS_TOTAL from ddtrace.ext.test import TEST_TYPE -from ddtrace.internal.ci_visibility import CIVisibility -from 
ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings -from tests.ci_visibility.test_encoder import _patch_dummy_writer -from tests.utils import TracerTestCase -from tests.utils import override_env - - -class PytestTestCase(TracerTestCase): - @pytest.fixture(autouse=True) - def fixtures(self, testdir, monkeypatch, git_repo): - self.testdir = testdir - self.monkeypatch = monkeypatch - self.git_repo = git_repo - - @pytest.fixture(autouse=True) - def _dummy_check_enabled_features(self): - """By default, assume that _check_enabled_features() returns an ITR-disabled response. - - Tests that need a different response should re-patch the CIVisibility object. - """ - with mock.patch( - "ddtrace.internal.ci_visibility.recorder.CIVisibility._check_enabled_features", - return_value=TestVisibilityAPISettings(False, False, False, False), - ): - yield +from tests.contrib.pytest.test_pytest import PytestTestCaseBase - def inline_run(self, *args): - """Execute test script with test tracer.""" - - class CIVisibilityPlugin: - @staticmethod - def pytest_configure(config): - if is_enabled(config): - with _patch_dummy_writer(): - assert CIVisibility.enabled - CIVisibility.disable() - CIVisibility.enable(tracer=self.tracer, config=ddtrace.config.pytest) - - with override_env(dict(DD_API_KEY="foobar.baz")): - return self.testdir.inline_run(*args, plugins=[CIVisibilityPlugin()]) - - def subprocess_run(self, *args): - """Execute test script with test tracer.""" - return self.testdir.runpytest_subprocess(*args) +class PytestTestCase(PytestTestCaseBase): def test_span_contains_benchmark(self): """Test with benchmark.""" py_file = self.testdir.makepyfile( @@ -93,54 +47,24 @@ def test_sum_longer(benchmark): assert spans[0].get_tag(TEST_TYPE) == "benchmark" assert spans[0].get_tag(BENCHMARK_INFO) == "Time" - assert isinstance(spans[0].get_metric(BENCHMARK_MEAN), float) or isinstance( - spans[0].get_metric(BENCHMARK_MEAN), int - ) + assert 
isinstance(spans[0].get_metric(BENCHMARK_MEAN), (float, int)) assert isinstance(spans[0].get_metric(BENCHMARK_RUN), int) - assert isinstance(spans[0].get_metric(STATISTICS_HD15IQR), float) or isinstance( - spans[0].get_metric(STATISTICS_HD15IQR), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_IQR), float) or isinstance( - spans[0].get_metric(STATISTICS_IQR), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_IQR_OUTLIERS), int) or isinstance( - spans[0].get_metric(STATISTICS_IQR_OUTLIERS), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_LD15IQR), float) or isinstance( - spans[0].get_metric(STATISTICS_LD15IQR), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_MAX), float) or isinstance( - spans[0].get_metric(STATISTICS_MAX), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_MEAN), float) or isinstance( - spans[0].get_metric(STATISTICS_MEAN), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_MEDIAN), float) or isinstance( - spans[0].get_metric(STATISTICS_MEDIAN), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_MIN), float) or isinstance( - spans[0].get_metric(STATISTICS_MIN), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_OPS), float) or isinstance( - spans[0].get_metric(STATISTICS_OPS), int - ) + assert isinstance(spans[0].get_metric(STATISTICS_HD15IQR), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_IQR), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_IQR_OUTLIERS), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_LD15IQR), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_MAX), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_MEAN), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_MEDIAN), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_MIN), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_OPS), (float, int)) assert 
isinstance(spans[0].get_tag(STATISTICS_OUTLIERS), str) - assert isinstance(spans[0].get_metric(STATISTICS_Q1), float) or isinstance( - spans[0].get_metric(STATISTICS_Q1), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_Q3), float) or isinstance( - spans[0].get_metric(STATISTICS_Q3), int - ) + assert isinstance(spans[0].get_metric(STATISTICS_Q1), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_Q3), (float, int)) assert isinstance(spans[0].get_metric(STATISTICS_N), int) - assert isinstance(spans[0].get_metric(STATISTICS_STDDEV), float) or isinstance( - spans[0].get_metric(STATISTICS_STDDEV), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_STDDEV_OUTLIERS), float) or isinstance( - spans[0].get_metric(STATISTICS_STDDEV_OUTLIERS), int - ) - assert isinstance(spans[0].get_metric(STATISTICS_TOTAL), float) or isinstance( - spans[0].get_metric(STATISTICS_TOTAL), int - ) + assert isinstance(spans[0].get_metric(STATISTICS_STDDEV), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_STDDEV_OUTLIERS), (float, int)) + assert isinstance(spans[0].get_metric(STATISTICS_TOTAL), (float, int)) assert spans[0].get_metric(BENCHMARK_MEAN) > 0.0002 assert spans[0].get_metric(BENCHMARK_RUN) > 0 From f2841926d9c8b9b1387bcad262a8e35aff9ab018 Mon Sep 17 00:00:00 2001 From: Christophe Papazian <114495376+christophe-papazian@users.noreply.github.com> Date: Mon, 16 Dec 2024 17:09:21 +0100 Subject: [PATCH 40/78] feat(asm): add stack trace report for iast (#11574) - Add stack trace report to IAST events - Update test-agent to 1.20.0 to add support for stack traces report using msgpack (https://github.com/DataDog/images/pull/6234) - factorize stack trace code already used for exploit prevention, to be usable for iast too. - improve code of `ddtrace_iast_flask_patch` so it gets compatible with the new feature - improve `test_ddtrace_iast_flask_patch` to check that + operator was properly replaced. 
- add stack trace report unit test on threat tests System tests will also be enabled with https://github.com/DataDog/system-tests/pull/3663 APPSEC-55904 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .circleci/config.templ.yml | 2 +- .gitlab/services.yml | 2 +- ddtrace/appsec/_constants.py | 7 ++- .../_exploit_prevention/stack_traces.py | 44 ++++++++++++------- ddtrace/appsec/_iast/__init__.py | 9 +++- ddtrace/appsec/_iast/_ast/ast_patching.py | 27 +----------- ddtrace/appsec/_iast/_iast_request_context.py | 9 ++++ ddtrace/appsec/_iast/reporter.py | 12 +++++ ddtrace/appsec/_processor.py | 6 +-- docker-compose.yml | 2 +- 
...tack_traces_for_iast-cd2c008168f6181e.yaml | 4 ++ tests/appsec/contrib_appsec/conftest.py | 13 +++++- tests/appsec/contrib_appsec/utils.py | 23 ++++++---- .../integrations/pygoat_tests/test_pygoat.py | 3 +- .../test_flask_entrypoint_iast_patches.py | 13 ++++-- 15 files changed, 111 insertions(+), 65 deletions(-) create mode 100644 releasenotes/notes/stack_traces_for_iast-cd2c008168f6181e.yaml diff --git a/.circleci/config.templ.yml b/.circleci/config.templ.yml index 0dee5155002..73994eab222 100644 --- a/.circleci/config.templ.yml +++ b/.circleci/config.templ.yml @@ -16,7 +16,7 @@ mongo_image: &mongo_image mongo:3.6@sha256:19c11a8f1064fd2bb713ef1270f79a742a184 httpbin_image: &httpbin_image kennethreitz/httpbin@sha256:2c7abc4803080c22928265744410173b6fea3b898872c01c5fd0f0f9df4a59fb vertica_image: &vertica_image vertica/vertica-ce:latest rabbitmq_image: &rabbitmq_image rabbitmq:3.7-alpine -testagent_image: &testagent_image ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.17.0 +testagent_image: &testagent_image ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.20.0 parameters: coverage: diff --git a/.gitlab/services.yml b/.gitlab/services.yml index 3adcb973e89..51e28c38cc5 100644 --- a/.gitlab/services.yml +++ b/.gitlab/services.yml @@ -12,7 +12,7 @@ DD_REMOTE_CONFIGURATION_REFRESH_INTERVAL: 5s DD_DOGSTATSD_NON_LOCAL_TRAFFIC: true testagent: - name: registry.ddbuild.io/images/mirror/dd-apm-test-agent/ddapm-test-agent:v1.17.0 + name: registry.ddbuild.io/images/mirror/dd-apm-test-agent/ddapm-test-agent:v1.20.0 alias: testagent variables: LOG_LEVEL: INFO diff --git a/ddtrace/appsec/_constants.py b/ddtrace/appsec/_constants.py index fbb1264f6f9..83cb53e78ff 100644 --- a/ddtrace/appsec/_constants.py +++ b/ddtrace/appsec/_constants.py @@ -328,7 +328,6 @@ class DEFAULT(metaclass=Constant_Class): class EXPLOIT_PREVENTION(metaclass=Constant_Class): - STACK_TRACES: Literal["_dd.stack"] = "_dd.stack" STACK_TRACE_ID: Literal["stack_id"] = "stack_id" EP_ENABLED: 
Literal["DD_APPSEC_RASP_ENABLED"] = "DD_APPSEC_RASP_ENABLED" STACK_TRACE_ENABLED: Literal["DD_APPSEC_STACK_TRACE_ENABLED"] = "DD_APPSEC_STACK_TRACE_ENABLED" @@ -358,3 +357,9 @@ class FINGERPRINTING(metaclass=Constant_Class): HEADER = PREFIX + "http.header" NETWORK = PREFIX + "http.network" SESSION = PREFIX + "session" + + +class STACK_TRACE(metaclass=Constant_Class): + RASP = "exploit" + IAST = "vulnerability" + TAG: Literal["_dd.stack"] = "_dd.stack" diff --git a/ddtrace/appsec/_exploit_prevention/stack_traces.py b/ddtrace/appsec/_exploit_prevention/stack_traces.py index 8276d0c51bc..8d32a028ab9 100644 --- a/ddtrace/appsec/_exploit_prevention/stack_traces.py +++ b/ddtrace/appsec/_exploit_prevention/stack_traces.py @@ -3,36 +3,48 @@ from typing import Any from typing import Dict from typing import Iterable -from typing import List from typing import Optional from ddtrace._trace.span import Span -from ddtrace.appsec._constants import EXPLOIT_PREVENTION +from ddtrace.appsec._constants import STACK_TRACE from ddtrace.settings.asm import config as asm_config import ddtrace.tracer def report_stack( - message: str, span: Optional[Span] = None, crop_stack: Optional[str] = None, stack_id: Optional[str] = None -): + message: Optional[str] = None, + span: Optional[Span] = None, + crop_stack: Optional[str] = None, + stack_id: Optional[str] = None, + namespace: str = STACK_TRACE.RASP, +) -> bool: """ Report a stack trace to the current span. This is used to report stack traces for exploit prevention. Return the stack id for the reported stack trace to link it in triggers. 
""" - if not asm_config._ep_enabled or not asm_config._ep_stack_trace_enabled: - return None + if not asm_config._ep_stack_trace_enabled: + # stack trace report disabled + return False + if namespace == STACK_TRACE.RASP and not (asm_config._asm_enabled and asm_config._ep_enabled): + # exploit prevention stack trace with ep disabled + return False + if namespace == STACK_TRACE.IAST and not (asm_config._iast_enabled): + # iast stack trace with iast disabled + return False + if span is None: span = ddtrace.tracer.current_span() if span is None or stack_id is None: - return None + return False root_span = span._local_root or span - appsec_traces = root_span.get_struct_tag(EXPLOIT_PREVENTION.STACK_TRACES) or {} - exploit: List[Any] = appsec_traces.get("exploit", []) + appsec_traces = root_span.get_struct_tag(STACK_TRACE.TAG) or {} + current_list = appsec_traces.get(namespace, []) + total_length = len(current_list) # Do not report more than the maximum number of stack traces - if asm_config._ep_max_stack_traces and len(exploit) >= asm_config._ep_max_stack_traces: - return None + if asm_config._ep_max_stack_traces and total_length >= asm_config._ep_max_stack_traces: + return False stack = inspect.stack() if crop_stack is not None: @@ -43,8 +55,9 @@ def report_stack( res: Dict[str, Any] = { "language": "python", "id": stack_id, - "message": message, } + if message is not None: + res["message"] = message if len(stack) > asm_config._ep_max_stack_trace_depth > 0: top_stack = int(asm_config._ep_max_stack_trace_depth * asm_config._ep_stack_top_percent / 100) bottom_stack = asm_config._ep_max_stack_trace_depth - top_stack @@ -61,6 +74,7 @@ def report_stack( for i in iterator ] res["frames"] = frames - exploit.append(res) - appsec_traces["exploit"] = exploit - root_span.set_struct_tag(EXPLOIT_PREVENTION.STACK_TRACES, appsec_traces) + current_list.append(res) + appsec_traces[namespace] = current_list + root_span.set_struct_tag(STACK_TRACE.TAG, appsec_traces) + return True diff 
--git a/ddtrace/appsec/_iast/__init__.py b/ddtrace/appsec/_iast/__init__.py index fe488c87e46..724819b17df 100644 --- a/ddtrace/appsec/_iast/__init__.py +++ b/ddtrace/appsec/_iast/__init__.py @@ -31,6 +31,7 @@ def wrapped_function(wrapped, instance, args, kwargs): import inspect import os import sys +import types from ddtrace.internal.logger import get_logger from ddtrace.internal.module import ModuleWatchdog @@ -61,7 +62,7 @@ def ddtrace_iast_flask_patch(): module_name = inspect.currentframe().f_back.f_globals["__name__"] module = sys.modules[module_name] try: - module_path, patched_ast = astpatch_module(module, remove_flask_run=True) + module_path, patched_ast = astpatch_module(module) except Exception: log.debug("Unexpected exception while AST patching", exc_info=True) return @@ -71,8 +72,12 @@ def ddtrace_iast_flask_patch(): return compiled_code = compile(patched_ast, module_path, "exec") + # creating a new module environment to execute the patched code from scratch + new_module = types.ModuleType(module_name) + module.__dict__.clear() + module.__dict__.update(new_module.__dict__) + # executing the compiled code in the new module environment exec(compiled_code, module.__dict__) # nosec B102 - sys.modules[module_name] = compiled_code _iast_propagation_enabled = False diff --git a/ddtrace/appsec/_iast/_ast/ast_patching.py b/ddtrace/appsec/_iast/_ast/ast_patching.py index 6a1e4c2d3b6..7e2258bd556 100644 --- a/ddtrace/appsec/_iast/_ast/ast_patching.py +++ b/ddtrace/appsec/_iast/_ast/ast_patching.py @@ -3,7 +3,6 @@ import ast import codecs import os -import re from sys import builtin_module_names from sys import version_info import textwrap @@ -388,27 +387,6 @@ def visit_ast( return modified_ast -_FLASK_INSTANCE_REGEXP = re.compile(r"(\S*)\s*=.*Flask\(.*") - - -def _remove_flask_run(text: Text) -> Text: - """ - Find and remove flask app.run() call. This is used for patching - the app.py file and exec'ing to replace the module without creating - a new instance. 
- """ - flask_instance_name = re.search(_FLASK_INSTANCE_REGEXP, text) - if not flask_instance_name: - return text - groups = flask_instance_name.groups() - if not groups: - return text - - instance_name = groups[-1] - new_text = re.sub(instance_name + r"\.run\(.*\)", "pass", text) - return new_text - - _DIR_WRAPPER = textwrap.dedent( f""" @@ -442,7 +420,7 @@ def {_PREFIX}set_dir_filter(): ) -def astpatch_module(module: ModuleType, remove_flask_run: bool = False) -> Tuple[str, Optional[ast.Module]]: +def astpatch_module(module: ModuleType) -> Tuple[str, Optional[ast.Module]]: module_name = module.__name__ module_origin = origin(module) @@ -482,9 +460,6 @@ def astpatch_module(module: ModuleType, remove_flask_run: bool = False) -> Tuple log.debug("empty file: %s", module_path) return "", None - if remove_flask_run: - source_text = _remove_flask_run(source_text) - if not asbool(os.environ.get(IAST.ENV_NO_DIR_PATCH, "false")) and version_info > (3, 7): # Add the dir filter so __ddtrace stuff is not returned by dir(module) # does not work in 3.7 because it enters into infinite recursion diff --git a/ddtrace/appsec/_iast/_iast_request_context.py b/ddtrace/appsec/_iast/_iast_request_context.py index b711ae61195..a28c2d3ff0d 100644 --- a/ddtrace/appsec/_iast/_iast_request_context.py +++ b/ddtrace/appsec/_iast/_iast_request_context.py @@ -50,6 +50,7 @@ def __init__(self, span: Optional[Span] = None): self.request_enabled: bool = False self.iast_reporter: Optional[IastSpanReporter] = None self.iast_span_metrics: Dict[str, int] = {} + self.iast_stack_trace_id: int = 0 def _get_iast_context() -> Optional[IASTEnvironment]: @@ -96,6 +97,14 @@ def get_iast_reporter() -> Optional[IastSpanReporter]: return None +def get_iast_stacktrace_id() -> int: + env = _get_iast_context() + if env: + env.iast_stack_trace_id += 1 + return env.iast_stack_trace_id + return 0 + + def set_iast_request_enabled(request_enabled) -> None: env = _get_iast_context() if env: diff --git 
a/ddtrace/appsec/_iast/reporter.py b/ddtrace/appsec/_iast/reporter.py index c7004909cc9..62cc2ee8d65 100644 --- a/ddtrace/appsec/_iast/reporter.py +++ b/ddtrace/appsec/_iast/reporter.py @@ -11,6 +11,8 @@ from typing import Tuple import zlib +from ddtrace.appsec._constants import STACK_TRACE +from ddtrace.appsec._exploit_prevention.stack_traces import report_stack from ddtrace.appsec._iast._evidence_redaction import sensitive_handler from ddtrace.appsec._iast._utils import _get_source_index from ddtrace.appsec._iast.constants import VULN_INSECURE_HASHING_TYPE @@ -75,9 +77,19 @@ class Vulnerability(NotNoneDictable): evidence: Evidence location: Location hash: int = dataclasses.field(init=False, compare=False, hash=("PYTEST_CURRENT_TEST" in os.environ), repr=False) + stackId: Optional[str] = dataclasses.field(init=False, compare=False) def __post_init__(self): + # avoid circular import + from ddtrace.appsec._iast._iast_request_context import get_iast_stacktrace_id + self.hash = zlib.crc32(repr(self).encode()) + stacktrace_id = get_iast_stacktrace_id() + self.stackId = None + if stacktrace_id: + str_id = str(stacktrace_id) + if report_stack(stack_id=str_id, namespace=STACK_TRACE.IAST): + self.stackId = str_id def __repr__(self): return f"Vulnerability(type='{self.type}', location={self.location})" diff --git a/ddtrace/appsec/_processor.py b/ddtrace/appsec/_processor.py index 4ba8222c89a..06328d1201a 100644 --- a/ddtrace/appsec/_processor.py +++ b/ddtrace/appsec/_processor.py @@ -21,10 +21,12 @@ from ddtrace.appsec._constants import EXPLOIT_PREVENTION from ddtrace.appsec._constants import FINGERPRINTING from ddtrace.appsec._constants import SPAN_DATA_NAMES +from ddtrace.appsec._constants import STACK_TRACE from ddtrace.appsec._constants import WAF_ACTIONS from ddtrace.appsec._constants import WAF_DATA_NAMES from ddtrace.appsec._ddwaf import DDWaf_result from ddtrace.appsec._ddwaf.ddwaf_types import ddwaf_context_capsule +from 
ddtrace.appsec._exploit_prevention.stack_traces import report_stack from ddtrace.appsec._metrics import _set_waf_init_metric from ddtrace.appsec._metrics import _set_waf_request_metrics from ddtrace.appsec._metrics import _set_waf_updates_metric @@ -325,10 +327,8 @@ def _waf_action( blocked = parameters blocked[WAF_ACTIONS.TYPE] = "none" elif action == WAF_ACTIONS.STACK_ACTION: - from ddtrace.appsec._exploit_prevention.stack_traces import report_stack - stack_trace_id = parameters["stack_id"] - report_stack("exploit detected", span, crop_trace, stack_id=stack_trace_id) + report_stack("exploit detected", span, crop_trace, stack_id=stack_trace_id, namespace=STACK_TRACE.RASP) for rule in waf_results.data: rule[EXPLOIT_PREVENTION.STACK_TRACE_ID] = stack_trace_id diff --git a/docker-compose.yml b/docker-compose.yml index bed5a6ce8ee..cf3738c2dbe 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -129,7 +129,7 @@ services: volumes: - ddagent:/tmp/ddagent:rw testagent: - image: ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.17.0 + image: ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.20.0 ports: - "127.0.0.1:9126:8126" volumes: diff --git a/releasenotes/notes/stack_traces_for_iast-cd2c008168f6181e.yaml b/releasenotes/notes/stack_traces_for_iast-cd2c008168f6181e.yaml new file mode 100644 index 00000000000..045552e9b7d --- /dev/null +++ b/releasenotes/notes/stack_traces_for_iast-cd2c008168f6181e.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Code Security: This introduces stack trace reports for Code Security. 
diff --git a/tests/appsec/contrib_appsec/conftest.py b/tests/appsec/contrib_appsec/conftest.py index 9773ef124c9..74ad0f655ef 100644 --- a/tests/appsec/contrib_appsec/conftest.py +++ b/tests/appsec/contrib_appsec/conftest.py @@ -45,8 +45,17 @@ def check_waf_timeout(request): @pytest.fixture -def get_tag(root_span): - yield lambda name: root_span().get_tag(name) +def get_tag(test_spans, root_span): + # checking both root spans and web spans for the tag + def get(name): + for span in test_spans.spans: + if span.parent_id is None or span.span_type == "web": + res = span.get_tag(name) + if res is not None: + return res + return root_span().get_tag(name) + + yield get @pytest.fixture diff --git a/tests/appsec/contrib_appsec/utils.py b/tests/appsec/contrib_appsec/utils.py index 0712e6d6fd8..0d195df764e 100644 --- a/tests/appsec/contrib_appsec/utils.py +++ b/tests/appsec/contrib_appsec/utils.py @@ -62,9 +62,13 @@ def location(self, response) -> str: def body(self, response) -> str: raise NotImplementedError + def get_stack_trace(self, root_span, namespace): + appsec_traces = root_span().get_struct_tag(asm_constants.STACK_TRACE.TAG) or {} + stacks = appsec_traces.get(namespace, []) + return stacks + def check_for_stack_trace(self, root_span): - appsec_traces = root_span().get_struct_tag(asm_constants.EXPLOIT_PREVENTION.STACK_TRACES) or {} - exploit = appsec_traces.get("exploit", []) + exploit = self.get_stack_trace(root_span, "exploit") stack_ids = sorted(set(t["id"] for t in exploit)) triggers = get_triggers(root_span()) stack_id_in_triggers = sorted(set(t["stack_id"] for t in (triggers or []) if "stack_id" in t)) @@ -1385,9 +1389,9 @@ def validate_top_function(trace): # there may have been multiple evaluations of other rules too assert (("rule_type", endpoint), ("waf_version", DDWAF_VERSION)) in evals if action_level == 2: - assert get_tag("rasp.request.done") is None + assert get_tag("rasp.request.done") is None, get_tag("rasp.request.done") else: - assert 
get_tag("rasp.request.done") == endpoint + assert get_tag("rasp.request.done") == endpoint, get_tag("rasp.request.done") assert get_metric(APPSEC.RASP_DURATION) is not None assert get_metric(APPSEC.RASP_DURATION_EXT) is not None assert get_metric(APPSEC.RASP_RULE_EVAL) is not None @@ -1398,7 +1402,7 @@ def validate_top_function(trace): assert "rasp" not in n assert get_triggers(root_span()) is None assert self.check_for_stack_trace(root_span) == [] - assert get_tag("rasp.request.done") == endpoint + assert get_tag("rasp.request.done") == endpoint, get_tag("rasp.request.done") @pytest.mark.parametrize("asm_enabled", [True, False]) @pytest.mark.parametrize("auto_events_enabled", [True, False]) @@ -1505,21 +1509,22 @@ def test_fingerprinting(self, interface, root_span, get_tag, asm_enabled, user_a assert get_tag(asm_constants.FINGERPRINTING.SESSION) is None def test_iast(self, interface, root_span, get_tag): - if interface.name == "fastapi" and asm_config._iast_enabled: - raise pytest.xfail("fastapi does not fully support IAST for now") - from ddtrace.ext import http - url = "/rasp/command_injection/?cmd=ls" + url = "/rasp/command_injection/?cmd=." 
self.update_tracer(interface) response = interface.client.get(url) assert self.status(response) == 200 assert get_tag(http.STATUS_CODE) == "200" assert self.body(response).startswith("command_injection endpoint") + stack_traces = self.get_stack_trace(root_span, "vulnerability") if asm_config._iast_enabled: assert get_tag("_dd.iast.json") is not None + # checking for iast stack traces + assert stack_traces else: assert get_tag("_dd.iast.json") is None + assert stack_traces == [] @contextmanager diff --git a/tests/appsec/integrations/pygoat_tests/test_pygoat.py b/tests/appsec/integrations/pygoat_tests/test_pygoat.py index e60d5336b35..f3dd0f173ee 100644 --- a/tests/appsec/integrations/pygoat_tests/test_pygoat.py +++ b/tests/appsec/integrations/pygoat_tests/test_pygoat.py @@ -26,6 +26,7 @@ def client(): agent_client = requests.session() reply = agent_client.get(TESTAGENT_URL + "/start" + TESTAGENT_TOKEN_PARAM, headers=TESTAGENT_HEADERS) + assert reply.status_code == 200 pygoat_client, token = login_to_pygoat() @@ -65,7 +66,7 @@ def get_traces(agent_client: requests.Session) -> requests.Response: def vulnerability_in_traces(vuln_type: str, agent_client: requests.Session) -> bool: time.sleep(5) traces = get_traces(agent_client) - assert traces.status_code == 200 + assert traces.status_code == 200, traces.text traces_list = json.loads(traces.text) class InnerBreakException(Exception): diff --git a/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py b/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py index f0eeb1eb626..4f54bc675c3 100644 --- a/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py +++ b/tests/appsec/integrations/test_flask_entrypoint_iast_patches.py @@ -7,11 +7,19 @@ def test_ddtrace_iast_flask_patch(): import dis import io + import re import sys from tests.utils import override_env from tests.utils import override_global_config + PATTERN = r"""Disassembly of add_test: +(\s*7 0 RESUME 0 +)?\s*8 \d LOAD_GLOBAL \d \((NULL 
\+ )?_ddtrace_aspects\) +\s*\d+ LOAD_(ATTR|METHOD)\s+\d \(add_aspect\) +\s*\d+ LOAD_FAST 0 \(a\) +\s*\d+ LOAD_FAST 1 \(b\)""" + with override_global_config(dict(_iast_enabled=True)), override_env( dict(DD_IAST_ENABLED="true", DD_IAST_REQUEST_SAMPLING="100") ): @@ -21,10 +29,9 @@ def test_ddtrace_iast_flask_patch(): dis.dis(flask_entrypoint, file=dis_output) str_output = dis_output.getvalue() # Should have replaced the binary op with the aspect in add_test: - assert "(add_aspect)" in str_output - assert "BINARY_ADD" in str_output or "BINARY_OP" not in str_output + assert re.search(PATTERN, str_output), str_output # Should have replaced the app.run() with a pass: - assert "Disassembly of run" not in str_output + # assert "Disassembly of run" not in str_output, str_output del sys.modules["tests.appsec.iast.fixtures.entrypoint.app_main_patched"] From 639da637e4546e2f82aee8d88c55bc470d8acec8 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Mon, 16 Dec 2024 11:37:19 -0500 Subject: [PATCH 41/78] ci: wait longer for opensearch to start (#11739) --- tests/contrib/elasticsearch/test_elasticsearch.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/contrib/elasticsearch/test_elasticsearch.py b/tests/contrib/elasticsearch/test_elasticsearch.py index 92ce195c92f..b80b4486e71 100644 --- a/tests/contrib/elasticsearch/test_elasticsearch.py +++ b/tests/contrib/elasticsearch/test_elasticsearch.py @@ -41,14 +41,16 @@ def wait_for_es(host: str, port: int): - for _ in range(20): + # Wait for up to 160 seconds for ES to start. + # DEV: Elasticsearch is pretty quick, but OpenSearch can take a long time to start. 
+ for _ in range(80): try: conn = HTTPConnection(f"{host}:{port}") conn.request("GET", "/") conn.getresponse() return except Exception: - time.sleep(1) + time.sleep(2) raise Exception(f"Could not connect to ES at {host}:{port}") From bd6c2e1d07bc5bcb218af61f5f384c23aa09fb3a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 16:47:31 +0000 Subject: [PATCH 42/78] chore: update mariadb latest version to 1.1.11 (#11727) ## Checklist - [ ] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) Co-authored-by: quinna-h <175135214+quinna-h@users.noreply.github.com> --- 
.riot/requirements/1050efa.txt | 12 ++++++------ .riot/requirements/12c10e8.txt | 18 +++++++++--------- .riot/requirements/12cb0e7.txt | 12 ++++++------ .riot/requirements/147bedb.txt | 18 +++++++++--------- .riot/requirements/16b7aa5.txt | 10 +++++----- .riot/requirements/1bf3da5.txt | 12 ++++++------ .riot/requirements/1e0ec0b.txt | 14 +++++++------- .riot/requirements/4ed631d.txt | 12 ++++++------ .riot/requirements/769aa27.txt | 12 ++++++------ .riot/requirements/85acf6e.txt | 12 ++++++------ .riot/requirements/8a17cb2.txt | 12 ++++++------ .riot/requirements/e75aea6.txt | 16 ++++++++-------- .riot/requirements/fb50881.txt | 14 +++++++------- 13 files changed, 87 insertions(+), 87 deletions(-) diff --git a/.riot/requirements/1050efa.txt b/.riot/requirements/1050efa.txt index d69750cdf3f..df4832ccbd1 100644 --- a/.riot/requirements/1050efa.txt +++ b/.riot/requirements/1050efa.txt @@ -5,16 +5,16 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1050efa.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/12c10e8.txt b/.riot/requirements/12c10e8.txt index abe4c79bdd0..75ea709c67a 100644 --- a/.riot/requirements/12c10e8.txt +++ b/.riot/requirements/12c10e8.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/12c10e8.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 
pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/12cb0e7.txt b/.riot/requirements/12cb0e7.txt index 95dd85db63b..303f7985e32 100644 --- a/.riot/requirements/12cb0e7.txt +++ b/.riot/requirements/12cb0e7.txt @@ -5,16 +5,16 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/12cb0e7.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/147bedb.txt b/.riot/requirements/147bedb.txt index d128fe5aaa2..b03efd4dc82 100644 --- a/.riot/requirements/147bedb.txt +++ b/.riot/requirements/147bedb.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/147bedb.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/16b7aa5.txt b/.riot/requirements/16b7aa5.txt index e53e34a8205..f8d2399a7eb 100644 --- a/.riot/requirements/16b7aa5.txt +++ b/.riot/requirements/16b7aa5.txt @@ -8,17 +8,17 @@ attrs==24.2.0 coverage[toml]==7.6.1 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mariadb==1.0.11 mock==5.1.0 opentracing==2.4.0 
-packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 +pytest==8.3.4 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/1bf3da5.txt b/.riot/requirements/1bf3da5.txt index 24b990913fb..ffd311eb163 100644 --- a/.riot/requirements/1bf3da5.txt +++ b/.riot/requirements/1bf3da5.txt @@ -8,17 +8,17 @@ attrs==24.2.0 coverage[toml]==7.6.1 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 +pytest==8.3.4 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/1e0ec0b.txt b/.riot/requirements/1e0ec0b.txt index 26f75087ae7..5f5ddcf3598 100644 --- a/.riot/requirements/1e0ec0b.txt +++ b/.riot/requirements/1e0ec0b.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1e0ec0b.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 diff --git a/.riot/requirements/4ed631d.txt b/.riot/requirements/4ed631d.txt index b627027f63d..a63d5635068 100644 --- a/.riot/requirements/4ed631d.txt +++ b/.riot/requirements/4ed631d.txt @@ -5,16 +5,16 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/4ed631d.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 
opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/769aa27.txt b/.riot/requirements/769aa27.txt index be5205a4bd0..3b3c8a013dd 100644 --- a/.riot/requirements/769aa27.txt +++ b/.riot/requirements/769aa27.txt @@ -5,16 +5,16 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/769aa27.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/85acf6e.txt b/.riot/requirements/85acf6e.txt index 0bb20810181..d5d68c47b81 100644 --- a/.riot/requirements/85acf6e.txt +++ b/.riot/requirements/85acf6e.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/85acf6e.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mariadb==1.0.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 diff --git a/.riot/requirements/8a17cb2.txt b/.riot/requirements/8a17cb2.txt index 3d204884507..f8916e4d431 100644 --- a/.riot/requirements/8a17cb2.txt +++ b/.riot/requirements/8a17cb2.txt @@ -8,17 +8,17 @@ attrs==24.2.0 coverage[toml]==7.6.1 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 
-packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 +pytest==8.3.4 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/e75aea6.txt b/.riot/requirements/e75aea6.txt index 4d191b83adc..704e2fecf98 100644 --- a/.riot/requirements/e75aea6.txt +++ b/.riot/requirements/e75aea6.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/e75aea6.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mariadb==1.0.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/fb50881.txt b/.riot/requirements/fb50881.txt index f39cd8b01b4..6ec4fc75d44 100644 --- a/.riot/requirements/fb50881.txt +++ b/.riot/requirements/fb50881.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/fb50881.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 -mariadb==1.1.10 +mariadb==1.1.11 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 From 3828edb95d6947e095f331722465b83416ee8f48 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 16:49:55 +0000 Subject: [PATCH 43/78] chore: update asyncpg latest version to 0.30.0 (#11728) ## Checklist - [x] PR author has 
checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) Co-authored-by: quinna-h <175135214+quinna-h@users.noreply.github.com> --- .riot/requirements/12594bd.txt | 32 ++++++++++++++++---------------- .riot/requirements/1f2ab25.txt | 30 +++++++++++++++--------------- .riot/requirements/4c87f15.txt | 23 +++++++++++------------ .riot/requirements/6ebd15f.txt | 22 +++++++++++----------- .riot/requirements/85c8e30.txt | 10 +++++----- .riot/requirements/aaf6987.txt | 24 ++++++++++++------------ .riot/requirements/b970d9a.txt | 23 +++++++++++------------ .riot/requirements/bc5cfa5.txt | 28 ++++++++++++++-------------- .riot/requirements/ca86aae.txt | 30 +++++++++++++++--------------- 
.riot/requirements/f7ca81b.txt | 10 +++++----- .riot/requirements/fa9267f.txt | 16 ++++++++-------- 11 files changed, 123 insertions(+), 125 deletions(-) diff --git a/.riot/requirements/12594bd.txt b/.riot/requirements/12594bd.txt index e8e2f1234f5..9311ce66a7f 100644 --- a/.riot/requirements/12594bd.txt +++ b/.riot/requirements/12594bd.txt @@ -2,25 +2,25 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --no-annotate .riot/requirements/12594bd.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/12594bd.in # -async-timeout==4.0.3 -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.4.0 -exceptiongroup==1.2.0 +async-timeout==5.0.1 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.9 +exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==7.0.1 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.21.2 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.17.0 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/1f2ab25.txt b/.riot/requirements/1f2ab25.txt index eb17561c98f..ee70e55666e 100644 --- a/.riot/requirements/1f2ab25.txt +++ b/.riot/requirements/1f2ab25.txt @@ -2,25 +2,25 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/1f2ab25.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1f2ab25.in # -async-timeout==4.0.3 -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.4.0 -exceptiongroup==1.2.0 +async-timeout==5.0.1 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.1 +exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==7.0.1 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 
opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.21.2 +pytest-cov==5.0.0 +pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.17.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/4c87f15.txt b/.riot/requirements/4c87f15.txt index e5138ff00ca..1ab11af15de 100644 --- a/.riot/requirements/4c87f15.txt +++ b/.riot/requirements/4c87f15.txt @@ -2,21 +2,20 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --no-annotate .riot/requirements/4c87f15.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/4c87f15.in # -async-timeout==4.0.3 -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.4.0 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.21.2 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/6ebd15f.txt b/.riot/requirements/6ebd15f.txt index e15eb9136da..8665e48a87a 100644 --- a/.riot/requirements/6ebd15f.txt +++ b/.riot/requirements/6ebd15f.txt @@ -2,24 +2,24 @@ # This file is autogenerated by pip-compile with Python 3.9 # by the following command: # -# pip-compile --no-annotate .riot/requirements/6ebd15f.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/6ebd15f.in # asyncpg==0.23.0 -attrs==23.2.0 -coverage[toml]==7.5.4 -exceptiongroup==1.2.1 +attrs==24.2.0 +coverage[toml]==7.6.9 +exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.0.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 
pluggy==1.5.0 -pytest==8.2.2 +pytest==8.3.4 pytest-asyncio==0.21.2 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.19.2 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/85c8e30.txt b/.riot/requirements/85c8e30.txt index e24c6ea9c30..4d03cfcf992 100644 --- a/.riot/requirements/85c8e30.txt +++ b/.riot/requirements/85c8e30.txt @@ -2,21 +2,21 @@ # This file is autogenerated by pip-compile with Python 3.7 # by the following command: # -# pip-compile --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/85c8e30.in +# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/85c8e30.in # asyncpg==0.28.0 -attrs==23.2.0 +attrs==24.2.0 coverage[toml]==7.2.7 -exceptiongroup==1.2.0 +exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==6.7.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 +packaging==24.0 pluggy==1.2.0 pytest==7.4.4 -pytest-asyncio==0.21.1 +pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-randomly==3.12.0 diff --git a/.riot/requirements/aaf6987.txt b/.riot/requirements/aaf6987.txt index c5cf067fda4..325d23c244d 100644 --- a/.riot/requirements/aaf6987.txt +++ b/.riot/requirements/aaf6987.txt @@ -2,22 +2,22 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --no-annotate .riot/requirements/aaf6987.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/aaf6987.in # asyncpg==0.24.0 -attrs==23.2.0 -coverage[toml]==7.4.0 -exceptiongroup==1.2.0 +attrs==24.2.0 +coverage[toml]==7.6.9 +exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 
+pytest-asyncio==0.21.2 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 diff --git a/.riot/requirements/b970d9a.txt b/.riot/requirements/b970d9a.txt index 4c48960f797..5b6566c9768 100644 --- a/.riot/requirements/b970d9a.txt +++ b/.riot/requirements/b970d9a.txt @@ -2,21 +2,20 @@ # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # -# pip-compile --no-annotate .riot/requirements/b970d9a.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/b970d9a.in # -async-timeout==4.0.3 -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.4.0 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.21.2 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/bc5cfa5.txt b/.riot/requirements/bc5cfa5.txt index 6bb0594f42e..69e33321ec8 100644 --- a/.riot/requirements/bc5cfa5.txt +++ b/.riot/requirements/bc5cfa5.txt @@ -2,23 +2,23 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --no-annotate .riot/requirements/bc5cfa5.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/bc5cfa5.in # -async-timeout==4.0.3 -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.4.0 -exceptiongroup==1.2.0 +async-timeout==5.0.1 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.9 +exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.21.2 +pytest-cov==6.0.0 
+pytest-mock==3.14.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 diff --git a/.riot/requirements/ca86aae.txt b/.riot/requirements/ca86aae.txt index f37a682c7bd..be8832160e7 100644 --- a/.riot/requirements/ca86aae.txt +++ b/.riot/requirements/ca86aae.txt @@ -2,25 +2,25 @@ # This file is autogenerated by pip-compile with Python 3.8 # by the following command: # -# pip-compile --no-annotate .riot/requirements/ca86aae.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ca86aae.in # -async-timeout==4.0.3 -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.4.0 -exceptiongroup==1.2.0 +async-timeout==5.0.1 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.1 +exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==7.0.1 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -pytest==7.4.4 -pytest-asyncio==0.21.1 -pytest-cov==4.1.0 -pytest-mock==3.12.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-asyncio==0.21.2 +pytest-cov==5.0.0 +pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.17.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/f7ca81b.txt b/.riot/requirements/f7ca81b.txt index 8a63c4f0856..14de8077cae 100644 --- a/.riot/requirements/f7ca81b.txt +++ b/.riot/requirements/f7ca81b.txt @@ -2,21 +2,21 @@ # This file is autogenerated by pip-compile with Python 3.7 # by the following command: # -# pip-compile --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/f7ca81b.in +# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/f7ca81b.in # asyncpg==0.28.0 -attrs==23.2.0 +attrs==24.2.0 coverage[toml]==7.2.7 -exceptiongroup==1.2.0 +exceptiongroup==1.2.2 hypothesis==6.45.0 importlib-metadata==6.7.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==23.2 +packaging==24.0 pluggy==1.2.0 pytest==7.4.4 -pytest-asyncio==0.21.1 
+pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-randomly==3.12.0 diff --git a/.riot/requirements/fa9267f.txt b/.riot/requirements/fa9267f.txt index 80c862d47e3..675a460f3bc 100644 --- a/.riot/requirements/fa9267f.txt +++ b/.riot/requirements/fa9267f.txt @@ -2,20 +2,20 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile --no-annotate .riot/requirements/fa9267f.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/fa9267f.in # -asyncpg==0.29.0 -attrs==23.2.0 -coverage[toml]==7.5.4 +asyncpg==0.30.0 +attrs==24.2.0 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pytest==8.2.2 +pytest==8.3.4 pytest-asyncio==0.21.2 -pytest-cov==5.0.0 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 From 872f1af90be3b6769d531bb92b1cb677e64dea73 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 16:52:55 +0000 Subject: [PATCH 44/78] chore: update pyodbc latest version to 5.2.0 (#11733) Update pyodbc lockfiles and dependency package lockfiles. This performs the following updates: 1) Some pyodbc lockfiles use pyodbc `latest`. This will update pyodbc and dependencies. 2) Some pyodbc lockfiles use a pinned (non-latest) version of pyodbc, but require the `latest` version of another package. This will update all such packages. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) Co-authored-by: quinna-h <175135214+quinna-h@users.noreply.github.com> --- .riot/requirements/17879d0.txt | 14 +++++++------- .riot/requirements/1810da7.txt | 10 +++++----- .riot/requirements/188a403.txt | 16 ++++++++-------- .riot/requirements/1af9cfa.txt | 12 ++++++------ .riot/requirements/1ef773e.txt | 12 ++++++------ .riot/requirements/9a81f68.txt | 18 +++++++++--------- .riot/requirements/c4dace8.txt | 12 ++++++------ .riot/requirements/ed78a8f.txt | 12 ++++++------ 8 files changed, 53 insertions(+), 53 deletions(-) diff --git a/.riot/requirements/17879d0.txt 
b/.riot/requirements/17879d0.txt index 9e7c9459a2b..339e06c1336 100644 --- a/.riot/requirements/17879d0.txt +++ b/.riot/requirements/17879d0.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/17879d0.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pyodbc==5.1.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pyodbc==5.2.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 diff --git a/.riot/requirements/1810da7.txt b/.riot/requirements/1810da7.txt index 4c80eec5a83..020c016edce 100644 --- a/.riot/requirements/1810da7.txt +++ b/.riot/requirements/1810da7.txt @@ -8,17 +8,17 @@ attrs==24.2.0 coverage[toml]==7.6.1 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pyodbc==4.0.39 -pytest==8.3.2 +pytest==8.3.4 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/188a403.txt b/.riot/requirements/188a403.txt index 62354b22cef..37541506b8d 100644 --- a/.riot/requirements/188a403.txt +++ b/.riot/requirements/188a403.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/188a403.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pyodbc==4.0.39 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 
-zipp==3.20.0 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/1af9cfa.txt b/.riot/requirements/1af9cfa.txt index 3a796251a8b..7a5efd36134 100644 --- a/.riot/requirements/1af9cfa.txt +++ b/.riot/requirements/1af9cfa.txt @@ -5,18 +5,18 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1af9cfa.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 pyodbc==4.0.39 -pytest==8.3.2 -pytest-cov==5.0.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 +tomli==2.2.1 diff --git a/.riot/requirements/1ef773e.txt b/.riot/requirements/1ef773e.txt index 16dcedbeacf..88ce7283fd9 100644 --- a/.riot/requirements/1ef773e.txt +++ b/.riot/requirements/1ef773e.txt @@ -5,16 +5,16 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/1ef773e.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pyodbc==5.1.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pyodbc==5.2.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 diff --git a/.riot/requirements/9a81f68.txt b/.riot/requirements/9a81f68.txt index de20e1d5784..83a8d9649f8 100644 --- a/.riot/requirements/9a81f68.txt +++ b/.riot/requirements/9a81f68.txt @@ -5,20 +5,20 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/9a81f68.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pyodbc==5.1.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pyodbc==5.2.0 +pytest==8.3.4 
+pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.21.0 diff --git a/.riot/requirements/c4dace8.txt b/.riot/requirements/c4dace8.txt index a6044120570..b828932c4c2 100644 --- a/.riot/requirements/c4dace8.txt +++ b/.riot/requirements/c4dace8.txt @@ -8,17 +8,17 @@ attrs==24.2.0 coverage[toml]==7.6.1 exceptiongroup==1.2.2 hypothesis==6.45.0 -importlib-metadata==8.4.0 +importlib-metadata==8.5.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pyodbc==5.1.0 -pytest==8.3.2 +pyodbc==5.2.0 +pytest==8.3.4 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-randomly==3.15.0 sortedcontainers==2.4.0 -tomli==2.0.1 -zipp==3.20.0 +tomli==2.2.1 +zipp==3.20.2 diff --git a/.riot/requirements/ed78a8f.txt b/.riot/requirements/ed78a8f.txt index 95a78831a4d..d9c1cb0b78f 100644 --- a/.riot/requirements/ed78a8f.txt +++ b/.riot/requirements/ed78a8f.txt @@ -5,16 +5,16 @@ # pip-compile --allow-unsafe --no-annotate .riot/requirements/ed78a8f.in # attrs==24.2.0 -coverage[toml]==7.6.1 +coverage[toml]==7.6.9 hypothesis==6.45.0 iniconfig==2.0.0 mock==5.1.0 opentracing==2.4.0 -packaging==24.1 +packaging==24.2 pluggy==1.5.0 -pyodbc==5.1.0 -pytest==8.3.2 -pytest-cov==5.0.0 +pyodbc==5.2.0 +pytest==8.3.4 +pytest-cov==6.0.0 pytest-mock==3.14.0 -pytest-randomly==3.15.0 +pytest-randomly==3.16.0 sortedcontainers==2.4.0 From 28c4466fb6a8f87c64750b0c1dcca91a3ecf39c4 Mon Sep 17 00:00:00 2001 From: Emmett Butler <723615+emmettbutler@users.noreply.github.com> Date: Mon, 16 Dec 2024 09:07:16 -0800 Subject: [PATCH 45/78] chore: refactor the debugger for clarity (#11721) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description 
notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_debugger.py | 12 ++++++------ ddtrace/debugging/_function/store.py | 25 ++++++++++++------------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/ddtrace/debugging/_debugger.py b/ddtrace/debugging/_debugger.py index 7d4a283b26d..1c2429ba569 100644 --- a/ddtrace/debugging/_debugger.py +++ b/ddtrace/debugging/_debugger.py @@ -25,7 +25,7 @@ from ddtrace._trace.tracer import Tracer from ddtrace.debugging._config import di_config from ddtrace.debugging._function.discovery import FunctionDiscovery -from ddtrace.debugging._function.store import FullyNamedWrappedFunction +from ddtrace.debugging._function.store import FullyNamedContextWrappedFunction from ddtrace.debugging._function.store import FunctionStore from ddtrace.debugging._metrics import metrics from ddtrace.debugging._probe.model import FunctionLocationMixin @@ -386,7 +386,7 @@ def 
_probe_injection_hook(self, module: ModuleType) -> None: # Group probes by function so that we decompile each function once and # bulk-inject the probes. - probes_for_function: Dict[FullyNamedWrappedFunction, List[Probe]] = defaultdict(list) + probes_for_function: Dict[FullyNamedContextWrappedFunction, List[Probe]] = defaultdict(list) for probe in self._probe_registry.get_pending(str(origin(module))): if not isinstance(probe, LineLocationMixin): continue @@ -410,7 +410,7 @@ def _probe_injection_hook(self, module: ModuleType) -> None: log.error(message) self._probe_registry.set_error(probe, "NoFunctionsAtLine", message) continue - for function in (cast(FullyNamedWrappedFunction, _) for _ in functions): + for function in (cast(FullyNamedContextWrappedFunction, _) for _ in functions): probes_for_function[function].append(cast(LineProbe, probe)) for function, probes in probes_for_function.items(): @@ -485,14 +485,14 @@ def _eject_probes(self, probes_to_eject: List[LineProbe]) -> None: module = self.__watchdog__.get_by_origin(resolved_source) if module is not None: # The module is still loaded, so we can try to eject the hooks - probes_for_function: Dict[FullyNamedWrappedFunction, List[LineProbe]] = defaultdict(list) + probes_for_function: Dict[FullyNamedContextWrappedFunction, List[LineProbe]] = defaultdict(list) for probe in probes: if not isinstance(probe, LineLocationMixin): continue line = probe.line assert line is not None, probe # nosec functions = FunctionDiscovery.from_module(module).at_line(line) - for function in (cast(FullyNamedWrappedFunction, _) for _ in functions): + for function in (cast(FullyNamedContextWrappedFunction, _) for _ in functions): probes_for_function[function].append(probe) for function, ps in probes_for_function.items(): @@ -599,7 +599,7 @@ def _unwrap_functions(self, probes: List[FunctionProbe]) -> None: context = cast(DebuggerWrappingContext, DebuggerWrappingContext.extract(function)) context.remove_probe(probe) if not 
context.has_probes(): - self._function_store.unwrap(cast(FullyNamedWrappedFunction, function)) + self._function_store.unwrap(cast(FullyNamedContextWrappedFunction, function)) log.debug("Unwrapped %r", registered_probe) else: log.error("Attempted to unwrap %r, but no wrapper found", registered_probe) diff --git a/ddtrace/debugging/_function/store.py b/ddtrace/debugging/_function/store.py index 9a17aae3b91..e11c75070a2 100644 --- a/ddtrace/debugging/_function/store.py +++ b/ddtrace/debugging/_function/store.py @@ -13,14 +13,14 @@ from ddtrace.internal.injection import HookType from ddtrace.internal.injection import eject_hooks from ddtrace.internal.injection import inject_hooks -from ddtrace.internal.wrapping import WrappedFunction +from ddtrace.internal.wrapping.context import ContextWrappedFunction from ddtrace.internal.wrapping.context import WrappingContext WrapperType = Callable[[FunctionType, Any, Any, Any], Any] -class FullyNamedWrappedFunction(FullyNamed, WrappedFunction): +class FullyNamedContextWrappedFunction(FullyNamed, ContextWrappedFunction): """A fully named wrapper function.""" @@ -54,17 +54,17 @@ def _store(self, function: FunctionType) -> None: if function not in self._code_map: self._code_map[function] = function.__code__ - def inject_hooks(self, function: FullyNamedWrappedFunction, hooks: List[HookInfoType]) -> Set[str]: + def inject_hooks(self, function: FullyNamedContextWrappedFunction, hooks: List[HookInfoType]) -> Set[str]: """Bulk-inject hooks into a function. Returns the set of probe IDs for those probes that failed to inject. 
""" try: - return self.inject_hooks(cast(FullyNamedWrappedFunction, function.__dd_wrapped__), hooks) + f = cast(FunctionType, cast(FullyNamedContextWrappedFunction, function.__dd_context_wrapped__.__wrapped__)) # type: ignore[union-attr] except AttributeError: f = cast(FunctionType, function) - self._store(f) - return {p.probe_id for _, _, p in inject_hooks(f, hooks)} + self._store(f) + return {p.probe_id for _, _, p in inject_hooks(f, hooks)} def eject_hooks(self, function: FunctionType, hooks: List[HookInfoType]) -> Set[str]: """Bulk-eject hooks from a function. @@ -72,15 +72,14 @@ def eject_hooks(self, function: FunctionType, hooks: List[HookInfoType]) -> Set[ Returns the set of probe IDs for those probes that failed to eject. """ try: - wrapped = cast(FullyNamedWrappedFunction, function).__dd_wrapped__ + f = cast(FullyNamedContextWrappedFunction, function).__dd_context_wrapped__.__wrapped__ # type: ignore[union-attr] except AttributeError: # Not a wrapped function so we can actually eject from it - return {p.probe_id for _, _, p in eject_hooks(function, hooks)} - else: - # Try on the wrapped function. 
- return self.eject_hooks(cast(FunctionType, wrapped), hooks) + f = function - def inject_hook(self, function: FullyNamedWrappedFunction, hook: HookType, line: int, arg: Any) -> bool: + return {p.probe_id for _, _, p in eject_hooks(cast(FunctionType, f), hooks)} + + def inject_hook(self, function: FullyNamedContextWrappedFunction, hook: HookType, line: int, arg: Any) -> bool: """Inject a hook into a function.""" return not not self.inject_hooks(function, [(hook, line, arg)]) @@ -94,7 +93,7 @@ def wrap(self, function: FunctionType, wrapping_context: WrappingContext) -> Non self._wrapper_map[function] = wrapping_context wrapping_context.wrap() - def unwrap(self, function: FullyNamedWrappedFunction) -> None: + def unwrap(self, function: FullyNamedContextWrappedFunction) -> None: """Unwrap a hook around a wrapped function.""" self._wrapper_map.pop(cast(FunctionType, function)).unwrap() From 56bdd61173b2e66fc884362b88db95834064cebe Mon Sep 17 00:00:00 2001 From: Munir Abdinur Date: Mon, 16 Dec 2024 16:22:57 -0500 Subject: [PATCH 46/78] fix(telemetry): avoids stopping periodic threads in telemetry writer atexit hooks (#11708) Resolves: https://github.com/DataDog/dd-trace-py/issues/11622 ## Checklist - [ ] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All 
changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/internal/telemetry/writer.py | 26 +++++++------------ .../telemetry-deadlocks-ea3f457ab0611c8b.yaml | 4 +++ 2 files changed, 14 insertions(+), 16 deletions(-) create mode 100644 releasenotes/notes/telemetry-deadlocks-ea3f457ab0611c8b.yaml diff --git a/ddtrace/internal/telemetry/writer.py b/ddtrace/internal/telemetry/writer.py index 63e672e24dd..71de6b03907 100644 --- a/ddtrace/internal/telemetry/writer.py +++ b/ddtrace/internal/telemetry/writer.py @@ -181,7 +181,6 @@ def __init__(self, is_periodic=True, agentless=None): self._forked = False # type: bool self._events_queue = [] # type: List[Dict] self._configuration_queue = {} # type: Dict[str, Dict] - self._lock = forksafe.Lock() # type: forksafe.ResetObject self._imported_dependencies: Dict[str, str] = dict() self._product_enablement = {product.value: False for product in TELEMETRY_APM_PRODUCT} self._send_product_change_updates = False @@ -245,10 +244,6 @@ def disable(self): """ self._enabled = False self.reset_queues() - if self._is_running(): - self.stop() - else: - self.status = ServiceStatus.STOPPED def enable_agentless_client(self, enabled=True): # type: (bool) -> None @@ -297,7 +292,7 @@ def add_integration(self, integration_name, patched, auto_patched=None, error_ms :param bool auto_enabled: True if module is enabled in _monkey.PATCH_MODULES """ # Integrations can be patched before the telemetry 
writer is enabled. - with self._lock: + with self._service_lock: if integration_name not in self._integrations_queue: self._integrations_queue[integration_name] = {"name": integration_name} @@ -382,20 +377,20 @@ def _app_integrations_changed_event(self, integrations): def _flush_integrations_queue(self): # type: () -> List[Dict] """Flushes and returns a list of all queued integrations""" - with self._lock: + with self._service_lock: integrations = list(self._integrations_queue.values()) self._integrations_queue = dict() return integrations def _flush_new_imported_dependencies(self) -> Set[str]: - with self._lock: + with self._service_lock: new_deps = modules.get_newly_imported_modules() return new_deps def _flush_configuration_queue(self): # type: () -> List[Dict] """Flushes and returns a list of all queued configurations""" - with self._lock: + with self._service_lock: configurations = list(self._configuration_queue.values()) self._configuration_queue = {} return configurations @@ -414,7 +409,7 @@ def _app_dependencies_loaded_event(self, newly_imported_deps: List[str]): if not _TelemetryConfig.DEPENDENCY_COLLECTION or not self._enabled: return - with self._lock: + with self._service_lock: packages = update_imported_dependencies(self._imported_dependencies, newly_imported_deps) if packages: @@ -451,7 +446,7 @@ def product_activated(self, product, enabled): self._send_product_change_updates = True def remove_configuration(self, configuration_name): - with self._lock: + with self._service_lock: del self._configuration_queue[configuration_name] def add_configuration(self, configuration_name, configuration_value, origin="unknown"): @@ -465,7 +460,7 @@ def add_configuration(self, configuration_name, configuration_value, origin="unk # convert unsupported types to strings configuration_value = str(configuration_value) - with self._lock: + with self._service_lock: self._configuration_queue[configuration_name] = { "name": configuration_name, "origin": origin, @@ -475,7 
+470,7 @@ def add_configuration(self, configuration_name, configuration_value, origin="unk def add_configurations(self, configuration_list): # type: (List[Tuple[str, Union[bool, float, str], str]]) -> None """Creates and queues a list of configurations""" - with self._lock: + with self._service_lock: for name, value, _origin in configuration_list: self._configuration_queue[name] = { "name": name, @@ -566,7 +561,7 @@ def add_distribution_metric(self, namespace, name, value=1.0, tags=None): def _flush_log_metrics(self): # type () -> Set[Metric] - with self._lock: + with self._service_lock: log_metrics = self._logs self._logs = set() return log_metrics @@ -652,7 +647,7 @@ def reset_queues(self): def _flush_events_queue(self): # type: () -> List[Dict] """Flushes and returns a list of all telemtery event""" - with self._lock: + with self._service_lock: events = self._events_queue self._events_queue = [] return events @@ -668,7 +663,6 @@ def _fork_writer(self): if self._is_running(): self.stop(join=False) - # Enable writer service in child process to avoid interpreter shutdown # error in Python 3.12 self.enable() diff --git a/releasenotes/notes/telemetry-deadlocks-ea3f457ab0611c8b.yaml b/releasenotes/notes/telemetry-deadlocks-ea3f457ab0611c8b.yaml new file mode 100644 index 00000000000..1fe2739767d --- /dev/null +++ b/releasenotes/notes/telemetry-deadlocks-ea3f457ab0611c8b.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + library: Resolves deadlocks that could occur when sending instrumentation telemetry data after an unhandled exception is raised. 
From 62766b047491e76b1fd41647c62d8f6b45e9b9aa Mon Sep 17 00:00:00 2001 From: Yun Kim <35776586+Yun-Kim@users.noreply.github.com> Date: Mon, 16 Dec 2024 16:43:57 -0500 Subject: [PATCH 47/78] chore(llmobs): ensure propagated parent IDs are still using span tags (#11745) This PR makes a small revert to #11543 where accessing propagated parent IDs (for distributed tracing) was unwittingly changed to access via the span store object, even though automatic context propagation results are always added to the span as tags (not the store). While all other LLMObs SDK data is added/accessed via the span store, `_dd.p.llmobs_parent_id` is automatically added by the tracer internals so we'll continue using this for now until our overall context management solution removes this problem entirely. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in
the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/llmobs/_integrations/base.py | 2 +- ddtrace/llmobs/_integrations/bedrock.py | 2 +- ddtrace/llmobs/_llmobs.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ddtrace/llmobs/_integrations/base.py b/ddtrace/llmobs/_integrations/base.py index 709e72f3a26..a6968ce0d83 100644 --- a/ddtrace/llmobs/_integrations/base.py +++ b/ddtrace/llmobs/_integrations/base.py @@ -133,7 +133,7 @@ def trace(self, pin: Pin, operation_id: str, submit_to_llmobs: bool = False, **k # The LLMObs parent ID tag is not set at span start time. We need to manually set the parent ID tag now # in these cases to avoid conflicting with the later propagated tags. parent_id = _get_llmobs_parent_id(span) or "undefined" - span.set_tag_str(PARENT_ID_KEY, str(parent_id)) + span._set_ctx_item(PARENT_ID_KEY, str(parent_id)) return span @classmethod diff --git a/ddtrace/llmobs/_integrations/bedrock.py b/ddtrace/llmobs/_integrations/bedrock.py index bf8b020ebea..d2d57b50ed3 100644 --- a/ddtrace/llmobs/_integrations/bedrock.py +++ b/ddtrace/llmobs/_integrations/bedrock.py @@ -36,7 +36,7 @@ def _llmobs_set_tags( operation: str = "", ) -> None: """Extract prompt/response tags from a completion and set them as temporary "_ml_obs.*" tags.""" - if span._get_ctx_item(PROPAGATED_PARENT_ID_KEY) is None: + if span.get_tag(PROPAGATED_PARENT_ID_KEY) is None: parent_id = _get_llmobs_parent_id(span) or "undefined" span._set_ctx_item(PARENT_ID_KEY, parent_id) parameters = {} diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py index 867edbdca4f..49815151118 100644 --- a/ddtrace/llmobs/_llmobs.py +++ b/ddtrace/llmobs/_llmobs.py @@ -410,7 +410,7 @@ def _start_span( if ml_app is None: ml_app = _get_ml_app(span) span._set_ctx_item(ML_APP, ml_app) - if 
span._get_ctx_item(PROPAGATED_PARENT_ID_KEY) is None: + if span.get_tag(PROPAGATED_PARENT_ID_KEY) is None: # For non-distributed traces or spans in the first service of a distributed trace, # The LLMObs parent ID tag is not set at span start time. We need to manually set the parent ID tag now # in these cases to avoid conflicting with the later propagated tags. From 6de35c8638ce9da853d51af53d283822c7b72682 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Mon, 16 Dec 2024 17:06:28 -0500 Subject: [PATCH 48/78] ci: add clearer breakdown of test stages and dependencies between jobs (#11712) --- .gitlab/tests.yml | 11 +++++++---- scripts/gen_gitlab_config.py | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.gitlab/tests.yml b/.gitlab/tests.yml index 83a5d4231b8..4495c6fa6a6 100644 --- a/.gitlab/tests.yml +++ b/.gitlab/tests.yml @@ -1,5 +1,7 @@ stages: - - tests + - precheck + - hatch + - riot variables: RIOT_RUN_CMD: riot -P -v run --exitfirst --pass-env -s @@ -22,7 +24,7 @@ variables: .test_base_hatch: extends: .testrunner - stage: tests + stage: hatch # Hatch doesn't use pre-built wheels or venvs so we can start them right away needs: [] parallel: 4 @@ -57,7 +59,7 @@ variables: build_base_venvs: extends: .testrunner - stage: tests + stage: riot parallel: matrix: - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] @@ -76,9 +78,10 @@ build_base_venvs: - ddtrace/internal/datadog/profiling/crashtracker/crashtracker_exe* - ddtrace/internal/datadog/profiling/test/test_* +# Do not define a `needs:` in order to depend on the whole `precheck` stage .test_base_riot: extends: .testrunner - stage: tests + stage: riot needs: [ build_base_venvs ] parallel: 4 services: diff --git a/scripts/gen_gitlab_config.py b/scripts/gen_gitlab_config.py index 2b139ce798d..c868b0f1c86 100644 --- a/scripts/gen_gitlab_config.py +++ b/scripts/gen_gitlab_config.py @@ -114,7 +114,7 @@ def check(name: str, command: str, paths: t.Set[str]) -> None: with 
TESTS_GEN.open("a") as f: print(f'"{name}":', file=f) print(" extends: .testrunner", file=f) - print(" stage: tests", file=f) + print(" stage: precheck", file=f) print(" needs: []", file=f) print(" script:", file=f) print(f" - {command}", file=f) From 05302eb92d28ea1f9277aabdd5067babea65a455 Mon Sep 17 00:00:00 2001 From: Romain Komorn <136473744+romainkomorndatadog@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:44:36 +0000 Subject: [PATCH 49/78] chore(ci_visibility): add support for pytest_bdd to v2 plugin (#11753) This adds support for the `pytest-bdd` plugin to the new version of the `pytest` plugin, bypassing the `ddtrace.pytest_bdd` plugin and instantiating a "subplugin" on demand when `pytest-bdd` is detected and `ddtrace` is enabled. To support the way the legacy `pytest-bdd` plugin worked, new functionality is added: - `InternalTestSession.get_tracer()` to have access to the tracer used by Test Visibility - `InternalTest.overwrite_attributes()` to allow overwriting attributes that have already been set (or in the case of the `suite` name, that are automatically set based on test hierarchy) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking 
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/contrib/pytest/_plugin_v2.py | 6 + .../contrib/pytest/_pytest_bdd_subplugin.py | 110 ++++++++++++++++++ ddtrace/contrib/pytest_bdd/plugin.py | 17 ++- ddtrace/internal/ci_visibility/api/_test.py | 22 ++++ ddtrace/internal/ci_visibility/recorder.py | 21 ++++ ddtrace/internal/test_visibility/api.py | 40 ++++++- riotfile.py | 27 ++++- tests/contrib/pytest_bdd/test_pytest_bdd.py | 51 +------- 8 files changed, 238 insertions(+), 56 deletions(-) create mode 100644 ddtrace/contrib/pytest/_pytest_bdd_subplugin.py diff --git a/ddtrace/contrib/pytest/_plugin_v2.py b/ddtrace/contrib/pytest/_plugin_v2.py index e51739ccee9..f1da8d2db11 100644 --- a/ddtrace/contrib/pytest/_plugin_v2.py +++ b/ddtrace/contrib/pytest/_plugin_v2.py @@ -196,6 +196,12 @@ def pytest_configure(config: pytest_Config) -> None: enable_test_visibility(config=dd_config.pytest) if _is_pytest_cov_enabled(config): patch_coverage() + + # pytest-bdd plugin support + if config.pluginmanager.hasplugin("pytest-bdd"): + from ddtrace.contrib.pytest._pytest_bdd_subplugin import _PytestBddSubPlugin + + config.pluginmanager.register(_PytestBddSubPlugin(), "_datadog-pytest-bdd") else: # If the pytest ddtrace plugin is not enabled, we should disable CI Visibility, as it was enabled during # pytest_load_initial_conftests diff --git a/ddtrace/contrib/pytest/_pytest_bdd_subplugin.py b/ddtrace/contrib/pytest/_pytest_bdd_subplugin.py new file mode 100644 index 00000000000..7c964af3d59 --- 
/dev/null +++ b/ddtrace/contrib/pytest/_pytest_bdd_subplugin.py @@ -0,0 +1,110 @@ +"""Provides functionality to support the pytest-bdd plugin as part of the ddtrace integration + +NOTE: This replaces the previous ddtrace.pytest_bdd plugin. + +This plugin mainly modifies the names of the test, its suite, and parameters. It does not, however modify the tests' +suite from the perspective of Test Visibility data. + +The plugin is only instantiated and added if the pytest-bdd plugin itself is installed and enabled, because the hook +implementations will cause errors unless the hookspecs are added by the original plugin. +""" +from pathlib import Path +import sys + +import pytest + +from ddtrace.contrib.pytest._utils import _get_test_id_from_item +from ddtrace.contrib.pytest_bdd import get_version +from ddtrace.contrib.pytest_bdd._plugin import _extract_span +from ddtrace.contrib.pytest_bdd._plugin import _get_step_func_args_json +from ddtrace.contrib.pytest_bdd._plugin import _store_span +from ddtrace.contrib.pytest_bdd.constants import FRAMEWORK +from ddtrace.contrib.pytest_bdd.constants import STEP_KIND +from ddtrace.ext import test +from ddtrace.internal.logger import get_logger +from ddtrace.internal.test_visibility.api import InternalTest +from ddtrace.internal.test_visibility.api import InternalTestSession + + +log = get_logger(__name__) + + +def _get_workspace_relative_path(feature_path_str: str) -> Path: + feature_path = Path(feature_path_str).resolve() + workspace_path = InternalTestSession.get_workspace_path() + if workspace_path: + try: + return feature_path.relative_to(workspace_path) + except ValueError: # noqa: E722 + log.debug("Feature path %s is not relative to workspace path %s", feature_path, workspace_path) + return feature_path + + +class _PytestBddSubPlugin: + def __init__(self): + self.framework_version = get_version() + + @staticmethod + @pytest.hookimpl(tryfirst=True) + def pytest_bdd_before_scenario(request, feature, scenario): + test_id = 
_get_test_id_from_item(request.node) + feature_path = _get_workspace_relative_path(scenario.feature.filename) + codeowners = InternalTestSession.get_path_codeowners(feature_path) + + InternalTest.overwrite_attributes( + test_id, name=scenario.name, suite_name=str(feature_path), codeowners=codeowners + ) + + @pytest.hookimpl(tryfirst=True) + def pytest_bdd_before_step(self, request, feature, scenario, step, step_func): + feature_test_id = _get_test_id_from_item(request.node) + + feature_span = InternalTest.get_span(feature_test_id) + + tracer = InternalTestSession.get_tracer() + if tracer is None: + return + + span = tracer.start_span( + step.type, + resource=step.name, + span_type=STEP_KIND, + child_of=feature_span, + activate=True, + ) + span.set_tag_str("component", "pytest_bdd") + + span.set_tag(test.FRAMEWORK, FRAMEWORK) + span.set_tag(test.FRAMEWORK_VERSION, self.framework_version) + + feature_path = _get_workspace_relative_path(scenario.feature.filename) + + span.set_tag(test.FILE, str(feature_path)) + span.set_tag(test.CODEOWNERS, InternalTestSession.get_path_codeowners(feature_path)) + + _store_span(step_func, span) + + @staticmethod + @pytest.hookimpl(trylast=True) + def pytest_bdd_after_step(request, feature, scenario, step, step_func, step_func_args): + span = _extract_span(step_func) + if span is not None: + step_func_args_json = _get_step_func_args_json(step, step_func, step_func_args) + if step_func_args: + span.set_tag(test.PARAMETERS, step_func_args_json) + span.finish() + + @staticmethod + def pytest_bdd_step_error(request, feature, scenario, step, step_func, step_func_args, exception): + span = _extract_span(step_func) + if span is not None: + if hasattr(exception, "__traceback__"): + tb = exception.__traceback__ + else: + # PY2 compatibility workaround + _, _, tb = sys.exc_info() + step_func_args_json = _get_step_func_args_json(step, step_func, step_func_args) + if step_func_args: + span.set_tag(test.PARAMETERS, step_func_args_json) + 
span.set_exc_info(type(exception), exception, tb) + span.finish() diff --git a/ddtrace/contrib/pytest_bdd/plugin.py b/ddtrace/contrib/pytest_bdd/plugin.py index 68da4a3a3c5..1dc714c89c5 100644 --- a/ddtrace/contrib/pytest_bdd/plugin.py +++ b/ddtrace/contrib/pytest_bdd/plugin.py @@ -1,9 +1,20 @@ +from ddtrace import DDTraceDeprecationWarning +from ddtrace.contrib.pytest._utils import _USE_PLUGIN_V2 from ddtrace.contrib.pytest.plugin import is_enabled as is_ddtrace_enabled +from ddtrace.vendor.debtcollector import deprecate def pytest_configure(config): if config.pluginmanager.hasplugin("pytest-bdd") and config.pluginmanager.hasplugin("ddtrace"): - if is_ddtrace_enabled(config): - from ._plugin import _PytestBddPlugin + if not _USE_PLUGIN_V2: + if is_ddtrace_enabled(config): + from ._plugin import _PytestBddPlugin - config.pluginmanager.register(_PytestBddPlugin(), "_datadog-pytest-bdd") + deprecate( + "the ddtrace.pytest_bdd plugin is deprecated", + message="it will be integrated with the main pytest ddtrace plugin", + removal_version="3.0.0", + category=DDTraceDeprecationWarning, + ) + + config.pluginmanager.register(_PytestBddPlugin(), "_datadog-pytest-bdd") diff --git a/ddtrace/internal/ci_visibility/api/_test.py b/ddtrace/internal/ci_visibility/api/_test.py index 7a61473ff92..c63d9753eb8 100644 --- a/ddtrace/internal/ci_visibility/api/_test.py +++ b/ddtrace/internal/ci_visibility/api/_test.py @@ -85,6 +85,9 @@ def __init__( self._is_benchmark = False self._benchmark_duration_data: Optional[BenchmarkDurationData] = None + # Some parameters can be overwritten: + self._overwritten_suite_name: Optional[str] = None + def __repr__(self) -> str: suite_name = self.parent.name if self.parent is not None else "none" module_name = self.parent.parent.name if self.parent is not None and self.parent.parent is not None else "none" @@ -102,6 +105,9 @@ def _set_item_tags(self) -> None: if self._is_benchmark: self.set_tag(test.TYPE, BENCHMARK) + if self._overwritten_suite_name is 
not None: + self.set_tag(test.SUITE, self._overwritten_suite_name) + def _set_efd_tags(self) -> None: if self._efd_is_retry: self.set_tag(TEST_IS_RETRY, self._efd_is_retry) @@ -202,6 +208,22 @@ def finish_itr_skipped(self) -> None: self.mark_itr_skipped() self.finish_test(TestStatus.SKIP) + def overwrite_attributes( + self, + name: Optional[str] = None, + suite_name: Optional[str] = None, + parameters: Optional[str] = None, + codeowners: Optional[List[str]] = None, + ) -> None: + if name is not None: + self.name = name + if suite_name is not None: + self._overwritten_suite_name = suite_name + if parameters is not None: + self.set_parameters(parameters) + if codeowners is not None: + self._codeowners = codeowners + def add_coverage_data(self, coverage_data: Dict[Path, CoverageLines]) -> None: self._coverage_data.add_covered_files(coverage_data) diff --git a/ddtrace/internal/ci_visibility/recorder.py b/ddtrace/internal/ci_visibility/recorder.py index b306a1e7912..0046c21be15 100644 --- a/ddtrace/internal/ci_visibility/recorder.py +++ b/ddtrace/internal/ci_visibility/recorder.py @@ -79,6 +79,7 @@ from ddtrace.internal.test_visibility._efd_mixins import EFDTestStatus from ddtrace.internal.test_visibility._internal_item_ids import InternalTestId from ddtrace.internal.test_visibility._itr_mixins import ITRMixin +from ddtrace.internal.test_visibility.api import InternalTest from ddtrace.internal.test_visibility.coverage_lines import CoverageLines from ddtrace.internal.utils.formats import asbool from ddtrace.internal.utils.http import verify_url @@ -1008,6 +1009,12 @@ def _on_session_get_codeowners() -> Optional[Codeowners]: return CIVisibility.get_codeowners() +@_requires_civisibility_enabled +def _on_session_get_tracer() -> Optional[Tracer]: + log.debug("Getting tracer") + return CIVisibility.get_tracer() + + @_requires_civisibility_enabled def _on_session_is_atr_enabled() -> bool: log.debug("Getting Auto Test Retries enabled") @@ -1041,6 +1048,7 @@ def 
_register_session_handlers(): core.on("test_visibility.session.start", _on_start_session) core.on("test_visibility.session.finish", _on_finish_session) core.on("test_visibility.session.get_codeowners", _on_session_get_codeowners, "codeowners") + core.on("test_visibility.session.get_tracer", _on_session_get_tracer, "tracer") core.on("test_visibility.session.get_path_codeowners", _on_session_get_path_codeowners, "path_codeowners") core.on("test_visibility.session.get_workspace_path", _on_session_get_workspace_path, "workspace_path") core.on("test_visibility.session.is_atr_enabled", _on_session_is_atr_enabled, "is_atr_enabled") @@ -1191,6 +1199,18 @@ def _on_set_benchmark_data(set_benchmark_data_args: BenchmarkTestMixin.SetBenchm CIVisibility.get_test_by_id(item_id).set_benchmark_data(data, is_benchmark) +@_requires_civisibility_enabled +def _on_test_overwrite_attributes(overwrite_attribute_args: InternalTest.OverwriteAttributesArgs): + item_id = overwrite_attribute_args.test_id + name = overwrite_attribute_args.name + suite_name = overwrite_attribute_args.suite_name + parameters = overwrite_attribute_args.parameters + codeowners = overwrite_attribute_args.codeowners + + log.debug("Handling overwrite attributes: %s", overwrite_attribute_args) + CIVisibility.get_test_by_id(item_id).overwrite_attributes(name, suite_name, parameters, codeowners) + + def _register_test_handlers(): log.debug("Registering test handlers") core.on("test_visibility.test.discover", _on_discover_test) @@ -1199,6 +1219,7 @@ def _register_test_handlers(): core.on("test_visibility.test.finish", _on_finish_test) core.on("test_visibility.test.set_parameters", _on_set_test_parameters) core.on("test_visibility.test.set_benchmark_data", _on_set_benchmark_data) + core.on("test_visibility.test.overwrite_attributes", _on_test_overwrite_attributes) @_requires_civisibility_enabled diff --git a/ddtrace/internal/test_visibility/api.py b/ddtrace/internal/test_visibility/api.py index d66dbcc32c7..84f559a4701 
100644 --- a/ddtrace/internal/test_visibility/api.py +++ b/ddtrace/internal/test_visibility/api.py @@ -3,6 +3,7 @@ from typing import NamedTuple from ddtrace import Span +from ddtrace import Tracer from ddtrace.ext.test_visibility import api as ext_api from ddtrace.ext.test_visibility._test_visibility_base import TestSessionId from ddtrace.ext.test_visibility._utils import _catch_and_log_exceptions @@ -73,7 +74,15 @@ def get_codeowners() -> t.Optional[_Codeowners]: @staticmethod @_catch_and_log_exceptions - def get_workspace_path() -> Path: + def get_tracer() -> t.Optional[Tracer]: + log.debug("Getting test session tracer") + tracer: t.Optional[Tracer] = core.dispatch_with_results("test_visibility.session.get_tracer").tracer.value + log.debug("Got test session tracer: %s", tracer) + return tracer + + @staticmethod + @_catch_and_log_exceptions + def get_workspace_path() -> t.Optional[Path]: log.debug("Getting session workspace path") workspace_path: Path = core.dispatch_with_results( @@ -165,3 +174,32 @@ def is_new_test(item_id: InternalTestId) -> bool: is_new = bool(core.dispatch_with_results("test_visibility.test.is_new", (item_id,)).is_new.value) log.debug("Test %s is new: %s", item_id, is_new) return is_new + + class OverwriteAttributesArgs(NamedTuple): + test_id: InternalTestId + name: t.Optional[str] = None + suite_name: t.Optional[str] = None + parameters: t.Optional[str] = None + codeowners: t.Optional[t.List[str]] = None + + @staticmethod + @_catch_and_log_exceptions + def overwrite_attributes( + item_id: InternalTestId, + name: t.Optional[str] = None, + suite_name: t.Optional[str] = None, + parameters: t.Optional[str] = None, + codeowners: t.Optional[t.List[str]] = None, + ): + log.debug( + "Overwriting attributes for test %s: name=%s" ", suite_name=%s" ", parameters=%s" ", codeowners=%s", + item_id, + name, + suite_name, + parameters, + codeowners, + ) + core.dispatch( + "test_visibility.test.overwrite_attributes", + 
(InternalTest.OverwriteAttributesArgs(item_id, name, suite_name, parameters, codeowners),), + ) diff --git a/riotfile.py b/riotfile.py index 653023da524..567a5f65d66 100644 --- a/riotfile.py +++ b/riotfile.py @@ -1695,9 +1695,6 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "more_itertools": "<8.11.0", "pytest-randomly": latest, }, - env={ - "DD_PYTEST_USE_NEW_PLUGIN_BETA": "0", - }, venvs=[ Venv( pys=select_pys(min_version="3.7", max_version="3.9"), @@ -1708,6 +1705,18 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ">=6.0,<6.1", ] }, + venvs=[ + Venv( + env={ + "DD_PYTEST_USE_NEW_PLUGIN_BETA": "0", + }, + ), + Venv( + env={ + "DD_PYTEST_USE_NEW_PLUGIN_BETA": "1", + }, + ), + ], ), Venv( pys=select_pys(min_version="3.10"), @@ -1718,6 +1727,18 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ">=6.0,<6.1", ] }, + venvs=[ + Venv( + env={ + "DD_PYTEST_USE_NEW_PLUGIN_BETA": "0", + }, + ), + Venv( + env={ + "DD_PYTEST_USE_NEW_PLUGIN_BETA": "1", + }, + ), + ], ), ], ), diff --git a/tests/contrib/pytest_bdd/test_pytest_bdd.py b/tests/contrib/pytest_bdd/test_pytest_bdd.py index e8e4e14172b..edf3ab90454 100644 --- a/tests/contrib/pytest_bdd/test_pytest_bdd.py +++ b/tests/contrib/pytest_bdd/test_pytest_bdd.py @@ -1,22 +1,12 @@ import json import os -from unittest import mock -import pytest - -import ddtrace from ddtrace.constants import ERROR_MSG -from ddtrace.contrib.pytest.plugin import is_enabled from ddtrace.contrib.pytest_bdd._plugin import _get_step_func_args_json from ddtrace.contrib.pytest_bdd._plugin import get_version from ddtrace.ext import test -from ddtrace.internal.ci_visibility import CIVisibility -from ddtrace.internal.ci_visibility._api_client import TestVisibilityAPISettings -from tests.ci_visibility.util import _patch_dummy_writer from tests.contrib.patch import emit_integration_and_version_to_test_agent -from tests.utils import 
DummyCIVisibilityWriter -from tests.utils import TracerTestCase -from tests.utils import override_env +from tests.contrib.pytest.test_pytest import PytestTestCaseBase _SIMPLE_SCENARIO = """ @@ -28,44 +18,7 @@ """ -class TestPytest(TracerTestCase): - @pytest.fixture(autouse=True) - def fixtures(self, testdir, monkeypatch): - self.testdir = testdir - self.monkeypatch = monkeypatch - - @pytest.fixture(autouse=True) - def _dummy_check_enabled_features(self): - """By default, assume that _check_enabled_features() returns an ITR-disabled response. - - Tests that need a different response should re-patch the CIVisibility object. - """ - with mock.patch( - "ddtrace.internal.ci_visibility.recorder.CIVisibility._check_enabled_features", - return_value=TestVisibilityAPISettings(False, False, False, False), - ): - yield - - def inline_run(self, *args): - """Execute test script with test tracer.""" - - class CIVisibilityPlugin: - @staticmethod - def pytest_configure(config): - if is_enabled(config): - with _patch_dummy_writer(): - assert CIVisibility.enabled - CIVisibility.disable() - CIVisibility.enable(tracer=self.tracer, config=ddtrace.config.pytest) - - with override_env(dict(DD_API_KEY="foobar.baz")): - self.tracer.configure(writer=DummyCIVisibilityWriter("https://citestcycle-intake.banana")) - return self.testdir.inline_run(*args, plugins=[CIVisibilityPlugin()]) - - def subprocess_run(self, *args): - """Execute test script with test tracer.""" - return self.testdir.runpytest_subprocess(*args) - +class TestPytest(PytestTestCaseBase): def test_and_emit_get_version(self): version = get_version() assert isinstance(version, str) From a4f0e380a459542795f2d881895f18765211893b Mon Sep 17 00:00:00 2001 From: Alberto Vara Date: Tue, 17 Dec 2024 12:57:28 +0100 Subject: [PATCH 50/78] fix(iast): check context is enabled in request and builtins patched functions (#11752) This fix resolves an issue where AppSec was using a patched request and builtins functions, creating telemetry errors.
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/appsec/_common_module_patches.py | 16 ++++++++++++---- ...ast-propagation-error-2-ba4a998133269a7c.yaml | 5 +++++ 2 files changed, 17 insertions(+), 4 deletions(-) create mode 100644 releasenotes/notes/iast-fix-iast-propagation-error-2-ba4a998133269a7c.yaml diff --git a/ddtrace/appsec/_common_module_patches.py b/ddtrace/appsec/_common_module_patches.py index a5ab2d1533d..e7ce12d13e9 100644 --- a/ddtrace/appsec/_common_module_patches.py +++ b/ddtrace/appsec/_common_module_patches.py @@ -60,8 +60,10 @@ def wrapped_read_F3E51D71B4EC16EF(original_read_callable, instance, args, 
kwargs """ wrapper for _io.BytesIO and _io.StringIO read function """ + from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled + result = original_read_callable(*args, **kwargs) - if asm_config._iast_enabled: + if asm_config._iast_enabled and is_iast_request_enabled(): from ddtrace.appsec._iast._taint_tracking import OriginType from ddtrace.appsec._iast._taint_tracking import Source from ddtrace.appsec._iast._taint_tracking import get_tainted_ranges @@ -87,7 +89,9 @@ def wrapped_open_CFDDB7ABBA9081B6(original_open_callable, instance, args, kwargs """ wrapper for open file function """ - if asm_config._iast_enabled: + from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled + + if asm_config._iast_enabled and is_iast_request_enabled(): try: from ddtrace.appsec._iast.taint_sinks.path_traversal import check_and_report_path_traversal @@ -176,7 +180,9 @@ def wrapped_request_D8CB81E472AF98A2(original_request_callable, instance, args, wrapper for third party requests.request function https://requests.readthedocs.io """ - if asm_config._iast_enabled: + from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled + + if asm_config._iast_enabled and is_iast_request_enabled(): from ddtrace.appsec._iast.taint_sinks.ssrf import _iast_report_ssrf _iast_report_ssrf(original_request_callable, *args, **kwargs) @@ -216,7 +222,9 @@ def wrapped_system_5542593D237084A7(original_command_callable, instance, args, k """ command = args[0] if args else kwargs.get("command", None) if command is not None: - if asm_config._iast_enabled: + from ddtrace.appsec._iast._iast_request_context import is_iast_request_enabled + + if asm_config._iast_enabled and is_iast_request_enabled(): from ddtrace.appsec._iast.taint_sinks.command_injection import _iast_report_cmdi _iast_report_cmdi(command) diff --git a/releasenotes/notes/iast-fix-iast-propagation-error-2-ba4a998133269a7c.yaml 
b/releasenotes/notes/iast-fix-iast-propagation-error-2-ba4a998133269a7c.yaml new file mode 100644 index 00000000000..4918edb17a7 --- /dev/null +++ b/releasenotes/notes/iast-fix-iast-propagation-error-2-ba4a998133269a7c.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + ASM: This fix resolves an issue where AppSec was using a patched request and builtins functions, + creating telemetry errors. \ No newline at end of file From 7b77598f43fea1a408dc162c18b25357b8c1191f Mon Sep 17 00:00:00 2001 From: Dylan Burns Date: Tue, 17 Dec 2024 11:49:00 -0500 Subject: [PATCH 51/78] fix(datastreams): botocore - log warning on kinesis stream metadata not found (#11647) For Data Streams Monitoring with AWS Kinesis, the map on the DSM page will break if `StreamARN` isn't provided to the Kinesis `get_records` call. This is because dd-trace-py requires the `StreamARN` when generating a consume checkpoint for an in-edge in the data streams map. If there's no consume checkpoint, the data streams map incorrectly renders as two graphs instead of one connected graph. However, the Kinesis `get_records` API lists `StreamARN` as an optional parameter (see [docs](https://boto3.amazonaws.com/v1/documentation/api/1.35.9/reference/services/kinesis/client/get_records.html)). If it isn't provided, dd-trace-py only outputs a debug-level log, and the DSM map is rendered incorrectly. This PR ensures dd-trace-py outputs a warning that is more visible to the developer. This PR can be closed if the current state of error handling is acceptable, since I don't know if there are restrictions on logging warnings for this repo. The warning helps to debug the broken DSM map, which is time-consuming to debug otherwise. No testing updates are required since there isn't a unit test file for this specific module. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/internal/datastreams/botocore.py | 8 ++++++-- ...inesis-stream-metadata-not-found-a921cabed5d4397e.yaml | 3 +++ 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/log-warning-on-kinesis-stream-metadata-not-found-a921cabed5d4397e.yaml diff --git a/ddtrace/internal/datastreams/botocore.py b/ddtrace/internal/datastreams/botocore.py index ec004f1ff9a..aeafa70ec2e 100644 --- a/ddtrace/internal/datastreams/botocore.py +++ b/ddtrace/internal/datastreams/botocore.py @@ -187,6 +187,10 @@ def handle_sqs_receive(_, params, result, *args): 
log.debug("Error receiving SQS message with data streams monitoring enabled", exc_info=True) +class StreamMetadataNotFound(Exception): + pass + + def record_data_streams_path_for_kinesis_stream(params, time_estimate, context_json, record): from . import data_streams_processor as processor @@ -194,7 +198,7 @@ def record_data_streams_path_for_kinesis_stream(params, time_estimate, context_j if not stream: log.debug("Unable to determine StreamARN and/or StreamName for request with params: ", params) - return + raise StreamMetadataNotFound() payload_size = calculate_kinesis_payload_size(record) ctx = DsmPathwayCodec.decode(context_json, processor()) @@ -210,7 +214,7 @@ def handle_kinesis_receive(_, params, time_estimate, context_json, record, *args try: record_data_streams_path_for_kinesis_stream(params, time_estimate, context_json, record) except Exception: - log.debug("Failed to report data streams monitoring info for kinesis", exc_info=True) + log.warning("Failed to report data streams monitoring info for kinesis", exc_info=True) if config._data_streams_enabled: diff --git a/releasenotes/notes/log-warning-on-kinesis-stream-metadata-not-found-a921cabed5d4397e.yaml b/releasenotes/notes/log-warning-on-kinesis-stream-metadata-not-found-a921cabed5d4397e.yaml new file mode 100644 index 00000000000..ed0dda53ea8 --- /dev/null +++ b/releasenotes/notes/log-warning-on-kinesis-stream-metadata-not-found-a921cabed5d4397e.yaml @@ -0,0 +1,3 @@ +fixes: + - | + datastreams: Logs at warning level for Kinesis errors that break the Data Streams Monitoring map. From 29ccfdc897d8cb239396f70aea55dcb9df7cb8e0 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Tue, 17 Dec 2024 16:58:53 +0000 Subject: [PATCH 52/78] chore(di): cache function code pair resolution (#11757) We make sure to cache the result of the function code pair resolution for subsequent calls. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_function/discovery.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ddtrace/debugging/_function/discovery.py b/ddtrace/debugging/_function/discovery.py index e7d37246f5f..6a259f0f93c 100644 --- a/ddtrace/debugging/_function/discovery.py +++ b/ddtrace/debugging/_function/discovery.py @@ -159,7 +159,8 @@ def resolve(self) -> FullyNamedFunction: msg = f"Multiple functions found for code object {code}" raise ValueError(msg) - f = cast(FullyNamedFunction, functions[0]) + self.function = _f = functions[0] + f = cast(FullyNamedFunction, _f) 
f.__fullname__ = f"{f.__module__}.{f.__qualname__}" return f @@ -254,6 +255,7 @@ def __init__(self, module: ModuleType) -> None: if hasattr(module, "__dd_code__"): for code in module.__dd_code__: fcp = _FunctionCodePair(code=code) + if PYTHON_VERSION_INFO >= (3, 11): # From this version of Python we can derive the qualified # name of the function directly from the code object. @@ -261,8 +263,9 @@ def __init__(self, module: ModuleType) -> None: self._fullname_index[fullname] = fcp else: self._name_index[code.co_name].append(fcp) + for lineno in linenos(code): - self[lineno].append(_FunctionCodePair(code=code)) + self[lineno].append(fcp) else: # If the module was already loaded we don't have its code object seen_functions = set() From 01d9b50f8819fed2345e9bab93d9828fa8966ebe Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Tue, 17 Dec 2024 18:04:30 +0100 Subject: [PATCH 53/78] feat(asm): standalone sca billing (#11655) --- ddtrace/_trace/tracer.py | 6 +- ...andalone-sca-billing-925c84d69fe061ce.yaml | 4 + tests/appsec/appsec/test_asm_standalone.py | 134 +++++- tests/tracer/test_propagation.py | 441 ++++++++++-------- tests/tracer/test_tracer.py | 48 +- 5 files changed, 390 insertions(+), 243 deletions(-) create mode 100644 releasenotes/notes/feat-standalone-sca-billing-925c84d69fe061ce.yaml diff --git a/ddtrace/_trace/tracer.py b/ddtrace/_trace/tracer.py index 8c82efbdf37..6027976d6dc 100644 --- a/ddtrace/_trace/tracer.py +++ b/ddtrace/_trace/tracer.py @@ -236,7 +236,9 @@ def __init__( self._iast_enabled = asm_config._iast_enabled self._appsec_standalone_enabled = asm_config._appsec_standalone_enabled self._dogstatsd_url = agent.get_stats_url() if dogstatsd_url is None else dogstatsd_url - self._apm_opt_out = (self._asm_enabled or self._iast_enabled) and self._appsec_standalone_enabled + self._apm_opt_out = self._appsec_standalone_enabled and ( + self._asm_enabled or self._iast_enabled or config._sca_enabled + ) if self._apm_opt_out: self.enabled = False # Disable 
compute stats (neither agent or tracer should compute them) @@ -498,7 +500,7 @@ def configure( if appsec_standalone_enabled is not None: self._appsec_standalone_enabled = asm_config._appsec_standalone_enabled = appsec_standalone_enabled - if self._appsec_standalone_enabled and (self._asm_enabled or self._iast_enabled): + if self._appsec_standalone_enabled and (self._asm_enabled or self._iast_enabled or config._sca_enabled): self._apm_opt_out = True self.enabled = False # Disable compute stats (neither agent or tracer should compute them) diff --git a/releasenotes/notes/feat-standalone-sca-billing-925c84d69fe061ce.yaml b/releasenotes/notes/feat-standalone-sca-billing-925c84d69fe061ce.yaml new file mode 100644 index 00000000000..733aaea6262 --- /dev/null +++ b/releasenotes/notes/feat-standalone-sca-billing-925c84d69fe061ce.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + ASM: This introduces "Standalone SCA billing", opting out for APM billing and applying to only SCA. Enable this by setting these two environment variables: ``DD_APPSEC_SCA_ENABLED`` and ``DD_EXPERIMENTAL_APPSEC_STANDALONE_ENABLED`` diff --git a/tests/appsec/appsec/test_asm_standalone.py b/tests/appsec/appsec/test_asm_standalone.py index 31624724069..6841314cea8 100644 --- a/tests/appsec/appsec/test_asm_standalone.py +++ b/tests/appsec/appsec/test_asm_standalone.py @@ -1,41 +1,145 @@ #!/usr/bin/env python3 +import copy + import pytest +import ddtrace from ddtrace.contrib.trace_utils import set_http_meta from ddtrace.ext import SpanTypes +from tests.utils import override_env @pytest.fixture( params=[ - {"iast_enabled": True, "appsec_enabled": True, "appsec_standalone_enabled": True}, - {"iast_enabled": True, "appsec_enabled": True, "appsec_standalone_enabled": False}, - {"iast_enabled": True, "appsec_enabled": False, "appsec_standalone_enabled": False}, - {"iast_enabled": True, "appsec_enabled": False, "appsec_standalone_enabled": True}, - {"iast_enabled": False, "appsec_enabled": True, 
"appsec_standalone_enabled": True}, - {"iast_enabled": False, "appsec_enabled": True, "appsec_standalone_enabled": False}, - {"iast_enabled": False, "appsec_enabled": False, "appsec_standalone_enabled": False}, - {"iast_enabled": False, "appsec_enabled": False, "appsec_standalone_enabled": True}, - {"appsec_enabled": True}, - {"appsec_enabled": False}, - {"iast_enabled": True}, - {"iast_enabled": False}, + {"DD_APPSEC_SCA_ENABLED": "1", "iast_enabled": True, "appsec_enabled": True, "appsec_standalone_enabled": True}, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": True, + "appsec_enabled": True, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": True, + "appsec_enabled": False, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": True, + "appsec_enabled": False, + "appsec_standalone_enabled": True, + }, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": False, + "appsec_enabled": True, + "appsec_standalone_enabled": True, + }, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": False, + "appsec_enabled": True, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": False, + "appsec_enabled": False, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "1", + "iast_enabled": False, + "appsec_enabled": False, + "appsec_standalone_enabled": True, + }, + {"DD_APPSEC_SCA_ENABLED": "1", "appsec_enabled": True}, + {"DD_APPSEC_SCA_ENABLED": "1", "appsec_enabled": False}, + {"DD_APPSEC_SCA_ENABLED": "1", "iast_enabled": True}, + {"DD_APPSEC_SCA_ENABLED": "1", "iast_enabled": False}, + {"DD_APPSEC_SCA_ENABLED": "0", "iast_enabled": True, "appsec_enabled": True, "appsec_standalone_enabled": True}, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": True, + "appsec_enabled": True, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": True, + "appsec_enabled": False, + 
"appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": True, + "appsec_enabled": False, + "appsec_standalone_enabled": True, + }, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": False, + "appsec_enabled": True, + "appsec_standalone_enabled": True, + }, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": False, + "appsec_enabled": True, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": False, + "appsec_enabled": False, + "appsec_standalone_enabled": False, + }, + { + "DD_APPSEC_SCA_ENABLED": "0", + "iast_enabled": False, + "appsec_enabled": False, + "appsec_standalone_enabled": True, + }, + {"DD_APPSEC_SCA_ENABLED": "0", "appsec_enabled": True}, + {"DD_APPSEC_SCA_ENABLED": "0", "appsec_enabled": False}, + {"DD_APPSEC_SCA_ENABLED": "0", "iast_enabled": True}, + {"DD_APPSEC_SCA_ENABLED": "0", "iast_enabled": False}, ] ) def tracer_appsec_standalone(request, tracer): - tracer.configure(api_version="v0.4", **request.param) - yield tracer, request.param + new_env = {k: v for k, v in request.param.items() if k.startswith("DD_")} + with override_env(new_env): + # Reset the config so it picks up the env var value + ddtrace.config._reset() + + # Copy the params to a new dict, including the env var + request_param_copy = copy.deepcopy(request.param) + + # Remove the environment variables as they are unexpected args for the tracer configure + request.param.pop("DD_APPSEC_SCA_ENABLED", None) + tracer.configure(api_version="v0.4", **request.param) + + yield tracer, request_param_copy + # Reset tracer configuration + ddtrace.config._reset() tracer.configure(api_version="v0.4", appsec_enabled=False, appsec_standalone_enabled=False, iast_enabled=False) def test_appsec_standalone_apm_enabled_metric(tracer_appsec_standalone): tracer, args = tracer_appsec_standalone + with tracer.trace("test", span_type=SpanTypes.WEB) as span: set_http_meta(span, {}, raw_uri="http://example.com/.git", 
status_code="404") if args.get("appsec_standalone_enabled", None) and ( - args.get("appsec_enabled", None) or args.get("iast_enabled", None) + args.get("appsec_enabled", None) + or args.get("iast_enabled", None) + or args.get("DD_APPSEC_SCA_ENABLED", "0") == "1" ): + assert tracer._apm_opt_out is True assert span.get_metric("_dd.apm.enabled") == 0.0 else: + assert tracer._apm_opt_out is False assert span.get_metric("_dd.apm.enabled") is None diff --git a/tests/tracer/test_propagation.py b/tests/tracer/test_propagation.py index 2e1a299c4d4..61fec650a70 100644 --- a/tests/tracer/test_propagation.py +++ b/tests/tracer/test_propagation.py @@ -7,6 +7,7 @@ import mock import pytest +import ddtrace from ddtrace import tracer as ddtracer from ddtrace._trace._span_link import SpanLink from ddtrace._trace.context import Context @@ -45,6 +46,7 @@ from tests.contrib.fastapi.conftest import test_spans as fastapi_test_spans # noqa:F401 from tests.contrib.fastapi.conftest import tracer # noqa:F401 +from ..utils import override_env from ..utils import override_global_config @@ -318,95 +320,107 @@ def test_extract(tracer): # noqa: F811 assert len(context.get_all_baggage_items()) == 3 +@pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) def test_asm_standalone_minimum_trace_per_minute_has_no_downstream_propagation( - tracer, appsec_enabled, iast_enabled # noqa: F811 + tracer, sca_enabled, appsec_enabled, iast_enabled # noqa: F811 ): - if not appsec_enabled and not iast_enabled: - pytest.skip("AppSec or IAST must be enabled") - - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) - try: - headers = { - "x-datadog-trace-id": "1234", - "x-datadog-parent-id": "5678", - "x-datadog-sampling-priority": str(USER_KEEP), - "x-datadog-origin": "synthetics", - "x-datadog-tags": "_dd.p.test=value,any=tag", - 
"ot-baggage-key1": "value1", - } + if not appsec_enabled and not iast_enabled and sca_enabled == "false": + pytest.skip("SCA, AppSec or IAST must be enabled") + + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + + tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + try: + headers = { + "x-datadog-trace-id": "1234", + "x-datadog-parent-id": "5678", + "x-datadog-sampling-priority": str(USER_KEEP), + "x-datadog-origin": "synthetics", + "x-datadog-tags": "_dd.p.test=value,any=tag", + "ot-baggage-key1": "value1", + } - context = HTTPPropagator.extract(headers) + context = HTTPPropagator.extract(headers) - tracer.context_provider.activate(context) + tracer.context_provider.activate(context) - with tracer.trace("local_root_span0") as span: - # First span should be kept, as we keep 1 per min - assert span.trace_id == 1234 - assert span.parent_id == 5678 - # Priority is unset - assert span.context.sampling_priority is None - assert "_sampling_priority_v1" not in span._metrics - assert span.context.dd_origin == "synthetics" - assert "_dd.p.test" in span.context._meta - assert "_dd.p.appsec" not in span.context._meta + with tracer.trace("local_root_span0") as span: + # First span should be kept, as we keep 1 per min + assert span.trace_id == 1234 + assert span.parent_id == 5678 + # Priority is unset + assert span.context.sampling_priority is None + assert "_sampling_priority_v1" not in span._metrics + assert span.context.dd_origin == "synthetics" + assert "_dd.p.test" in span.context._meta + assert "_dd.p.appsec" not in span.context._meta - next_headers = {} - HTTPPropagator.inject(span.context, next_headers) + next_headers = {} + HTTPPropagator.inject(span.context, next_headers) - # Ensure propagation of headers is interrupted - assert "x-datadog-origin" not in next_headers - assert "x-datadog-tags" not in next_headers - assert "x-datadog-trace-id" not in next_headers - assert 
"x-datadog-parent-id" not in next_headers - assert "x-datadog-sampling-priority" not in next_headers + # Ensure propagation of headers is interrupted + assert "x-datadog-origin" not in next_headers + assert "x-datadog-tags" not in next_headers + assert "x-datadog-trace-id" not in next_headers + assert "x-datadog-parent-id" not in next_headers + assert "x-datadog-sampling-priority" not in next_headers - # Span priority was unset, but as we keep 1 per min, it should be kept - # Since we have a rate limiter, priorities used are USER_KEEP and USER_REJECT - assert span._metrics["_sampling_priority_v1"] == USER_KEEP + # Span priority was unset, but as we keep 1 per min, it should be kept + # Since we have a rate limiter, priorities used are USER_KEEP and USER_REJECT + assert span._metrics["_sampling_priority_v1"] == USER_KEEP - finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + finally: + with override_env({"DD_APPSEC_SCA_ENABLED": "0"}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) +@pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) def test_asm_standalone_missing_propagation_tags_no_appsec_event_trace_dropped( - tracer, appsec_enabled, iast_enabled # noqa: F811 + tracer, sca_enabled, appsec_enabled, iast_enabled # noqa: F811 ): - if not appsec_enabled and not iast_enabled: - pytest.skip("AppSec or IAST must be enabled") + if not appsec_enabled and not iast_enabled and sca_enabled == "false": + pytest.skip("SCA, AppSec or IAST must be enabled") - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) - try: - with tracer.trace("local_root_span0"): - # First span should be kept, as we keep 1 per min - pass + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() - headers = {} + 
tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + try: + with tracer.trace("local_root_span0"): + # First span should be kept, as we keep 1 per min + pass - context = HTTPPropagator.extract(headers) + headers = {} - tracer.context_provider.activate(context) + context = HTTPPropagator.extract(headers) - with tracer.trace("local_root_span") as span: - assert "_dd.p.appsec" not in span.context._meta + tracer.context_provider.activate(context) - next_headers = {} - HTTPPropagator.inject(span.context, next_headers) + with tracer.trace("local_root_span") as span: + assert "_dd.p.appsec" not in span.context._meta - # Ensure propagation of headers takes place as expected - assert "x-datadog-origin" not in next_headers - assert "x-datadog-tags" not in next_headers - assert "x-datadog-trace-id" not in next_headers - assert "x-datadog-parent-id" not in next_headers - assert "x-datadog-sampling-priority" not in next_headers + next_headers = {} + HTTPPropagator.inject(span.context, next_headers) - # Ensure span is dropped (no appsec event upstream or in this span) - assert span._metrics["_sampling_priority_v1"] == USER_REJECT - finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + # Ensure propagation of headers takes place as expected + assert "x-datadog-origin" not in next_headers + assert "x-datadog-tags" not in next_headers + assert "x-datadog-trace-id" not in next_headers + assert "x-datadog-parent-id" not in next_headers + assert "x-datadog-sampling-priority" not in next_headers + + # Ensure span is dropped (no appsec event upstream or in this span) + assert span._metrics["_sampling_priority_v1"] == USER_REJECT + finally: + with override_env({"DD_APPSEC_SCA_ENABLED": "0"}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) def test_asm_standalone_missing_propagation_tags_appsec_event_present_trace_kept(tracer): # noqa: F811 @@ -443,58 
+457,63 @@ def test_asm_standalone_missing_propagation_tags_appsec_event_present_trace_kept tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) +@pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) def test_asm_standalone_missing_appsec_tag_no_appsec_event_propagation_resets( - tracer, appsec_enabled, iast_enabled # noqa: F811 + tracer, sca_enabled, appsec_enabled, iast_enabled # noqa: F811 ): - if not appsec_enabled and not iast_enabled: - pytest.skip("AppSec or IAST must be enabled") + if not appsec_enabled and not iast_enabled and sca_enabled == "false": + pytest.skip("SCA, AppSec or IAST must be enabled") + + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + try: + with tracer.trace("local_root_span0"): + # First span should be kept, as we keep 1 per min + pass + + headers = { + "x-datadog-trace-id": "1234", + "x-datadog-parent-id": "5678", + "x-datadog-sampling-priority": str(USER_KEEP), + "x-datadog-origin": "synthetics", + "x-datadog-tags": "_dd.p.test=value,any=tag", + "ot-baggage-key1": "value1", + } - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) - try: - with tracer.trace("local_root_span0"): - # First span should be kept, as we keep 1 per min - pass + context = HTTPPropagator.extract(headers) - headers = { - "x-datadog-trace-id": "1234", - "x-datadog-parent-id": "5678", - "x-datadog-sampling-priority": str(USER_KEEP), - "x-datadog-origin": "synthetics", - "x-datadog-tags": "_dd.p.test=value,any=tag", - "ot-baggage-key1": "value1", - } + tracer.context_provider.activate(context) - context = HTTPPropagator.extract(headers) + with tracer.trace("local_root_span") as span: + assert span.trace_id == 1234 + assert 
span.parent_id == 5678 + # Priority is unset + assert span.context.sampling_priority is None + assert "_sampling_priority_v1" not in span._metrics + assert span.context.dd_origin == "synthetics" + assert "_dd.p.test" in span.context._meta + assert "_dd.p.appsec" not in span.context._meta - tracer.context_provider.activate(context) + next_headers = {} + HTTPPropagator.inject(span.context, next_headers) - with tracer.trace("local_root_span") as span: - assert span.trace_id == 1234 - assert span.parent_id == 5678 - # Priority is unset - assert span.context.sampling_priority is None - assert "_sampling_priority_v1" not in span._metrics - assert span.context.dd_origin == "synthetics" - assert "_dd.p.test" in span.context._meta - assert "_dd.p.appsec" not in span.context._meta + # Ensure propagation of headers takes place as expected + assert "x-datadog-origin" not in next_headers + assert "x-datadog-tags" not in next_headers + assert "x-datadog-trace-id" not in next_headers + assert "x-datadog-parent-id" not in next_headers + assert "x-datadog-sampling-priority" not in next_headers - next_headers = {} - HTTPPropagator.inject(span.context, next_headers) - - # Ensure propagation of headers takes place as expected - assert "x-datadog-origin" not in next_headers - assert "x-datadog-tags" not in next_headers - assert "x-datadog-trace-id" not in next_headers - assert "x-datadog-parent-id" not in next_headers - assert "x-datadog-sampling-priority" not in next_headers - - # Priority was unset, and trace is not kept, so it should be dropped - # As we have a rate limiter, priorities used are USER_KEEP and USER_REJECT - assert span._metrics["_sampling_priority_v1"] == USER_REJECT - finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + # Priority was unset, and trace is not kept, so it should be dropped + # As we have a rate limiter, priorities used are USER_KEEP and USER_REJECT + assert span._metrics["_sampling_priority_v1"] == USER_REJECT + finally: 
+ with override_env({"DD_APPSEC_SCA_ENABLED": "false"}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) def test_asm_standalone_missing_appsec_tag_appsec_event_present_trace_kept( @@ -546,131 +565,141 @@ def test_asm_standalone_missing_appsec_tag_appsec_event_present_trace_kept( @pytest.mark.parametrize("upstream_priority", ["1", "2"]) +@pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) def test_asm_standalone_present_appsec_tag_no_appsec_event_propagation_set_to_user_keep( - tracer, upstream_priority, appsec_enabled, iast_enabled # noqa: F811 + tracer, upstream_priority, sca_enabled, appsec_enabled, iast_enabled # noqa: F811 ): - if not appsec_enabled and not iast_enabled: - pytest.skip("AppSec or IAST must be enabled") - - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) - try: - with tracer.trace("local_root_span0"): - # First span should be kept, as we keep 1 per min - pass - - headers = { - "x-datadog-trace-id": "1234", - "x-datadog-parent-id": "5678", - "x-datadog-sampling-priority": upstream_priority, - "x-datadog-origin": "synthetics", - "x-datadog-tags": "_dd.p.appsec=1,any=tag", - "ot-baggage-key1": "value1", - } + if not appsec_enabled and not iast_enabled and sca_enabled == "false": + pytest.skip("SCA, AppSec or IAST must be enabled") + + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + try: + with tracer.trace("local_root_span0"): + # First span should be kept, as we keep 1 per min + pass + + headers = { + "x-datadog-trace-id": "1234", + "x-datadog-parent-id": "5678", + "x-datadog-sampling-priority": upstream_priority, + "x-datadog-origin": "synthetics", + "x-datadog-tags": 
"_dd.p.appsec=1,any=tag", + "ot-baggage-key1": "value1", + } - context = HTTPPropagator.extract(headers) + context = HTTPPropagator.extract(headers) - tracer.context_provider.activate(context) + tracer.context_provider.activate(context) - with tracer.trace("local_root_span") as span: - assert span.trace_id == 1234 - assert span.parent_id == 5678 - # Enforced user keep regardless of upstream priority - assert span.context.sampling_priority == USER_KEEP - assert span.context.dd_origin == "synthetics" - assert span.context._meta == { - "_dd.origin": "synthetics", - "_dd.p.dm": "-3", - "_dd.p.appsec": "1", - } - with tracer.trace("child_span") as child_span: - assert child_span.trace_id == 1234 - assert child_span.parent_id != 5678 - assert child_span.context.sampling_priority == USER_KEEP - assert child_span.context.dd_origin == "synthetics" - assert child_span.context._meta == { + with tracer.trace("local_root_span") as span: + assert span.trace_id == 1234 + assert span.parent_id == 5678 + # Enforced user keep regardless of upstream priority + assert span.context.sampling_priority == USER_KEEP + assert span.context.dd_origin == "synthetics" + assert span.context._meta == { "_dd.origin": "synthetics", "_dd.p.dm": "-3", "_dd.p.appsec": "1", } - - next_headers = {} - HTTPPropagator.inject(span.context, next_headers) - assert next_headers["x-datadog-origin"] == "synthetics" - assert next_headers["x-datadog-sampling-priority"] == str(USER_KEEP) - assert next_headers["x-datadog-trace-id"] == "1234" - assert next_headers["x-datadog-tags"].startswith("_dd.p.appsec=1,") - - # Ensure span sets user keep regardless of received priority (appsec event upstream) - assert span._metrics["_sampling_priority_v1"] == USER_KEEP - - finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + with tracer.trace("child_span") as child_span: + assert child_span.trace_id == 1234 + assert child_span.parent_id != 5678 + assert child_span.context.sampling_priority == 
USER_KEEP + assert child_span.context.dd_origin == "synthetics" + assert child_span.context._meta == { + "_dd.origin": "synthetics", + "_dd.p.dm": "-3", + "_dd.p.appsec": "1", + } + + next_headers = {} + HTTPPropagator.inject(span.context, next_headers) + assert next_headers["x-datadog-origin"] == "synthetics" + assert next_headers["x-datadog-sampling-priority"] == str(USER_KEEP) + assert next_headers["x-datadog-trace-id"] == "1234" + assert next_headers["x-datadog-tags"].startswith("_dd.p.appsec=1,") + + # Ensure span sets user keep regardless of received priority (appsec event upstream) + assert span._metrics["_sampling_priority_v1"] == USER_KEEP + + finally: + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) @pytest.mark.parametrize("upstream_priority", ["1", "2"]) +@pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) def test_asm_standalone_present_appsec_tag_appsec_event_present_propagation_force_keep( - tracer, upstream_priority, appsec_enabled, iast_enabled # noqa: F811 + tracer, upstream_priority, sca_enabled, appsec_enabled, iast_enabled # noqa: F811 ): - if not appsec_enabled and not iast_enabled: - pytest.skip("AppSec or IAST must be enabled") - - tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) - try: - with tracer.trace("local_root_span0"): - # First span should be kept, as we keep 1 per min - pass - - headers = { - "x-datadog-trace-id": "1234", - "x-datadog-parent-id": "5678", - "x-datadog-sampling-priority": upstream_priority, - "x-datadog-origin": "synthetics", - "x-datadog-tags": "_dd.p.appsec=1,any=tag", - "ot-baggage-key1": "value1", - } + if not appsec_enabled and not iast_enabled and sca_enabled == "false": + pytest.skip("SCA, AppSec or IAST must be enabled") + 
+ with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=appsec_enabled, appsec_standalone_enabled=True, iast_enabled=iast_enabled) + try: + with tracer.trace("local_root_span0"): + # First span should be kept, as we keep 1 per min + pass + + headers = { + "x-datadog-trace-id": "1234", + "x-datadog-parent-id": "5678", + "x-datadog-sampling-priority": upstream_priority, + "x-datadog-origin": "synthetics", + "x-datadog-tags": "_dd.p.appsec=1,any=tag", + "ot-baggage-key1": "value1", + } - context = HTTPPropagator.extract(headers) + context = HTTPPropagator.extract(headers) - tracer.context_provider.activate(context) + tracer.context_provider.activate(context) - with tracer.trace("local_root_span") as span: - _asm_manual_keep(span) - assert span.trace_id == 1234 - assert span.parent_id == 5678 - assert span.context.sampling_priority == USER_KEEP # user keep always - assert span.context.dd_origin == "synthetics" - assert span.context._meta == { - "_dd.origin": "synthetics", - "_dd.p.dm": "-4", - "_dd.p.appsec": "1", - } - with tracer.trace("child_span") as child_span: - assert child_span.trace_id == 1234 - assert child_span.parent_id != 5678 - assert child_span.context.sampling_priority == USER_KEEP # user keep always - assert child_span.context.dd_origin == "synthetics" - assert child_span.context._meta == { + with tracer.trace("local_root_span") as span: + _asm_manual_keep(span) + assert span.trace_id == 1234 + assert span.parent_id == 5678 + assert span.context.sampling_priority == USER_KEEP # user keep always + assert span.context.dd_origin == "synthetics" + assert span.context._meta == { "_dd.origin": "synthetics", "_dd.p.dm": "-4", "_dd.p.appsec": "1", } - - next_headers = {} - HTTPPropagator.inject(span.context, next_headers) - assert next_headers["x-datadog-origin"] == "synthetics" - assert next_headers["x-datadog-sampling-priority"] == str(USER_KEEP) # user keep always - assert 
next_headers["x-datadog-trace-id"] == "1234" - assert next_headers["x-datadog-tags"].startswith("_dd.p.appsec=1,") - - # Ensure span set to user keep regardless received priority (appsec event upstream) - assert span._metrics["_sampling_priority_v1"] == USER_KEEP # user keep always - - finally: - tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) + with tracer.trace("child_span") as child_span: + assert child_span.trace_id == 1234 + assert child_span.parent_id != 5678 + assert child_span.context.sampling_priority == USER_KEEP # user keep always + assert child_span.context.dd_origin == "synthetics" + assert child_span.context._meta == { + "_dd.origin": "synthetics", + "_dd.p.dm": "-4", + "_dd.p.appsec": "1", + } + + next_headers = {} + HTTPPropagator.inject(span.context, next_headers) + assert next_headers["x-datadog-origin"] == "synthetics" + assert next_headers["x-datadog-sampling-priority"] == str(USER_KEEP) # user keep always + assert next_headers["x-datadog-trace-id"] == "1234" + assert next_headers["x-datadog-tags"].startswith("_dd.p.appsec=1,") + + # Ensure span set to user keep regardless received priority (appsec event upstream) + assert span._metrics["_sampling_priority_v1"] == USER_KEEP # user keep always + + finally: + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=False, appsec_standalone_enabled=False) def test_extract_with_baggage_http_propagation(tracer): # noqa: F811 diff --git a/tests/tracer/test_tracer.py b/tests/tracer/test_tracer.py index f432403d3f9..4cdcf876aba 100644 --- a/tests/tracer/test_tracer.py +++ b/tests/tracer/test_tracer.py @@ -2043,30 +2043,38 @@ def test_import_ddtrace_tracer_not_module(): assert isinstance(tracer, Tracer) +@pytest.mark.parametrize("sca_enabled", ["true", "false"]) @pytest.mark.parametrize("appsec_enabled", [True, False]) @pytest.mark.parametrize("iast_enabled", [True, False]) -def 
test_asm_standalone_configuration(appsec_enabled, iast_enabled): - if not appsec_enabled and not iast_enabled: - pytest.skip("AppSec or IAST must be enabled") +def test_asm_standalone_configuration(sca_enabled, appsec_enabled, iast_enabled): + if not appsec_enabled and not iast_enabled and sca_enabled == "false": + pytest.skip("SCA, AppSec or IAST must be enabled") + + with override_env({"DD_APPSEC_SCA_ENABLED": sca_enabled}): + ddtrace.config._reset() + tracer = ddtrace.Tracer() + tracer.configure(appsec_enabled=appsec_enabled, iast_enabled=iast_enabled, appsec_standalone_enabled=True) + if appsec_enabled: + assert tracer._asm_enabled is True + if iast_enabled: + assert tracer._iast_enabled is True + if sca_enabled == "true": + assert bool(ddtrace.config._sca_enabled) is True + + assert tracer._appsec_standalone_enabled is True + assert tracer._apm_opt_out is True + assert tracer.enabled is False + + assert isinstance(tracer._sampler.limiter, RateLimiter) + assert tracer._sampler.limiter.rate_limit == 1 + assert tracer._sampler.limiter.time_window == 60e9 + + assert tracer._compute_stats is False - tracer = ddtrace.Tracer() - tracer.configure(appsec_enabled=appsec_enabled, iast_enabled=iast_enabled, appsec_standalone_enabled=True) - if appsec_enabled: - assert tracer._asm_enabled is True - if iast_enabled: - assert tracer._iast_enabled is True - - assert tracer._appsec_standalone_enabled is True - assert tracer._apm_opt_out is True - assert tracer.enabled is False - - assert isinstance(tracer._sampler.limiter, RateLimiter) - assert tracer._sampler.limiter.rate_limit == 1 - assert tracer._sampler.limiter.time_window == 60e9 - - assert tracer._compute_stats is False # reset tracer values - tracer.configure(appsec_enabled=False, iast_enabled=False, appsec_standalone_enabled=False) + with override_env({"DD_APPSEC_SCA_ENABLED": "false"}): + ddtrace.config._reset() + tracer.configure(appsec_enabled=False, iast_enabled=False, appsec_standalone_enabled=False) def 
test_gc_not_used_on_root_spans(): From de9bc48a9172278d4607aac4e3f3a8778c81f109 Mon Sep 17 00:00:00 2001 From: Quinna Halim Date: Tue, 17 Dec 2024 19:20:37 -0500 Subject: [PATCH 54/78] chore(ci): switch ubuntu runner image in generate package versions workflow (#11749) `ubuntu-latest` was upgraded to use `ubuntu-24.04`. This is incompatible with `python 3.7`, which we still test and support (and is needed for the `Generate Package Versions` workflow in order to build all the riot environments). This PR switches to using the `ubuntu-22.04` image (the previous latest). When we drop 3.7 support, we can switch back to `ubuntu-latest`. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance 
policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .github/workflows/generate-package-versions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/generate-package-versions.yml b/.github/workflows/generate-package-versions.yml index 4db524c3d04..740edc20725 100644 --- a/.github/workflows/generate-package-versions.yml +++ b/.github/workflows/generate-package-versions.yml @@ -8,7 +8,7 @@ on: jobs: generate-package-versions: name: Generate package versions - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 permissions: actions: read contents: write From beb87f64f7e1713b8ea76ba7050e2242093cb7d0 Mon Sep 17 00:00:00 2001 From: Duncan Harvey <35278470+duncanpharvey@users.noreply.github.com> Date: Tue, 17 Dec 2024 21:49:59 -0500 Subject: [PATCH 55/78] feat(azure_functions): add azure functions integration (#11474) This PR adds an integration for tracing the [azure-functions](https://pypi.org/project/azure-functions/) package. ### Additional Notes: - This change only supports the [v2 programming model](https://learn.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=get-started%2Casgi%2Capplication-level&pivots=python-mode-decorators). If there are enough requests for the v1 programming model we can add tracing in a future PR - This change only supports tracing [Http triggers](https://github.com/Azure/azure-functions-python-library/blob/dd4fac4db0ff4ca3cd01d314a0ddf280aa59813e/azure/functions/decorators/function_app.py#L462). 
Tracing for other triggers will be added in future PRs - Azure Functions package currently supports Python versions `3.7` to `3.11` (no `3.12` support at the moment) - Builds off the integration work started by @gord02 in https://github.com/DataDog/dd-trace-py/pull/9726 - Dockerfile changes to testrunner made in https://github.com/DataDog/dd-trace-py/pull/11617 and https://github.com/DataDog/dd-trace-py/pull/11609 * `mariadb` install was broken in the testrunner image * [azure-functions-core-tools](https://github.com/Azure/azure-functions-core-tools) package must be installed on the test runner for tests to work - Version pinned to [4.0.6280](https://github.com/Azure/azure-functions-core-tools/releases/tag/4.0.6280) due to some issues with the most recent versions - Package only supported on `linux/amd64` architecture ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance 
implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .riot/requirements/1337ee3.txt | 26 ++++++ .riot/requirements/14b54db.txt | 24 ++++++ .riot/requirements/1e62aea.txt | 26 ++++++ .riot/requirements/73109d5.txt | 29 +++++++ .riot/requirements/c2420c2.txt | 26 ++++++ ddtrace/_monkey.py | 2 + ddtrace/_trace/trace_handlers.py | 38 +++++++++ ddtrace/contrib/azure_functions/__init__.py | 46 ++++++++++ ddtrace/contrib/azure_functions/patch.py | 14 +++ .../contrib/internal/azure_functions/patch.py | 85 +++++++++++++++++++ ddtrace/ext/__init__.py | 1 + ...unctions-integration-108911bfe1e5f081.yaml | 3 + riotfile.py | 9 ++ tests/contrib/azure_functions/__init__.py | 0 .../azure_function_app/function_app.py | 24 ++++++ .../azure_function_app/host.json | 15 ++++ .../azure_function_app/local.settings.json | 10 +++ .../test_azure_functions_patch.py | 31 +++++++ .../test_azure_functions_snapshot.py | 64 ++++++++++++++ tests/contrib/suitespec.yml | 14 +++ ...unctions_snapshot.test_http_get_error.json | 36 ++++++++ ...e_functions_snapshot.test_http_get_ok.json | 33 +++++++ ..._functions_snapshot.test_http_post_ok.json | 33 +++++++ 23 files changed, 589 insertions(+) create mode 100644 .riot/requirements/1337ee3.txt create mode 100644 .riot/requirements/14b54db.txt create mode 100644 .riot/requirements/1e62aea.txt create mode 100644 .riot/requirements/73109d5.txt create mode 100644 .riot/requirements/c2420c2.txt create mode 100644 ddtrace/contrib/azure_functions/__init__.py create mode 100644 ddtrace/contrib/azure_functions/patch.py create mode 100644 ddtrace/contrib/internal/azure_functions/patch.py create mode 100644 releasenotes/notes/feat-add-azure-functions-integration-108911bfe1e5f081.yaml create mode 100644 tests/contrib/azure_functions/__init__.py create mode 100644 
tests/contrib/azure_functions/azure_function_app/function_app.py create mode 100644 tests/contrib/azure_functions/azure_function_app/host.json create mode 100644 tests/contrib/azure_functions/azure_function_app/local.settings.json create mode 100644 tests/contrib/azure_functions/test_azure_functions_patch.py create mode 100644 tests/contrib/azure_functions/test_azure_functions_snapshot.py create mode 100644 tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_error.json create mode 100644 tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_ok.json create mode 100644 tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_post_ok.json diff --git a/.riot/requirements/1337ee3.txt b/.riot/requirements/1337ee3.txt new file mode 100644 index 00000000000..1b296ead110 --- /dev/null +++ b/.riot/requirements/1337ee3.txt @@ -0,0 +1,26 @@ +# +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1337ee3.in +# +attrs==24.2.0 +azure-functions==1.21.3 +certifi==2024.8.30 +charset-normalizer==3.4.0 +coverage[toml]==7.6.1 +exceptiongroup==1.2.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +tomli==2.1.0 +urllib3==2.2.3 diff --git a/.riot/requirements/14b54db.txt b/.riot/requirements/14b54db.txt new file mode 100644 index 00000000000..6b103b5f841 --- /dev/null +++ b/.riot/requirements/14b54db.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/14b54db.in +# +attrs==24.2.0 +azure-functions==1.21.3 +certifi==2024.8.30 +charset-normalizer==3.4.0 +coverage[toml]==7.6.8 +hypothesis==6.45.0 
+idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/1e62aea.txt b/.riot/requirements/1e62aea.txt new file mode 100644 index 00000000000..4a152a7b448 --- /dev/null +++ b/.riot/requirements/1e62aea.txt @@ -0,0 +1,26 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e62aea.in +# +attrs==24.2.0 +azure-functions==1.21.3 +certifi==2024.8.30 +charset-normalizer==3.4.0 +coverage[toml]==7.6.8 +exceptiongroup==1.2.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +tomli==2.1.0 +urllib3==2.2.3 diff --git a/.riot/requirements/73109d5.txt b/.riot/requirements/73109d5.txt new file mode 100644 index 00000000000..42b5dd0e30c --- /dev/null +++ b/.riot/requirements/73109d5.txt @@ -0,0 +1,29 @@ +# +# This file is autogenerated by pip-compile with Python 3.7 +# by the following command: +# +# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/73109d5.in +# +attrs==24.2.0 +azure-functions==1.21.3 +certifi==2024.8.30 +charset-normalizer==3.4.0 +coverage[toml]==7.2.7 +exceptiongroup==1.2.2 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==6.7.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.0 +pluggy==1.2.0 +pytest==7.4.4 +pytest-cov==4.1.0 +pytest-mock==3.11.1 +requests==2.31.0 +sortedcontainers==2.4.0 +tomli==2.0.1 +typing-extensions==4.7.1 +urllib3==2.0.7 +zipp==3.15.0 diff --git a/.riot/requirements/c2420c2.txt b/.riot/requirements/c2420c2.txt new file mode 100644 index 00000000000..2d6d61d7a79 --- /dev/null +++ b/.riot/requirements/c2420c2.txt @@ 
-0,0 +1,26 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c2420c2.in +# +attrs==24.2.0 +azure-functions==1.21.3 +certifi==2024.8.30 +charset-normalizer==3.4.0 +coverage[toml]==7.6.8 +exceptiongroup==1.2.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +tomli==2.1.0 +urllib3==2.2.3 diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py index b0c17213130..8dd83558c83 100644 --- a/ddtrace/_monkey.py +++ b/ddtrace/_monkey.py @@ -94,6 +94,7 @@ "yaaredis": True, "asyncpg": True, "aws_lambda": True, # patch only in AWS Lambda environments + "azure_functions": True, "tornado": False, "openai": True, "langchain": True, @@ -143,6 +144,7 @@ "futures": ("concurrent.futures.thread",), "vertica": ("vertica_python",), "aws_lambda": ("datadog_lambda",), + "azure_functions": ("azure.functions",), "httplib": ("http.client",), "kafka": ("confluent_kafka",), "google_generativeai": ("google.generativeai",), diff --git a/ddtrace/_trace/trace_handlers.py b/ddtrace/_trace/trace_handlers.py index 1807ae220f6..7c2ba02d6b4 100644 --- a/ddtrace/_trace/trace_handlers.py +++ b/ddtrace/_trace/trace_handlers.py @@ -28,6 +28,7 @@ from ddtrace.ext import http from ddtrace.internal import core from ddtrace.internal.compat import maybe_stringify +from ddtrace.internal.compat import parse from ddtrace.internal.constants import COMPONENT from ddtrace.internal.constants import FLASK_ENDPOINT from ddtrace.internal.constants import FLASK_URL_RULE @@ -675,6 +676,40 @@ def _set_span_pointer(span: "Span", span_pointer_description: _SpanPointerDescri ) +def _set_azure_function_tags(span, azure_functions_config, function_name, trigger): + span.set_tag_str(COMPONENT, azure_functions_config.integration_name) + span.set_tag_str(SPAN_KIND, 
SpanKind.SERVER) + span.set_tag_str("aas.function.name", function_name) # codespell:ignore + span.set_tag_str("aas.function.trigger", trigger) # codespell:ignore + + +def _on_azure_functions_request_span_modifier(ctx, azure_functions_config, req): + span = ctx.get_item("req_span") + parsed_url = parse.urlparse(req.url) + path = parsed_url.path + span.resource = f"{req.method} {path}" + trace_utils.set_http_meta( + span, + azure_functions_config, + method=req.method, + url=req.url, + request_headers=req.headers, + request_body=req.get_body(), + route=path, + ) + + +def _on_azure_functions_start_response(ctx, azure_functions_config, res, function_name, trigger): + span = ctx.get_item("req_span") + _set_azure_function_tags(span, azure_functions_config, function_name, trigger) + trace_utils.set_http_meta( + span, + azure_functions_config, + status_code=res.status_code if res else None, + response_headers=res.headers if res else None, + ) + + def listen(): core.on("wsgi.request.prepare", _on_request_prepare) core.on("wsgi.request.prepared", _on_request_prepared) @@ -723,6 +758,8 @@ def listen(): core.on("botocore.kinesis.GetRecords.post", _on_botocore_kinesis_getrecords_post) core.on("redis.async_command.post", _on_redis_command_post) core.on("redis.command.post", _on_redis_command_post) + core.on("azure.functions.request_call_modifier", _on_azure_functions_request_span_modifier) + core.on("azure.functions.start_response", _on_azure_functions_start_response) core.on("test_visibility.enable", _on_test_visibility_enable) core.on("test_visibility.disable", _on_test_visibility_disable) @@ -754,6 +791,7 @@ def listen(): "rq.worker.perform_job", "rq.job.perform", "rq.job.fetch_many", + "azure.functions.patched_route_request", ): core.on(f"context.started.start_span.{context_name}", _start_span) diff --git a/ddtrace/contrib/azure_functions/__init__.py b/ddtrace/contrib/azure_functions/__init__.py new file mode 100644 index 00000000000..208b971efaa --- /dev/null +++ 
b/ddtrace/contrib/azure_functions/__init__.py @@ -0,0 +1,46 @@ +""" +The azure_functions integration traces all http requests to your Azure Function app. + +Enabling +~~~~~~~~ + +Use :func:`patch()` to manually enable the integration:: + + from ddtrace import patch + patch(azure_functions=True) + + +Global Configuration +~~~~~~~~~~~~~~~~~~~~ + +.. py:data:: ddtrace.config.azure_functions["service"] + + The service name reported by default for azure_functions instances. + + This option can also be set with the ``DD_SERVICE`` environment + variable. + + Default: ``"azure_functions"`` + +""" + +from ddtrace.internal.utils.importlib import require_modules + + +required_modules = ["azure.functions"] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + # Required to allow users to import from `ddtrace.contrib.azure_functions.patch` directly + import warnings as _w + + with _w.catch_warnings(): + _w.simplefilter("ignore", DeprecationWarning) + from . import patch as _ # noqa: F401, I001 + + # Expose public methods + from ddtrace.contrib.internal.azure_functions.patch import get_version + from ddtrace.contrib.internal.azure_functions.patch import patch + from ddtrace.contrib.internal.azure_functions.patch import unpatch + + __all__ = ["patch", "unpatch", "get_version"] diff --git a/ddtrace/contrib/azure_functions/patch.py b/ddtrace/contrib/azure_functions/patch.py new file mode 100644 index 00000000000..1a23613972d --- /dev/null +++ b/ddtrace/contrib/azure_functions/patch.py @@ -0,0 +1,14 @@ +from ddtrace.contrib.internal.azure_functions.patch import * # noqa: F403 +from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning +from ddtrace.vendor.debtcollector import deprecate + + +def __getattr__(name): + deprecate( + ("%s.%s is deprecated" % (__name__, name)), + category=DDTraceDeprecationWarning, + ) + + if name in globals(): + return globals()[name] + raise AttributeError("%s has no attribute %s", __name__, name) diff 
--git a/ddtrace/contrib/internal/azure_functions/patch.py b/ddtrace/contrib/internal/azure_functions/patch.py new file mode 100644 index 00000000000..15089a2e733 --- /dev/null +++ b/ddtrace/contrib/internal/azure_functions/patch.py @@ -0,0 +1,85 @@ +import azure.functions as azure_functions +from wrapt import wrap_function_wrapper as _w + +from ddtrace import config +from ddtrace.contrib.trace_utils import int_service +from ddtrace.contrib.trace_utils import unwrap as _u +from ddtrace.ext import SpanTypes +from ddtrace.internal import core +from ddtrace.internal.schema import schematize_cloud_faas_operation +from ddtrace.internal.schema import schematize_service_name +from ddtrace.pin import Pin + + +config._add( + "azure_functions", + { + "_default_service": schematize_service_name("azure_functions"), + }, +) + + +def get_version(): + # type: () -> str + return getattr(azure_functions, "__version__", "") + + +def patch(): + """ + Patch `azure.functions` module for tracing + """ + # Check to see if we have patched azure.functions yet or not + if getattr(azure_functions, "_datadog_patch", False): + return + azure_functions._datadog_patch = True + + Pin().onto(azure_functions.FunctionApp) + _w("azure.functions", "FunctionApp.route", _patched_route) + + +def _patched_route(wrapped, instance, args, kwargs): + trigger = "Http" + + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + def _wrapper(func): + function_name = func.__name__ + + def wrap_function(req: azure_functions.HttpRequest, context: azure_functions.Context): + operation_name = schematize_cloud_faas_operation( + "azure.functions.invoke", cloud_provider="azure", cloud_service="functions" + ) + with core.context_with_data( + "azure.functions.patched_route_request", + span_name=operation_name, + pin=pin, + service=int_service(pin, config.azure_functions), + span_type=SpanTypes.SERVERLESS, + ) as ctx, ctx.span: + ctx.set_item("req_span", ctx.span) + 
core.dispatch("azure.functions.request_call_modifier", (ctx, config.azure_functions, req)) + res = None + try: + res = func(req) + return res + finally: + core.dispatch( + "azure.functions.start_response", (ctx, config.azure_functions, res, function_name, trigger) + ) + + # Needed to correctly display function name when running 'func start' locally + wrap_function.__name__ = function_name + + return wrapped(*args, **kwargs)(wrap_function) + + return _wrapper + + +def unpatch(): + if not getattr(azure_functions, "_datadog_patch", False): + return + azure_functions._datadog_patch = False + + _u(azure_functions.FunctionApp, "route") diff --git a/ddtrace/ext/__init__.py b/ddtrace/ext/__init__.py index 2387dbd63a4..965dd04f43f 100644 --- a/ddtrace/ext/__init__.py +++ b/ddtrace/ext/__init__.py @@ -7,6 +7,7 @@ class SpanTypes(object): HTTP = "http" MONGODB = "mongodb" REDIS = "redis" + SERVERLESS = "serverless" SQL = "sql" TEMPLATE = "template" TEST = "test" diff --git a/releasenotes/notes/feat-add-azure-functions-integration-108911bfe1e5f081.yaml b/releasenotes/notes/feat-add-azure-functions-integration-108911bfe1e5f081.yaml new file mode 100644 index 00000000000..b9b7b255564 --- /dev/null +++ b/releasenotes/notes/feat-add-azure-functions-integration-108911bfe1e5f081.yaml @@ -0,0 +1,3 @@ +features: + - | + azure_functions: This introduces support for Azure Functions. 
diff --git a/riotfile.py b/riotfile.py index 567a5f65d66..86d6ed5bf76 100644 --- a/riotfile.py +++ b/riotfile.py @@ -2837,6 +2837,15 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "envier": "==0.5.2", }, ), + Venv( + name="azure_functions", + command="pytest {cmdargs} tests/contrib/azure_functions", + pys=select_pys(min_version="3.7", max_version="3.11"), + pkgs={ + "azure.functions": latest, + "requests": latest, + }, + ), Venv( name="sourcecode", command="pytest {cmdargs} tests/sourcecode", diff --git a/tests/contrib/azure_functions/__init__.py b/tests/contrib/azure_functions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/azure_functions/azure_function_app/function_app.py b/tests/contrib/azure_functions/azure_function_app/function_app.py new file mode 100644 index 00000000000..edff02b0bb0 --- /dev/null +++ b/tests/contrib/azure_functions/azure_function_app/function_app.py @@ -0,0 +1,24 @@ +from ddtrace import patch + + +patch(azure_functions=True) + +import azure.functions as func # noqa: E402 + + +app = func.FunctionApp() + + +@app.route(route="httpgetok", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.GET]) +def http_get_ok(req: func.HttpRequest) -> func.HttpResponse: + return func.HttpResponse("Hello Datadog!") + + +@app.route(route="httpgeterror", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.GET]) +def http_get_error(req: func.HttpRequest) -> func.HttpResponse: + raise Exception("Test Error") + + +@app.route(route="httppostok", auth_level=func.AuthLevel.ANONYMOUS, methods=[func.HttpMethod.POST]) +def http_post_ok(req: func.HttpRequest) -> func.HttpResponse: + return func.HttpResponse("Hello Datadog!") diff --git a/tests/contrib/azure_functions/azure_function_app/host.json b/tests/contrib/azure_functions/azure_function_app/host.json new file mode 100644 index 00000000000..06d01bdaa95 --- /dev/null +++ 
b/tests/contrib/azure_functions/azure_function_app/host.json @@ -0,0 +1,15 @@ +{ + "version": "2.0", + "logging": { + "applicationInsights": { + "samplingSettings": { + "isEnabled": true, + "excludedTypes": "Request" + } + } + }, + "extensionBundle": { + "id": "Microsoft.Azure.Functions.ExtensionBundle", + "version": "[4.*, 5.0.0)" + } +} diff --git a/tests/contrib/azure_functions/azure_function_app/local.settings.json b/tests/contrib/azure_functions/azure_function_app/local.settings.json new file mode 100644 index 00000000000..fb38bf93ca8 --- /dev/null +++ b/tests/contrib/azure_functions/azure_function_app/local.settings.json @@ -0,0 +1,10 @@ +{ + "IsEncrypted": false, + "Values": { + "FUNCTIONS_WORKER_RUNTIME": "python", + "FUNCTIONS_EXTENSION_VERSION": "~4", + "AzureWebJobsFeatureFlags": "EnableWorkerIndexing", + "AzureWebJobsStorage": "", + "WEBSITE_SITE_NAME": "test-func" + } +} diff --git a/tests/contrib/azure_functions/test_azure_functions_patch.py b/tests/contrib/azure_functions/test_azure_functions_patch.py new file mode 100644 index 00000000000..acc58df654a --- /dev/null +++ b/tests/contrib/azure_functions/test_azure_functions_patch.py @@ -0,0 +1,31 @@ +# This test script was automatically generated by the contrib-patch-tests.py +# script. If you want to make changes to it, you should make sure that you have +# removed the ``_generated`` suffix from the file name, to prevent the content +# from being overwritten by future re-generations. 
+ +from ddtrace.contrib.azure_functions import get_version +from ddtrace.contrib.azure_functions.patch import patch + + +try: + from ddtrace.contrib.azure_functions.patch import unpatch +except ImportError: + unpatch = None +from tests.contrib.patch import PatchTestCase + + +class TestAzure_FunctionsPatch(PatchTestCase.Base): + __integration_name__ = "azure_functions" + __module_name__ = "azure.functions" + __patch_func__ = patch + __unpatch_func__ = unpatch + __get_version__ = get_version + + def assert_module_patched(self, azure_functions): + pass + + def assert_not_module_patched(self, azure_functions): + pass + + def assert_not_module_double_patched(self, azure_functions): + pass diff --git a/tests/contrib/azure_functions/test_azure_functions_snapshot.py b/tests/contrib/azure_functions/test_azure_functions_snapshot.py new file mode 100644 index 00000000000..c236122181f --- /dev/null +++ b/tests/contrib/azure_functions/test_azure_functions_snapshot.py @@ -0,0 +1,64 @@ +import os +import signal +import subprocess +import time + +import pytest + +from tests.webclient import Client + + +DEFAULT_HEADERS = { + "User-Agent": "python-httpx/x.xx.x", +} + + +@pytest.fixture +def azure_functions_client(): + # Copy the env to get the correct PYTHONPATH and such + # from the virtualenv. + # webservers might exec or fork into another process, so we need to os.setsid() to create a process group + # (all of which will listen to signals sent to the parent) so that we can kill the whole application. 
+ proc = subprocess.Popen( + ["func", "start"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + close_fds=True, + env=os.environ.copy(), + preexec_fn=os.setsid, + cwd=os.path.join(os.path.dirname(__file__), "azure_function_app"), + ) + try: + client = Client("http://0.0.0.0:7071") + # Wait for the server to start up + try: + client.wait(delay=0.5) + yield client + client.get_ignored("/shutdown") + except Exception: + pass + # At this point the traces have been sent to the test agent + # but the test agent hasn't necessarily finished processing + # the traces (race condition) so wait just a bit for that + # processing to complete. + time.sleep(1) + finally: + os.killpg(proc.pid, signal.SIGKILL) + proc.wait() + + +@pytest.mark.snapshot +def test_http_get_ok(azure_functions_client: Client) -> None: + assert azure_functions_client.get("/api/httpgetok?key=val", headers=DEFAULT_HEADERS).status_code == 200 + + +@pytest.mark.snapshot(ignores=["meta.error.stack"]) +def test_http_get_error(azure_functions_client: Client) -> None: + assert azure_functions_client.get("/api/httpgeterror", headers=DEFAULT_HEADERS).status_code == 500 + + +@pytest.mark.snapshot +def test_http_post_ok(azure_functions_client: Client) -> None: + assert ( + azure_functions_client.post("/api/httppostok", headers=DEFAULT_HEADERS, data={"key": "val"}).status_code == 200 + ) diff --git a/tests/contrib/suitespec.yml b/tests/contrib/suitespec.yml index 2f14127ddf0..83a48ea1f48 100644 --- a/tests/contrib/suitespec.yml +++ b/tests/contrib/suitespec.yml @@ -23,6 +23,9 @@ components: - ddtrace/contrib/aws_lambda/* - ddtrace/contrib/internal/aws_lambda/* - ddtrace/ext/aws.py + azure_functions: + - ddtrace/contrib/azure_functions/* + - ddtrace/contrib/internal/azure_functions/* botocore: - ddtrace/contrib/botocore/* - ddtrace/contrib/internal/botocore/* @@ -374,6 +377,17 @@ suites: - tests/snapshots/tests.{suite}.* runner: riot snapshot: true + azure_functions: + paths: + - '@bootstrap' + - '@core' + 
- '@contrib' + - '@tracing' + - '@azure_functions' + - tests/contrib/azure_functions/* + - tests/snapshots/tests.contrib.azure_functions.* + runner: riot + snapshot: true botocore: parallelism: 6 paths: diff --git a/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_error.json b/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_error.json new file mode 100644 index 00000000000..4e0cf3e81b1 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_error.json @@ -0,0 +1,36 @@ +[[ + { + "name": "azure.functions.invoke", + "service": "test-func", + "resource": "GET /api/httpgeterror", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "serverless", + "error": 1, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6750ad8500000000", + "aas.function.name": "http_get_error", + "aas.function.trigger": "Http", + "component": "azure_functions", + "error.message": "Test Error", + "error.stack": "Traceback (most recent call last):\n File \"/root/project/ddtrace/contrib/internal/azure_functions/patch.py\", line 65, in wrap_function\n res = func(req)\n ^^^^^^^^^\n File \"/root/project/tests/contrib/azure_functions/azure_function_app/function_app.py\", line 19, in http_get_error\n raise Exception(\"Test Error\")\nException: Test Error\n", + "error.type": "builtins.Exception", + "http.method": "GET", + "http.route": "/api/httpgeterror", + "http.url": "http://0.0.0.0:7071/api/httpgeterror", + "http.useragent": "python-httpx/x.xx.x", + "language": "python", + "runtime-id": "d7efb82603894b91af0e18f95bfb40ce", + "span.kind": "server" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 98042 + }, + "duration": 3862875, + "start": 1733340549814399761 + }]] diff --git a/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_ok.json 
b/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_ok.json new file mode 100644 index 00000000000..415678e4dec --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_get_ok.json @@ -0,0 +1,33 @@ +[[ + { + "name": "azure.functions.invoke", + "service": "test-func", + "resource": "GET /api/httpgetok", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "serverless", + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6750ad7d00000000", + "aas.function.name": "http_get_ok", + "aas.function.trigger": "Http", + "component": "azure_functions", + "http.method": "GET", + "http.route": "/api/httpgetok", + "http.status_code": "200", + "http.url": "http://0.0.0.0:7071/api/httpgetok?key=val", + "http.useragent": "python-httpx/x.xx.x", + "language": "python", + "runtime-id": "2dd77b70098048f5a6b7d3a7d53d1082", + "span.kind": "server" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 96455 + }, + "duration": 1160792, + "start": 1733340541444015424 + }]] diff --git a/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_post_ok.json b/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_post_ok.json new file mode 100644 index 00000000000..44c0491b7a7 --- /dev/null +++ b/tests/snapshots/tests.contrib.azure_functions.test_azure_functions_snapshot.test_http_post_ok.json @@ -0,0 +1,33 @@ +[[ + { + "name": "azure.functions.invoke", + "service": "test-func", + "resource": "POST /api/httppostok", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "serverless", + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6750ad8e00000000", + "aas.function.name": "http_post_ok", + "aas.function.trigger": "Http", + "component": "azure_functions", + "http.method": "POST", + "http.route": "/api/httppostok", + "http.status_code": "200", + "http.url": 
"http://0.0.0.0:7071/api/httppostok", + "http.useragent": "python-httpx/x.xx.x", + "language": "python", + "runtime-id": "891babf5be3d4b86bd44163cd50c74b0", + "span.kind": "server" + }, + "metrics": { + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 99631 + }, + "duration": 293958, + "start": 1733340558198232376 + }]] From 59c068fcaa8841ea1d9389b90f72dffed654cb26 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Wed, 18 Dec 2024 11:52:22 +0100 Subject: [PATCH 56/78] refactor(iast): simplify ``__mod__`` aspect (#11601) --- .../_taint_tracking/Aspects/AspectModulo.cpp | 43 ++++++++----------- 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/ddtrace/appsec/_iast/_taint_tracking/Aspects/AspectModulo.cpp b/ddtrace/appsec/_iast/_taint_tracking/Aspects/AspectModulo.cpp index a08f76d9f3d..b7454de26f8 100644 --- a/ddtrace/appsec/_iast/_taint_tracking/Aspects/AspectModulo.cpp +++ b/ddtrace/appsec/_iast/_taint_tracking/Aspects/AspectModulo.cpp @@ -2,7 +2,7 @@ #include "Helpers.h" static PyObject* -do_modulo(PyObject* text, PyObject* insert_tuple_or_obj) +do_modulo(PyObject* text, PyObject* insert_tuple_or_obj, py::object py_candidate_text, py::object py_candidate_tuple) { PyObject* result = nullptr; @@ -13,18 +13,22 @@ do_modulo(PyObject* text, PyObject* insert_tuple_or_obj) Py_INCREF(insert_tuple); } else { insert_tuple = PyTuple_Pack(1, insert_tuple_or_obj); - if (insert_tuple == nullptr) { - return nullptr; - } } - if (PyUnicode_Check(text)) { + if (PyUnicode_Check(text) && insert_tuple != nullptr) { result = PyUnicode_Format(text, insert_tuple); - } else if (PyBytes_Check(text) or PyByteArray_Check(text)) { - auto method_name = PyUnicode_FromString("__mod__"); - result = PyObject_CallMethodObjArgs(text, method_name, insert_tuple, nullptr); - Py_DECREF(method_name); } else { + try { + py::object res_py = py_candidate_text.attr("__mod__")(py_candidate_tuple); + PyObject* res_pyo = res_py.ptr(); + if (res_pyo != 
nullptr) { + Py_INCREF(res_pyo); + } + return res_pyo; + } catch (py::error_already_set& e) { + e.restore(); + return nullptr; + } } Py_DECREF(insert_tuple); if (has_pyerr()) { @@ -49,21 +53,7 @@ api_modulo_aspect(PyObject* self, PyObject* const* args, const Py_ssize_t nargs) // Lambda to get the result of the modulo operation auto get_result = [&]() -> PyObject* { - PyObject* res = do_modulo(candidate_text, candidate_tuple); - if (res == nullptr) { - try { - py::object res_py = py_candidate_text.attr("__mod__")(py_candidate_tuple); - PyObject* res_pyo = res_py.ptr(); - if (res_pyo != nullptr) { - Py_INCREF(res_pyo); - } - return res_pyo; - } catch (py::error_already_set& e) { - e.restore(); - return nullptr; - } - } - return res; + return do_modulo(candidate_text, candidate_tuple, py_candidate_text, py_candidate_tuple); }; TRY_CATCH_ASPECT("modulo_aspect", return get_result(), , { @@ -107,7 +97,10 @@ api_modulo_aspect(PyObject* self, PyObject* const* args, const Py_ssize_t nargs) } py::tuple formatted_parameters(list_formatted_parameters); - PyObject* applied_params = do_modulo(StringToPyObject(fmttext, py_str_type).ptr(), formatted_parameters.ptr()); + PyObject* applied_params = do_modulo(StringToPyObject(fmttext, py_str_type).ptr(), + formatted_parameters.ptr(), + StringToPyObject(fmttext, py_str_type), + formatted_parameters); if (applied_params == nullptr) { return get_result(); } From a7e94042b42aa696bd38538e3453ca3633dbac9b Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Wed, 18 Dec 2024 10:09:53 -0500 Subject: [PATCH 57/78] ci(celery): increase amqp task timeout (#11741) --- tests/contrib/celery/test_tagging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/contrib/celery/test_tagging.py b/tests/contrib/celery/test_tagging.py index af40c4f9209..2809364ba13 100644 --- a/tests/contrib/celery/test_tagging.py +++ b/tests/contrib/celery/test_tagging.py @@ -102,7 +102,7 @@ def test_amqp_task(instrument_celery, traced_amqp_celery_app): 
shutdown_timeout=30, ): t = add.delay(4, 4) - assert t.get(timeout=2) == 8 + assert t.get(timeout=30) == 8 # wait for spans to be received time.sleep(3) From e46e3b5785ee26aec4fbf9e7cd2c13c40a435697 Mon Sep 17 00:00:00 2001 From: Taegyun Kim Date: Wed, 18 Dec 2024 11:13:43 -0500 Subject: [PATCH 58/78] chore(profiling): remove unused mutex (#11774) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../datadog/profiling/dd_wrapper/include/sample_manager.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample_manager.hpp 
b/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample_manager.hpp index baf6af2b33a..30c4048e967 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample_manager.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/sample_manager.hpp @@ -19,7 +19,6 @@ class SampleManager private: static inline unsigned int max_nframes{ g_default_max_nframes }; static inline SampleType type_mask{ SampleType::All }; - static inline std::mutex init_mutex{}; static inline size_t sample_pool_capacity{ g_default_sample_pool_capacity }; static inline std::unique_ptr sample_pool{ nullptr }; From c7b888d09cdfba1186c49a91a7370b8bdccb5648 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Wed, 18 Dec 2024 11:23:24 -0500 Subject: [PATCH 59/78] ci: test with Python 3.13 (#10821) This change adjusts CI and the library itself to work under Python 3.13. Any tests that failed under 3.13 are skipped on 3.13 for now and will be unskipped in a future change. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is 
easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: Emmett Butler Co-authored-by: Gabriele N. Tornetta Co-authored-by: Emmett Butler <723615+emmettbutler@users.noreply.github.com> Co-authored-by: Federico Mon Co-authored-by: erikayasuda <153395705+erikayasuda@users.noreply.github.com> Co-authored-by: Gabriele N. Tornetta --- .github/workflows/build_deploy.yml | 2 +- .github/workflows/build_python_3.yml | 12 +- .../workflows/generate-package-versions.yml | 5 + .github/workflows/requirements-locks.yml | 4 +- .gitlab/download-dependency-wheels.sh | 2 +- .gitlab/package.yml | 21 +++ .gitlab/testrunner.yml | 4 +- .gitlab/tests.yml | 6 +- .riot/requirements/102dfdd.txt | 20 +++ .riot/requirements/104daf8.txt | 25 +++ .riot/requirements/104f450.txt | 20 +++ .riot/requirements/1053dce.txt | 26 +++ .riot/requirements/114bad8.txt | 29 ++++ .riot/requirements/11f2bd0.txt | 38 ++++ .riot/requirements/11fd02a.txt | 19 ++ .riot/requirements/1261ed3.txt | 31 ++++ .riot/requirements/1304e20.txt | 26 +++ .riot/requirements/1332b9d.txt | 38 ++++ .riot/requirements/13658ae.txt | 24 +++ .riot/requirements/136fddd.txt | 21 +++ .riot/requirements/1374394.txt | 34 ++++ .riot/requirements/1381214.txt | 21 +++ .riot/requirements/13ae267.txt | 20 +++ .riot/requirements/141bfd1.txt | 32 ++++ .riot/requirements/141f7eb.txt | 24 +++ .riot/requirements/1463930.txt | 20 +++ .riot/requirements/14be2f6.txt | 25 +++ .riot/requirements/14d7e8a.txt | 31 ++++ .riot/requirements/14f1594.txt | 21 +++ .riot/requirements/152e97f.txt | 21 +++ .riot/requirements/1584f8c.txt | 29 ++++ .riot/requirements/164c3ce.txt | 31 ++++ .riot/requirements/167b853.txt | 21 +++ 
.riot/requirements/16acf84.txt | 27 +++ .riot/requirements/16cc321.txt | 20 +++ .riot/requirements/16d2d1f.txt | 48 +++++ .riot/requirements/16de9c4.txt | 37 ++++ .riot/requirements/178f7d5.txt | 20 +++ .riot/requirements/17d40ef.txt | 20 +++ .riot/requirements/1819cb6.txt | 29 ++++ .riot/requirements/188244e.txt | 20 +++ .riot/requirements/18c6e70.txt | 19 ++ .riot/requirements/18e9526.txt | 28 +++ .riot/requirements/192c7c0.txt | 22 +++ .riot/requirements/19bbf6d.txt | 22 +++ .riot/requirements/1a485c9.txt | 23 +++ .riot/requirements/1a508dc.txt | 30 ++++ .riot/requirements/1acabe0.txt | 20 +++ .riot/requirements/1ada88e.txt | 29 ++++ .riot/requirements/1aed5dc.txt | 30 ++++ .riot/requirements/1b86c06.txt | 27 +++ .riot/requirements/1b8d922.txt | 21 +++ .riot/requirements/1ba390a.txt | 21 +++ .riot/requirements/1bf4d76.txt | 23 +++ .riot/requirements/1c22cf9.txt | 20 +++ .riot/requirements/1cb554e.txt | 21 +++ .riot/requirements/1ce0711.txt | 24 +++ .riot/requirements/1ce93b3.txt | 22 +++ .riot/requirements/1d74d67.txt | 24 +++ .riot/requirements/1d8a93c.txt | 48 +++++ .riot/requirements/1dd5678.txt | 30 ++++ .../requirements/{15e6ff4.txt => 1df4764.txt} | 32 ++-- .riot/requirements/1e19c17.txt | 29 ++++ .riot/requirements/1e4bb51.txt | 24 +++ .riot/requirements/1e4dfe1.txt | 28 +++ .riot/requirements/1e659c4.txt | 20 +++ .riot/requirements/1e70094.txt | 42 +++++ .riot/requirements/1ebb239.txt | 35 ++++ .riot/requirements/1ec9462.txt | 20 +++ .riot/requirements/1f3b209.txt | 20 +++ .riot/requirements/1fa3005.txt | 21 +++ .riot/requirements/1fc9ecc.txt | 20 +++ .riot/requirements/1fe8dd2.txt | 83 +++++++++ .riot/requirements/248da41.txt | 24 +++ .riot/requirements/2538ed0.txt | 23 +++ .riot/requirements/2581b3a.txt | 20 +++ .riot/requirements/2644218.txt | 22 +++ .riot/requirements/27d0ff8.txt | 21 +++ .riot/requirements/27e3d7b.txt | 21 +++ .riot/requirements/2d6c3d0.txt | 20 +++ .riot/requirements/2dd0811.txt | 21 +++ .riot/requirements/3ab519c.txt | 28 +++ 
.riot/requirements/3b804dc.txt | 28 +++ .riot/requirements/3c3f295.txt | 23 +++ .riot/requirements/3dd53da.txt | 22 +++ .riot/requirements/3f1be84.txt | 23 +++ .../requirements/{1edf426.txt => 4132bce.txt} | 12 +- .riot/requirements/44eeaa9.txt | 28 +++ .riot/requirements/4fd1520.txt | 23 +++ .riot/requirements/5b922fc.txt | 45 +++++ .riot/requirements/6cf373b.txt | 19 ++ .riot/requirements/70e034f.txt | 24 +++ .riot/requirements/74ccb83.txt | 20 +++ .riot/requirements/788c304.txt | 27 +++ .riot/requirements/7a40e08.txt | 22 +++ .../requirements/{921bc6c.txt => 7bbf828.txt} | 32 ++-- .riot/requirements/8ce955f.txt | 28 +++ .riot/requirements/91fe586.txt | 25 +++ .riot/requirements/9a07d4a.txt | 23 +++ .riot/requirements/9a5c0d9.txt | 32 ++++ .riot/requirements/a0cc2a4.txt | 21 +++ .riot/requirements/a9f396a.txt | 31 ++++ .riot/requirements/ae8bd25.txt | 26 +++ .riot/requirements/b29075f.txt | 38 ++++ .riot/requirements/b403d9d.txt | 49 ++++++ .riot/requirements/bc64f49.txt | 35 ++++ .riot/requirements/bc7a1f4.txt | 21 +++ .riot/requirements/bcbec2a.txt | 46 +++++ .riot/requirements/bebdd41.txt | 19 ++ .riot/requirements/c1351c9.txt | 21 +++ .riot/requirements/c4d4455.txt | 20 +++ .riot/requirements/c77bbb6.txt | 48 +++++ .riot/requirements/c8b476b.txt | 32 ++++ .riot/requirements/d5098dd.txt | 22 +++ .riot/requirements/d7dfbc2.txt | 22 +++ .riot/requirements/d81ad99.txt | 20 +++ .riot/requirements/db78045.txt | 21 +++ .riot/requirements/dbc6a48.txt | 35 ++++ .riot/requirements/dbeb1d7.txt | 22 +++ .riot/requirements/ddd8721.txt | 20 +++ .riot/requirements/dedea98.txt | 20 +++ .riot/requirements/df7a937.txt | 20 +++ .riot/requirements/e06abee.txt | 38 ++++ .riot/requirements/e20152c.txt | 20 +++ .riot/requirements/e2bf559.txt | 23 +++ .riot/requirements/ee48b16.txt | 22 +++ .riot/requirements/f20c964.txt | 30 ++++ .riot/requirements/f339e99.txt | 19 ++ .riot/requirements/f33b994.txt | 23 +++ .riot/requirements/f46a802.txt | 20 +++ .riot/requirements/f4fafb3.txt | 48 
+++++ .riot/requirements/fbee8ab.txt | 25 +++ .../appsec/_iast/_taint_tracking/__init__.py | 14 +- ddtrace/debugging/_expressions.py | 26 +-- ddtrace/internal/_threads.cpp | 46 ++++- ddtrace/internal/injection.py | 25 ++- ddtrace/internal/wrapping/__init__.py | 2 + ddtrace/internal/wrapping/context.py | 50 +++++- ddtrace/profiling/collector/stack.pyx | 6 +- docker-compose.yml | 4 + docker/.python-version | 2 +- docker/Dockerfile | 11 +- docs/versioning.rst | 6 +- hatch.toml | 8 +- lib-injection/dl_wheels.py | 5 +- lib-injection/sources/sitecustomize.py | 2 +- pyproject.toml | 5 +- .../notes/threethirteen-d40d659d8939fe5e.yaml | 4 + riotfile.py | 85 ++++----- setup.py | 4 +- src/core/Cargo.lock | 164 +++--------------- src/core/Cargo.toml | 2 +- tests/contrib/futures/test_propagation.py | 2 + .../crashtracker/test_crashtracker.py | 1 + tests/internal/symbol_db/test_symbols.py | 2 + tests/internal/test_forksafe.py | 2 + tests/internal/test_injection.py | 2 + tests/internal/test_wrapping.py | 9 + 158 files changed, 3540 insertions(+), 284 deletions(-) create mode 100644 .riot/requirements/102dfdd.txt create mode 100644 .riot/requirements/104daf8.txt create mode 100644 .riot/requirements/104f450.txt create mode 100644 .riot/requirements/1053dce.txt create mode 100644 .riot/requirements/114bad8.txt create mode 100644 .riot/requirements/11f2bd0.txt create mode 100644 .riot/requirements/11fd02a.txt create mode 100644 .riot/requirements/1261ed3.txt create mode 100644 .riot/requirements/1304e20.txt create mode 100644 .riot/requirements/1332b9d.txt create mode 100644 .riot/requirements/13658ae.txt create mode 100644 .riot/requirements/136fddd.txt create mode 100644 .riot/requirements/1374394.txt create mode 100644 .riot/requirements/1381214.txt create mode 100644 .riot/requirements/13ae267.txt create mode 100644 .riot/requirements/141bfd1.txt create mode 100644 .riot/requirements/141f7eb.txt create mode 100644 .riot/requirements/1463930.txt create mode 100644 
.riot/requirements/14be2f6.txt create mode 100644 .riot/requirements/14d7e8a.txt create mode 100644 .riot/requirements/14f1594.txt create mode 100644 .riot/requirements/152e97f.txt create mode 100644 .riot/requirements/1584f8c.txt create mode 100644 .riot/requirements/164c3ce.txt create mode 100644 .riot/requirements/167b853.txt create mode 100644 .riot/requirements/16acf84.txt create mode 100644 .riot/requirements/16cc321.txt create mode 100644 .riot/requirements/16d2d1f.txt create mode 100644 .riot/requirements/16de9c4.txt create mode 100644 .riot/requirements/178f7d5.txt create mode 100644 .riot/requirements/17d40ef.txt create mode 100644 .riot/requirements/1819cb6.txt create mode 100644 .riot/requirements/188244e.txt create mode 100644 .riot/requirements/18c6e70.txt create mode 100644 .riot/requirements/18e9526.txt create mode 100644 .riot/requirements/192c7c0.txt create mode 100644 .riot/requirements/19bbf6d.txt create mode 100644 .riot/requirements/1a485c9.txt create mode 100644 .riot/requirements/1a508dc.txt create mode 100644 .riot/requirements/1acabe0.txt create mode 100644 .riot/requirements/1ada88e.txt create mode 100644 .riot/requirements/1aed5dc.txt create mode 100644 .riot/requirements/1b86c06.txt create mode 100644 .riot/requirements/1b8d922.txt create mode 100644 .riot/requirements/1ba390a.txt create mode 100644 .riot/requirements/1bf4d76.txt create mode 100644 .riot/requirements/1c22cf9.txt create mode 100644 .riot/requirements/1cb554e.txt create mode 100644 .riot/requirements/1ce0711.txt create mode 100644 .riot/requirements/1ce93b3.txt create mode 100644 .riot/requirements/1d74d67.txt create mode 100644 .riot/requirements/1d8a93c.txt create mode 100644 .riot/requirements/1dd5678.txt rename .riot/requirements/{15e6ff4.txt => 1df4764.txt} (65%) create mode 100644 .riot/requirements/1e19c17.txt create mode 100644 .riot/requirements/1e4bb51.txt create mode 100644 .riot/requirements/1e4dfe1.txt create mode 100644 .riot/requirements/1e659c4.txt create 
mode 100644 .riot/requirements/1e70094.txt create mode 100644 .riot/requirements/1ebb239.txt create mode 100644 .riot/requirements/1ec9462.txt create mode 100644 .riot/requirements/1f3b209.txt create mode 100644 .riot/requirements/1fa3005.txt create mode 100644 .riot/requirements/1fc9ecc.txt create mode 100644 .riot/requirements/1fe8dd2.txt create mode 100644 .riot/requirements/248da41.txt create mode 100644 .riot/requirements/2538ed0.txt create mode 100644 .riot/requirements/2581b3a.txt create mode 100644 .riot/requirements/2644218.txt create mode 100644 .riot/requirements/27d0ff8.txt create mode 100644 .riot/requirements/27e3d7b.txt create mode 100644 .riot/requirements/2d6c3d0.txt create mode 100644 .riot/requirements/2dd0811.txt create mode 100644 .riot/requirements/3ab519c.txt create mode 100644 .riot/requirements/3b804dc.txt create mode 100644 .riot/requirements/3c3f295.txt create mode 100644 .riot/requirements/3dd53da.txt create mode 100644 .riot/requirements/3f1be84.txt rename .riot/requirements/{1edf426.txt => 4132bce.txt} (70%) create mode 100644 .riot/requirements/44eeaa9.txt create mode 100644 .riot/requirements/4fd1520.txt create mode 100644 .riot/requirements/5b922fc.txt create mode 100644 .riot/requirements/6cf373b.txt create mode 100644 .riot/requirements/70e034f.txt create mode 100644 .riot/requirements/74ccb83.txt create mode 100644 .riot/requirements/788c304.txt create mode 100644 .riot/requirements/7a40e08.txt rename .riot/requirements/{921bc6c.txt => 7bbf828.txt} (65%) create mode 100644 .riot/requirements/8ce955f.txt create mode 100644 .riot/requirements/91fe586.txt create mode 100644 .riot/requirements/9a07d4a.txt create mode 100644 .riot/requirements/9a5c0d9.txt create mode 100644 .riot/requirements/a0cc2a4.txt create mode 100644 .riot/requirements/a9f396a.txt create mode 100644 .riot/requirements/ae8bd25.txt create mode 100644 .riot/requirements/b29075f.txt create mode 100644 .riot/requirements/b403d9d.txt create mode 100644 
.riot/requirements/bc64f49.txt create mode 100644 .riot/requirements/bc7a1f4.txt create mode 100644 .riot/requirements/bcbec2a.txt create mode 100644 .riot/requirements/bebdd41.txt create mode 100644 .riot/requirements/c1351c9.txt create mode 100644 .riot/requirements/c4d4455.txt create mode 100644 .riot/requirements/c77bbb6.txt create mode 100644 .riot/requirements/c8b476b.txt create mode 100644 .riot/requirements/d5098dd.txt create mode 100644 .riot/requirements/d7dfbc2.txt create mode 100644 .riot/requirements/d81ad99.txt create mode 100644 .riot/requirements/db78045.txt create mode 100644 .riot/requirements/dbc6a48.txt create mode 100644 .riot/requirements/dbeb1d7.txt create mode 100644 .riot/requirements/ddd8721.txt create mode 100644 .riot/requirements/dedea98.txt create mode 100644 .riot/requirements/df7a937.txt create mode 100644 .riot/requirements/e06abee.txt create mode 100644 .riot/requirements/e20152c.txt create mode 100644 .riot/requirements/e2bf559.txt create mode 100644 .riot/requirements/ee48b16.txt create mode 100644 .riot/requirements/f20c964.txt create mode 100644 .riot/requirements/f339e99.txt create mode 100644 .riot/requirements/f33b994.txt create mode 100644 .riot/requirements/f46a802.txt create mode 100644 .riot/requirements/f4fafb3.txt create mode 100644 .riot/requirements/fbee8ab.txt create mode 100644 releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml diff --git a/.github/workflows/build_deploy.yml b/.github/workflows/build_deploy.yml index df5184f83e5..bc6a8b0b3d2 100644 --- a/.github/workflows/build_deploy.yml +++ b/.github/workflows/build_deploy.yml @@ -25,7 +25,7 @@ jobs: build_wheels: uses: ./.github/workflows/build_python_3.yml with: - cibw_build: 'cp37* cp38* cp39* cp310* cp311* cp312*' + cibw_build: 'cp37* cp38* cp39* cp310* cp311* cp312* cp313*' build_sdist: name: Build source distribution diff --git a/.github/workflows/build_python_3.yml b/.github/workflows/build_python_3.yml index 02832a008b9..fac67e45f82 100644 --- 
a/.github/workflows/build_python_3.yml +++ b/.github/workflows/build_python_3.yml @@ -25,7 +25,7 @@ jobs: - uses: actions/setup-python@v5 with: python-version: '3.8' - - run: pip install cibuildwheel==2.16.5 + - run: pip install cibuildwheel==2.22.0 - id: set-matrix env: CIBW_BUILD: ${{ inputs.cibw_build }} @@ -34,7 +34,7 @@ jobs: { cibuildwheel --print-build-identifiers --platform linux --arch x86_64,i686 | jq -cR '{only: ., os: "ubuntu-latest"}' \ && cibuildwheel --print-build-identifiers --platform linux --arch aarch64 | jq -cR '{only: ., os: "arm-4core-linux"}' \ - && cibuildwheel --print-build-identifiers --platform windows --arch AMD64,x86 | jq -cR '{only: ., os: "windows-latest"}' \ + && cibuildwheel --print-build-identifiers --platform windows --arch AMD64,x86 | grep -v 313 | jq -cR '{only: ., os: "windows-latest"}' \ && cibuildwheel --print-build-identifiers --platform macos --arch x86_64,universal2 | jq -cR '{only: ., os: "macos-13"}' } | jq -sc ) @@ -83,7 +83,7 @@ jobs: - name: Build wheels arm64 if: always() && matrix.os == 'arm-4core-linux' - run: /home/runner/.local/bin/pipx run cibuildwheel==2.16.5 --only ${{ matrix.only }} + run: /home/runner/.local/bin/pipx run cibuildwheel==2.22.0 --only ${{ matrix.only }} env: CIBW_SKIP: ${{ inputs.cibw_skip }} CIBW_PRERELEASE_PYTHONS: ${{ inputs.cibw_prerelease_pythons }} @@ -107,7 +107,7 @@ jobs: rm -rf ./tempwheelhouse CIBW_REPAIR_WHEEL_COMMAND_MACOS: | zip -d {wheel} \*.c \*.cpp \*.cc \*.h \*.hpp \*.pyx && - delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} + MACOSX_DEPLOYMENT_TARGET=12.7 delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: choco install -y 7zip && 7z d -r "{wheel}" *.c *.cpp *.cc *.h *.hpp *.pyx && @@ -117,7 +117,7 @@ jobs: - name: Build wheels if: always() && matrix.os != 'arm-4core-linux' - uses: pypa/cibuildwheel@v2.16.5 + uses: pypa/cibuildwheel@v2.22.0 with: only: ${{ matrix.only }} env: @@ -143,7 +143,7 @@ 
jobs: rm -rf ./tempwheelhouse CIBW_REPAIR_WHEEL_COMMAND_MACOS: | zip -d {wheel} \*.c \*.cpp \*.cc \*.h \*.hpp \*.pyx && - delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} + MACOSX_DEPLOYMENT_TARGET=12.7 delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: choco install -y 7zip && 7z d -r "{wheel}" *.c *.cpp *.cc *.h *.hpp *.pyx && diff --git a/.github/workflows/generate-package-versions.yml b/.github/workflows/generate-package-versions.yml index 740edc20725..b8729e882c9 100644 --- a/.github/workflows/generate-package-versions.yml +++ b/.github/workflows/generate-package-versions.yml @@ -49,6 +49,11 @@ jobs: with: python-version: "3.12" + - name: Setup Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + - name: Set up QEMU uses: docker/setup-qemu-action@v2 diff --git a/.github/workflows/requirements-locks.yml b/.github/workflows/requirements-locks.yml index 69400d35dbd..23a1c05a517 100644 --- a/.github/workflows/requirements-locks.yml +++ b/.github/workflows/requirements-locks.yml @@ -11,7 +11,7 @@ jobs: validate: name: Check requirements lockfiles runs-on: ubuntu-latest - container: ghcr.io/datadog/dd-trace-py/testrunner:47c7b5287da25643e46652e6d222a40a52f2382a@sha256:3a02dafeff9cd72966978816d1b39b54f5517af4049396923b95c8452f604269 + container: ghcr.io/datadog/dd-trace-py/testrunner:0a50e839f4b1600f02157518b8d016451b346578@sha256:5dae9bc7872f69b31b612690f0748c7ad71ab90ef28a754b2ae93d0ba505837b steps: - uses: actions/checkout@v4 with: @@ -23,7 +23,7 @@ jobs: run: git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: Set python interpreters - run: pyenv global 3.10 3.7 3.8 3.9 3.11 3.12 + run: pyenv global 3.10 3.7 3.8 3.9 3.11 3.12 3.13 - name: Install Dependencies run: pip install --upgrade pip && pip install riot==0.20.1 diff --git a/.gitlab/download-dependency-wheels.sh b/.gitlab/download-dependency-wheels.sh index 431e662e4c7..c80c60af07b 
100755 --- a/.gitlab/download-dependency-wheels.sh +++ b/.gitlab/download-dependency-wheels.sh @@ -20,7 +20,7 @@ export PYTHONUNBUFFERED=TRUE --local-ddtrace \ --arch x86_64 \ --arch aarch64 \ - --platform musllinux_1_1 \ + --platform musllinux_1_2 \ --platform manylinux2014 \ --output-dir ../pywheels-dep \ --verbose diff --git a/.gitlab/package.yml b/.gitlab/package.yml index 74d76bc0ae4..0cf300d7cbd 100644 --- a/.gitlab/package.yml +++ b/.gitlab/package.yml @@ -1,3 +1,22 @@ +build_base_venvs: + extends: .testrunner + stage: package + parallel: + matrix: + - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] + variables: + CMAKE_BUILD_PARALLEL_LEVEL: 12 + PIP_VERBOSE: 1 + script: + - pip install riot==0.20.0 + - riot -P -v generate --python=$PYTHON_VERSION + artifacts: + name: venv_$PYTHON_VERSION + paths: + - .riot/venv_* + - ddtrace/**/*.so* + - ddtrace/internal/datadog/profiling/crashtracker/crashtracker_exe* + download_ddtrace_artifacts: image: registry.ddbuild.io/github-cli:v27480869-eafb11d-2.43.0 tags: [ "arch:amd64" ] @@ -31,6 +50,8 @@ download_dependency_wheels: PYTHON_VERSION: "3.11" - PYTHON_IMAGE_TAG: "3.12.0" PYTHON_VERSION: "3.12" + - PYTHON_IMAGE_TAG: "3.13.0" + PYTHON_VERSION: "3.13" script: - .gitlab/download-dependency-wheels.sh artifacts: diff --git a/.gitlab/testrunner.yml b/.gitlab/testrunner.yml index f1fd4806506..fe9fb34bec6 100644 --- a/.gitlab/testrunner.yml +++ b/.gitlab/testrunner.yml @@ -1,9 +1,9 @@ .testrunner: - image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:47c7b5287da25643e46652e6d222a40a52f2382a@sha256:3a02dafeff9cd72966978816d1b39b54f5517af4049396923b95c8452f604269 + image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:0a50e839f4b1600f02157518b8d016451b346578@sha256:5dae9bc7872f69b31b612690f0748c7ad71ab90ef28a754b2ae93d0ba505837b # DEV: we have a larger pool of amd64 runners, prefer that over arm64 tags: [ "arch:amd64" ] timeout: 20m before_script: - ulimit -c unlimited - - pyenv 
global 3.12 3.7 3.8 3.9 3.10 3.11 3.13-dev + - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13 - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ diff --git a/.gitlab/tests.yml b/.gitlab/tests.yml index 4495c6fa6a6..ce1fb8fd0ad 100644 --- a/.gitlab/tests.yml +++ b/.gitlab/tests.yml @@ -11,12 +11,12 @@ variables: # CI_DEBUG_SERVICES: "true" .testrunner: - image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:47c7b5287da25643e46652e6d222a40a52f2382a@sha256:3a02dafeff9cd72966978816d1b39b54f5517af4049396923b95c8452f604269 + image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:0a50e839f4b1600f02157518b8d016451b346578@sha256:5dae9bc7872f69b31b612690f0748c7ad71ab90ef28a754b2ae93d0ba505837b # DEV: we have a larger pool of amd64 runners, prefer that over arm64 tags: [ "arch:amd64" ] timeout: 20m before_script: - - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13-dev + - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13 - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ @@ -62,7 +62,7 @@ build_base_venvs: stage: riot parallel: matrix: - - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] variables: CMAKE_BUILD_PARALLEL_LEVEL: 12 PIP_VERBOSE: 1 diff --git a/.riot/requirements/102dfdd.txt b/.riot/requirements/102dfdd.txt new file mode 100644 index 00000000000..40bf3c75049 --- /dev/null +++ b/.riot/requirements/102dfdd.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/102dfdd.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +structlog==20.2.0 diff --git a/.riot/requirements/104daf8.txt b/.riot/requirements/104daf8.txt new file mode 100644 index 
00000000000..e25e2cb84d2 --- /dev/null +++ b/.riot/requirements/104daf8.txt @@ -0,0 +1,25 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/104daf8.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opensearch-py[requests]==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/104f450.txt b/.riot/requirements/104f450.txt new file mode 100644 index 00000000000..a9bf25ae538 --- /dev/null +++ b/.riot/requirements/104f450.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/104f450.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +logbook==1.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1053dce.txt b/.riot/requirements/1053dce.txt new file mode 100644 index 00000000000..5b1c1d31dbe --- /dev/null +++ b/.riot/requirements/1053dce.txt @@ -0,0 +1,26 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1053dce.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +gevent==24.2.1 +greenlet==3.1.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are 
considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/114bad8.txt b/.riot/requirements/114bad8.txt new file mode 100644 index 00000000000..27a7f4e24f7 --- /dev/null +++ b/.riot/requirements/114bad8.txt @@ -0,0 +1,29 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/114bad8.in +# +attrs==24.2.0 +blinker==1.8.2 +click==8.1.7 +coverage[toml]==7.6.1 +flask==3.0.3 +flask-caching==1.10.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-memcached==1.62 +redis==5.1.1 +sortedcontainers==2.4.0 +werkzeug==3.0.4 diff --git a/.riot/requirements/11f2bd0.txt b/.riot/requirements/11f2bd0.txt new file mode 100644 index 00000000000..fdab5d63d33 --- /dev/null +++ b/.riot/requirements/11f2bd0.txt @@ -0,0 +1,38 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/11f2bd0.in +# +annotated-types==0.7.0 +attrs==24.2.0 +blinker==1.8.2 +certifi==2024.8.30 +charset-normalizer==3.3.2 +click==8.1.7 +coverage[toml]==7.6.1 +flask==2.3.3 +flask-openapi3==4.0.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.5.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pydantic==2.9.2 +pydantic-core==2.23.4 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +typing-extensions==4.12.2 +urllib3==1.26.20 +werkzeug==2.3.8 +zipp==3.20.2 diff --git a/.riot/requirements/11fd02a.txt b/.riot/requirements/11fd02a.txt new file mode 100644 index 00000000000..c00ae722bbb --- /dev/null +++ 
b/.riot/requirements/11fd02a.txt @@ -0,0 +1,19 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/11fd02a.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1261ed3.txt b/.riot/requirements/1261ed3.txt new file mode 100644 index 00000000000..cf97c1bc502 --- /dev/null +++ b/.riot/requirements/1261ed3.txt @@ -0,0 +1,31 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1261ed3.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aiohttp-jinja2==1.5.1 +aiosignal==1.3.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +multidict==6.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-aiohttp==1.0.5 +pytest-asyncio==0.23.7 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +yarl==1.13.1 diff --git a/.riot/requirements/1304e20.txt b/.riot/requirements/1304e20.txt new file mode 100644 index 00000000000..54f718e4122 --- /dev/null +++ b/.riot/requirements/1304e20.txt @@ -0,0 +1,26 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1304e20.in +# +asgiref==3.8.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +django==4.2.16 +django-configurations==2.5.1 +djangorestframework==3.15.2 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 
+pytest-mock==3.14.0 +pytest-randomly==3.15.0 +six==1.16.0 +sortedcontainers==2.4.0 +sqlparse==0.5.1 diff --git a/.riot/requirements/1332b9d.txt b/.riot/requirements/1332b9d.txt new file mode 100644 index 00000000000..49dced5d336 --- /dev/null +++ b/.riot/requirements/1332b9d.txt @@ -0,0 +1,38 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1332b9d.in +# +asn1crypto==1.5.1 +attrs==24.2.0 +certifi==2024.8.30 +cffi==1.17.1 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +cryptography==38.0.4 +filelock==3.16.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +platformdirs==4.3.6 +pluggy==1.5.0 +pycparser==2.22 +pyjwt==2.9.0 +pyopenssl==23.2.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +pytz==2024.2 +requests==2.32.3 +responses==0.16.0 +six==1.16.0 +snowflake-connector-python==3.12.2 +sortedcontainers==2.4.0 +tomlkit==0.13.2 +typing-extensions==4.12.2 +urllib3==2.2.3 diff --git a/.riot/requirements/13658ae.txt b/.riot/requirements/13658ae.txt new file mode 100644 index 00000000000..e4ac641af5c --- /dev/null +++ b/.riot/requirements/13658ae.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/13658ae.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch==8.15.1 +elasticsearch7==7.17.12 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/136fddd.txt b/.riot/requirements/136fddd.txt new file mode 100644 index 00000000000..848b88850d0 --- /dev/null +++ b/.riot/requirements/136fddd.txt @@ -0,0 
+1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/136fddd.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +elasticsearch5==5.5.6 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/1374394.txt b/.riot/requirements/1374394.txt new file mode 100644 index 00000000000..9e287a285b0 --- /dev/null +++ b/.riot/requirements/1374394.txt @@ -0,0 +1,34 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1374394.in +# +astunparse==1.6.3 +attrs==24.2.0 +blinker==1.8.2 +certifi==2024.8.30 +charset-normalizer==3.3.2 +click==8.1.7 +coverage[toml]==7.6.1 +flask==3.0.3 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +six==1.16.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 +virtualenv-clone==0.5.7 +werkzeug==3.0.4 +wheel==0.44.0 diff --git a/.riot/requirements/1381214.txt b/.riot/requirements/1381214.txt new file mode 100644 index 00000000000..583f505bac4 --- /dev/null +++ b/.riot/requirements/1381214.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1381214.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +dramatiq==1.17.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +prometheus-client==0.21.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +redis==5.1.1 +sortedcontainers==2.4.0 diff 
--git a/.riot/requirements/13ae267.txt b/.riot/requirements/13ae267.txt new file mode 100644 index 00000000000..72f91d44446 --- /dev/null +++ b/.riot/requirements/13ae267.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/13ae267.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +loguru==0.7.2 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/141bfd1.txt b/.riot/requirements/141bfd1.txt new file mode 100644 index 00000000000..ca6a38880e2 --- /dev/null +++ b/.riot/requirements/141bfd1.txt @@ -0,0 +1,32 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/141bfd1.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +click==7.1.2 +coverage[toml]==7.6.1 +flask==1.1.4 +gunicorn==23.0.0 +httpretty==1.0.5 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +itsdangerous==1.1.0 +jinja2==2.11.3 +markupsafe==1.1.1 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.2.3 +werkzeug==1.0.1 diff --git a/.riot/requirements/141f7eb.txt b/.riot/requirements/141f7eb.txt new file mode 100644 index 00000000000..d8494646e5d --- /dev/null +++ b/.riot/requirements/141f7eb.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/141f7eb.in +# +attrs==24.2.0 +cattrs==22.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +molten==1.0.2 +mypy-extensions==1.0.0 
+opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +typing-extensions==3.10.0.2 +typing-inspect==0.6.0 diff --git a/.riot/requirements/1463930.txt b/.riot/requirements/1463930.txt new file mode 100644 index 00000000000..313484f83ce --- /dev/null +++ b/.riot/requirements/1463930.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1463930.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.0.8 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/14be2f6.txt b/.riot/requirements/14be2f6.txt new file mode 100644 index 00000000000..0a516b36c05 --- /dev/null +++ b/.riot/requirements/14be2f6.txt @@ -0,0 +1,25 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/14be2f6.in +# +algoliasearch==2.6.3 +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/14d7e8a.txt b/.riot/requirements/14d7e8a.txt new file mode 100644 index 00000000000..979467f1e35 --- /dev/null +++ b/.riot/requirements/14d7e8a.txt @@ -0,0 +1,31 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/14d7e8a.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 
+aiohttp-jinja2==1.6 +aiosignal==1.3.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +multidict==6.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-aiohttp==1.0.5 +pytest-asyncio==0.23.7 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +yarl==1.13.1 diff --git a/.riot/requirements/14f1594.txt b/.riot/requirements/14f1594.txt new file mode 100644 index 00000000000..16c4e6c559a --- /dev/null +++ b/.riot/requirements/14f1594.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/14f1594.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +mongoengine==0.29.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymongo==3.12.3 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/152e97f.txt b/.riot/requirements/152e97f.txt new file mode 100644 index 00000000000..973e252ab4f --- /dev/null +++ b/.riot/requirements/152e97f.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/152e97f.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +elasticsearch6==6.8.2 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/1584f8c.txt b/.riot/requirements/1584f8c.txt new file mode 100644 index 00000000000..602372e9b06 --- /dev/null +++ b/.riot/requirements/1584f8c.txt @@ -0,0 +1,29 @@ +# +# This file is autogenerated by pip-compile with Python 
3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1584f8c.in +# +asgiref==3.8.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +django==4.2.16 +django-configurations==2.5.1 +django-hosts==6.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +six==1.16.0 +sortedcontainers==2.4.0 +sqlparse==0.5.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/164c3ce.txt b/.riot/requirements/164c3ce.txt new file mode 100644 index 00000000000..5acfc83a32e --- /dev/null +++ b/.riot/requirements/164c3ce.txt @@ -0,0 +1,31 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/164c3ce.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aiohttp-jinja2==1.5.1 +aiosignal==1.3.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +multidict==6.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-aiohttp==1.0.5 +pytest-asyncio==0.23.7 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +yarl==1.13.1 diff --git a/.riot/requirements/167b853.txt b/.riot/requirements/167b853.txt new file mode 100644 index 00000000000..71aa1ae2587 --- /dev/null +++ b/.riot/requirements/167b853.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/167b853.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 
+pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/16acf84.txt b/.riot/requirements/16acf84.txt new file mode 100644 index 00000000000..402495f9654 --- /dev/null +++ b/.riot/requirements/16acf84.txt @@ -0,0 +1,27 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/16acf84.in +# +asgiref==3.8.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +django==3.2.25 +django-configurations==2.5.1 +djangorestframework==3.11.2 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +pytz==2024.2 +six==1.16.0 +sortedcontainers==2.4.0 +sqlparse==0.5.1 diff --git a/.riot/requirements/16cc321.txt b/.riot/requirements/16cc321.txt new file mode 100644 index 00000000000..e46e05ff1bb --- /dev/null +++ b/.riot/requirements/16cc321.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/16cc321.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/16d2d1f.txt b/.riot/requirements/16d2d1f.txt new file mode 100644 index 00000000000..7092a5762ac --- /dev/null +++ b/.riot/requirements/16d2d1f.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/16d2d1f.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==2.1.1 +click==8.1.7 +coverage[toml]==7.6.1 
+deprecated==1.2.14 +flask==2.1.3 +gevent==24.2.1 +greenlet==3.1.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.5.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.0.1 +mock==5.1.0 +opentelemetry-api==1.15.0 +opentelemetry-instrumentation==0.45b0 +opentelemetry-instrumentation-flask==0.45b0 +opentelemetry-instrumentation-wsgi==0.45b0 +opentelemetry-semantic-conventions==0.45b0 +opentelemetry-util-http==0.45b0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.28.1 +sortedcontainers==2.4.0 +urllib3==1.26.20 +werkzeug==2.1.2 +wrapt==1.16.0 +zipp==3.20.2 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/16de9c4.txt b/.riot/requirements/16de9c4.txt new file mode 100644 index 00000000000..ed357be4e45 --- /dev/null +++ b/.riot/requirements/16de9c4.txt @@ -0,0 +1,37 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/16de9c4.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aiosignal==1.3.1 +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch[async]==8.15.1 +elasticsearch7[async]==7.17.12 +events==0.5 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +multidict==6.1.0 +opensearch-py[async]==2.7.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +requests==2.32.3 +six==1.16.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 +yarl==1.13.1 diff --git a/.riot/requirements/178f7d5.txt b/.riot/requirements/178f7d5.txt new file mode 100644 index 00000000000..4d7d3e5b6e6 --- /dev/null +++ 
b/.riot/requirements/178f7d5.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/178f7d5.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +logbook==1.7.0.post0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/17d40ef.txt b/.riot/requirements/17d40ef.txt new file mode 100644 index 00000000000..53c94aadbe1 --- /dev/null +++ b/.riot/requirements/17d40ef.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/17d40ef.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +loguru==0.4.1 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1819cb6.txt b/.riot/requirements/1819cb6.txt new file mode 100644 index 00000000000..0c9e45ced2c --- /dev/null +++ b/.riot/requirements/1819cb6.txt @@ -0,0 +1,29 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1819cb6.in +# +attrs==24.2.0 +blinker==1.8.2 +click==7.1.2 +coverage[toml]==7.6.1 +flask==1.1.4 +flask-caching==1.10.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +itsdangerous==1.1.0 +jinja2==2.11.3 +markupsafe==1.1.1 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-memcached==1.62 +redis==5.1.1 +sortedcontainers==2.4.0 +werkzeug==1.0.1 diff --git a/.riot/requirements/188244e.txt b/.riot/requirements/188244e.txt new 
file mode 100644 index 00000000000..7a30a1a4b8e --- /dev/null +++ b/.riot/requirements/188244e.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/188244e.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/18c6e70.txt b/.riot/requirements/18c6e70.txt new file mode 100644 index 00000000000..f257d8ded2b --- /dev/null +++ b/.riot/requirements/18c6e70.txt @@ -0,0 +1,19 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/18c6e70.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/18e9526.txt b/.riot/requirements/18e9526.txt new file mode 100644 index 00000000000..ce6bddab69f --- /dev/null +++ b/.riot/requirements/18e9526.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/18e9526.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +events==0.5 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opensearch-py[requests]==2.7.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +requests==2.32.3 +six==1.16.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git 
a/.riot/requirements/192c7c0.txt b/.riot/requirements/192c7c0.txt new file mode 100644 index 00000000000..15f53062f83 --- /dev/null +++ b/.riot/requirements/192c7c0.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/192c7c0.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elasticsearch==7.17.12 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/19bbf6d.txt b/.riot/requirements/19bbf6d.txt new file mode 100644 index 00000000000..1e31a198638 --- /dev/null +++ b/.riot/requirements/19bbf6d.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/19bbf6d.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +dnspython==2.7.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +mongoengine==0.29.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymongo==4.10.1 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1a485c9.txt b/.riot/requirements/1a485c9.txt new file mode 100644 index 00000000000..558f2540488 --- /dev/null +++ b/.riot/requirements/1a485c9.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1a485c9.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +decorator==5.1.1 +dogpile-cache==1.3.3 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pbr==6.1.0 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 
+sortedcontainers==2.4.0 +stevedore==5.3.0 diff --git a/.riot/requirements/1a508dc.txt b/.riot/requirements/1a508dc.txt new file mode 100644 index 00000000000..6e2dfecef5e --- /dev/null +++ b/.riot/requirements/1a508dc.txt @@ -0,0 +1,30 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1a508dc.in +# +asgiref==3.8.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +django==3.2.25 +django-configurations==2.5.1 +django-hosts==4.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +pytz==2024.2 +six==1.16.0 +sortedcontainers==2.4.0 +sqlparse==0.5.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/1acabe0.txt b/.riot/requirements/1acabe0.txt new file mode 100644 index 00000000000..0f106bcd2dc --- /dev/null +++ b/.riot/requirements/1acabe0.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1acabe0.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1ada88e.txt b/.riot/requirements/1ada88e.txt new file mode 100644 index 00000000000..5fc0aa5664d --- /dev/null +++ b/.riot/requirements/1ada88e.txt @@ -0,0 +1,29 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ada88e.in +# +asgiref==3.8.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +django==4.2.16 
+django-configurations==2.5.1 +django-hosts==5.2 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +six==1.16.0 +sortedcontainers==2.4.0 +sqlparse==0.5.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/1aed5dc.txt b/.riot/requirements/1aed5dc.txt new file mode 100644 index 00000000000..4d8f8858d78 --- /dev/null +++ b/.riot/requirements/1aed5dc.txt @@ -0,0 +1,30 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1aed5dc.in +# +attrs==24.2.0 +blinker==1.8.2 +cachelib==0.9.0 +click==7.1.2 +coverage[toml]==7.6.1 +flask==1.1.4 +flask-caching==2.3.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +itsdangerous==1.1.0 +jinja2==2.11.3 +markupsafe==1.1.1 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-memcached==1.62 +redis==5.1.1 +sortedcontainers==2.4.0 +werkzeug==1.0.1 diff --git a/.riot/requirements/1b86c06.txt b/.riot/requirements/1b86c06.txt new file mode 100644 index 00000000000..68de1371257 --- /dev/null +++ b/.riot/requirements/1b86c06.txt @@ -0,0 +1,27 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1b86c06.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +h11==0.14.0 +httpcore==0.12.3 +httpx==0.17.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +rfc3986[idna2008]==1.5.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 diff 
--git a/.riot/requirements/1b8d922.txt b/.riot/requirements/1b8d922.txt new file mode 100644 index 00000000000..76a225cb035 --- /dev/null +++ b/.riot/requirements/1b8d922.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1b8d922.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mako==1.1.6 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1ba390a.txt b/.riot/requirements/1ba390a.txt new file mode 100644 index 00000000000..71d341c1fbb --- /dev/null +++ b/.riot/requirements/1ba390a.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ba390a.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +decorator==5.1.1 +dogpile-cache==0.9.2 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1bf4d76.txt b/.riot/requirements/1bf4d76.txt new file mode 100644 index 00000000000..be2efe8e43c --- /dev/null +++ b/.riot/requirements/1bf4d76.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1bf4d76.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +decorator==5.1.1 +dogpile-cache==1.3.3 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pbr==6.1.0 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +stevedore==5.3.0 diff --git 
a/.riot/requirements/1c22cf9.txt b/.riot/requirements/1c22cf9.txt new file mode 100644 index 00000000000..091cd98d529 --- /dev/null +++ b/.riot/requirements/1c22cf9.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1c22cf9.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pylibmc==1.6.3 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1cb554e.txt b/.riot/requirements/1cb554e.txt new file mode 100644 index 00000000000..27f518b59cc --- /dev/null +++ b/.riot/requirements/1cb554e.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1cb554e.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymemcache==3.4.4 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +six==1.16.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1ce0711.txt b/.riot/requirements/1ce0711.txt new file mode 100644 index 00000000000..6721b5e5b0b --- /dev/null +++ b/.riot/requirements/1ce0711.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ce0711.in +# +attrs==24.2.0 +beautifulsoup4==4.12.3 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +soupsieve==2.6 +waitress==3.0.0 +webob==1.8.8 +webtest==3.0.1 diff --git 
a/.riot/requirements/1ce93b3.txt b/.riot/requirements/1ce93b3.txt new file mode 100644 index 00000000000..a0edba9ffd0 --- /dev/null +++ b/.riot/requirements/1ce93b3.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ce93b3.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +dnspython==2.7.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +mongoengine==0.29.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymongo==4.8.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1d74d67.txt b/.riot/requirements/1d74d67.txt new file mode 100644 index 00000000000..32873cff656 --- /dev/null +++ b/.riot/requirements/1d74d67.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1d74d67.in +# +aniso8601==9.0.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +graphene==3.3 +graphql-core==3.2.4 +graphql-relay==3.2.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1d8a93c.txt b/.riot/requirements/1d8a93c.txt new file mode 100644 index 00000000000..54f5d2a96c9 --- /dev/null +++ b/.riot/requirements/1d8a93c.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1d8a93c.in +# +aiosqlite==0.17.0 +annotated-types==0.7.0 +attrs==24.2.0 +blinker==1.8.2 +bytecode==0.15.1 +cattrs==22.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +click==8.1.7 +coverage[toml]==7.6.1 +envier==0.5.2 +flask==3.0.3 +hypothesis==6.45.0 
+idna==3.10 +iniconfig==2.0.0 +iso8601==1.1.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +peewee==3.17.6 +pluggy==1.5.0 +pony==0.7.19 +protobuf==5.28.2 +pycryptodome==3.21.0 +pydantic==2.9.2 +pydantic-core==2.23.4 +pypika-tortoise==0.1.6 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytz==2024.2 +requests==2.32.3 +sortedcontainers==2.4.0 +sqlalchemy==2.0.35 +tortoise-orm==0.21.6 +typing-extensions==4.12.2 +urllib3==2.2.3 +werkzeug==3.0.4 +xmltodict==0.13.0 diff --git a/.riot/requirements/1dd5678.txt b/.riot/requirements/1dd5678.txt new file mode 100644 index 00000000000..c3ed6ec2447 --- /dev/null +++ b/.riot/requirements/1dd5678.txt @@ -0,0 +1,30 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1dd5678.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +gevent==24.2.1 +greenlet==3.1.1 +httpretty==1.1.4 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pyfakefs==5.6.0 +pytest==8.3.3 +pytest-asyncio==0.23.8 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-json-logger==2.0.7 +sortedcontainers==2.4.0 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/15e6ff4.txt b/.riot/requirements/1df4764.txt similarity index 65% rename from .riot/requirements/15e6ff4.txt rename to .riot/requirements/1df4764.txt index 205310cd885..d6cef24569d 100644 --- a/.riot/requirements/15e6ff4.txt +++ b/.riot/requirements/1df4764.txt @@ -1,21 +1,21 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/15e6ff4.in +# pip-compile --allow-unsafe --no-annotate 
.riot/requirements/1df4764.in # annotated-types==0.7.0 -anyio==4.6.2.post1 +anyio==4.7.0 attrs==24.2.0 -boto3==1.35.62 -botocore==1.35.62 +boto3==1.35.78 +botocore==1.35.78 certifi==2024.8.30 -coverage[toml]==7.6.7 -fastapi==0.115.5 +coverage[toml]==7.6.9 +fastapi==0.115.6 h11==0.14.0 httpcore==1.0.7 httpretty==1.1.4 -httpx==0.27.2 +httpx==0.28.1 hypothesis==6.45.0 idna==3.10 iniconfig==2.0.0 @@ -25,22 +25,22 @@ msgpack==1.1.0 opentracing==2.4.0 packaging==24.2 pluggy==1.5.0 -pydantic==2.9.2 -pydantic-core==2.23.4 -pytest==8.3.3 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.16.0 python-dateutil==2.9.0.post0 -s3transfer==0.10.3 -six==1.16.0 +s3transfer==0.10.4 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -starlette==0.41.2 +starlette==0.41.3 structlog==24.4.0 typing-extensions==4.12.2 urllib3==2.2.3 -wheel==0.45.0 +wheel==0.45.1 # The following packages are considered to be unsafe in a requirements file: -setuptools==75.5.0 +setuptools==75.6.0 diff --git a/.riot/requirements/1e19c17.txt b/.riot/requirements/1e19c17.txt new file mode 100644 index 00000000000..615658928e1 --- /dev/null +++ b/.riot/requirements/1e19c17.txt @@ -0,0 +1,29 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e19c17.in +# +anyio==4.6.0 +asgiref==3.0.0 +async-timeout==3.0.1 +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +h11==0.14.0 +httpcore==1.0.6 +httpx==0.27.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1e4bb51.txt b/.riot/requirements/1e4bb51.txt new file mode 100644 index 00000000000..c160a2df5e6 --- /dev/null +++ b/.riot/requirements/1e4bb51.txt @@ 
-0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e4bb51.in +# +aniso8601==9.0.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +graphene==3.0 +graphql-core==3.1.7 +graphql-relay==3.1.5 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1e4dfe1.txt b/.riot/requirements/1e4dfe1.txt new file mode 100644 index 00000000000..11f08da5171 --- /dev/null +++ b/.riot/requirements/1e4dfe1.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e4dfe1.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aiosignal==1.3.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +multidict==6.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-aiohttp==1.0.5 +pytest-asyncio==0.23.7 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +yarl==1.13.1 diff --git a/.riot/requirements/1e659c4.txt b/.riot/requirements/1e659c4.txt new file mode 100644 index 00000000000..ef8e4a09e09 --- /dev/null +++ b/.riot/requirements/1e659c4.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e659c4.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymemcache==4.0.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git 
a/.riot/requirements/1e70094.txt b/.riot/requirements/1e70094.txt new file mode 100644 index 00000000000..ac90db74765 --- /dev/null +++ b/.riot/requirements/1e70094.txt @@ -0,0 +1,42 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1e70094.in +# +attrs==24.2.0 +beautifulsoup4==4.12.3 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +hupper==1.12.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pastedeploy==3.1.0 +plaster==1.1.2 +plaster-pastedeploy==1.0.1 +pluggy==1.5.0 +pserve-test-app @ file:///root/project/tests/contrib/pyramid/pserve_app +pyramid==2.0.2 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +soupsieve==2.6 +translationstring==1.4 +urllib3==2.2.3 +venusian==3.1.0 +waitress==3.0.0 +webob==1.8.8 +webtest==3.0.1 +zope-deprecation==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/1ebb239.txt b/.riot/requirements/1ebb239.txt new file mode 100644 index 00000000000..baa97737f91 --- /dev/null +++ b/.riot/requirements/1ebb239.txt @@ -0,0 +1,35 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ebb239.in +# +attrs==24.2.0 +autocommand==2.2.2 +cheroot==10.0.1 +cherrypy==18.10.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +jaraco-collections==5.1.0 +jaraco-context==6.0.1 +jaraco-functools==4.1.0 +jaraco-text==4.0.0 +mock==5.1.0 +more-itertools==8.10.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +portend==3.2.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +six==1.16.0 +sortedcontainers==2.4.0 
+tempora==5.7.0 +zc-lockfile==3.0.post1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/1ec9462.txt b/.riot/requirements/1ec9462.txt new file mode 100644 index 00000000000..da918b276a7 --- /dev/null +++ b/.riot/requirements/1ec9462.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ec9462.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.0.0 diff --git a/.riot/requirements/1f3b209.txt b/.riot/requirements/1f3b209.txt new file mode 100644 index 00000000000..ed48c26f9b8 --- /dev/null +++ b/.riot/requirements/1f3b209.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1f3b209.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mariadb==1.1.10 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1fa3005.txt b/.riot/requirements/1fa3005.txt new file mode 100644 index 00000000000..d05c2537930 --- /dev/null +++ b/.riot/requirements/1fa3005.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1fa3005.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +jinja2==3.0.3 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 
+pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1fc9ecc.txt b/.riot/requirements/1fc9ecc.txt new file mode 100644 index 00000000000..f4245743dde --- /dev/null +++ b/.riot/requirements/1fc9ecc.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1fc9ecc.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mariadb==1.1.10 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/1fe8dd2.txt b/.riot/requirements/1fe8dd2.txt new file mode 100644 index 00000000000..c6356e47072 --- /dev/null +++ b/.riot/requirements/1fe8dd2.txt @@ -0,0 +1,83 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --no-annotate .riot/requirements/1fe8dd2.in +# +aiohappyeyeballs==2.4.4 +aiohttp==3.11.10 +aiosignal==1.3.1 +annotated-types==0.7.0 +anyio==4.7.0 +appdirs==1.4.4 +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.4.0 +coverage[toml]==7.6.9 +dataclasses-json==0.6.7 +datasets==3.2.0 +dill==0.3.8 +distro==1.9.0 +filelock==3.16.1 +frozenlist==1.5.0 +fsspec[http]==2024.9.0 +h11==0.14.0 +httpcore==1.0.7 +httpx==0.28.1 +huggingface-hub==0.26.5 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jiter==0.8.2 +jsonpatch==1.33 +jsonpointer==3.0.0 +langchain==0.2.17 +langchain-community==0.2.19 +langchain-core==0.2.43 +langchain-openai==0.1.25 +langchain-text-splitters==0.2.4 +langsmith==0.1.147 +marshmallow==3.23.1 +mock==5.1.0 +multidict==6.1.0 +multiprocess==0.70.16 +mypy-extensions==1.0.0 +nest-asyncio==1.6.0 +numpy==1.26.4 +openai==1.57.2 +opentracing==2.4.0 +orjson==3.10.12 +packaging==24.2 +pandas==2.2.3 +pluggy==1.5.0 +propcache==0.2.1 +pyarrow==18.1.0 
+pydantic==2.10.3 +pydantic-core==2.27.1 +pysbd==0.3.4 +pytest==8.3.4 +pytest-asyncio==0.21.1 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +python-dateutil==2.9.0.post0 +pytz==2024.2 +pyyaml==6.0.2 +ragas==0.1.21 +regex==2024.11.6 +requests==2.32.3 +requests-toolbelt==1.0.0 +six==1.17.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +sqlalchemy==2.0.36 +tenacity==8.5.0 +tiktoken==0.8.0 +tqdm==4.67.1 +typing-extensions==4.12.2 +typing-inspect==0.9.0 +tzdata==2024.2 +urllib3==2.2.3 +vcrpy==6.0.2 +wrapt==1.17.0 +xxhash==3.5.0 +yarl==1.18.3 diff --git a/.riot/requirements/248da41.txt b/.riot/requirements/248da41.txt new file mode 100644 index 00000000000..34d903b5cbf --- /dev/null +++ b/.riot/requirements/248da41.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/248da41.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +docker==7.1.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/2538ed0.txt b/.riot/requirements/2538ed0.txt new file mode 100644 index 00000000000..f3d631a3ba0 --- /dev/null +++ b/.riot/requirements/2538ed0.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/2538ed0.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch==8.0.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/2581b3a.txt 
b/.riot/requirements/2581b3a.txt new file mode 100644 index 00000000000..b0fbf422fae --- /dev/null +++ b/.riot/requirements/2581b3a.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/2581b3a.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +mysql-connector-python==9.0.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/2644218.txt b/.riot/requirements/2644218.txt new file mode 100644 index 00000000000..0af7a95877a --- /dev/null +++ b/.riot/requirements/2644218.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/2644218.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +httpretty==1.1.4 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.24.0 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +sortedcontainers==2.4.0 +typing-extensions==4.12.2 diff --git a/.riot/requirements/27d0ff8.txt b/.riot/requirements/27d0ff8.txt new file mode 100644 index 00000000000..291fe50cacc --- /dev/null +++ b/.riot/requirements/27d0ff8.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/27d0ff8.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mako==1.3.5 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/27e3d7b.txt 
b/.riot/requirements/27e3d7b.txt new file mode 100644 index 00000000000..602a0f0c52d --- /dev/null +++ b/.riot/requirements/27e3d7b.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/27e3d7b.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +graphql-core==3.2.4 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/2d6c3d0.txt b/.riot/requirements/2d6c3d0.txt new file mode 100644 index 00000000000..a2b00eb5c7c --- /dev/null +++ b/.riot/requirements/2d6c3d0.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/2d6c3d0.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/2dd0811.txt b/.riot/requirements/2dd0811.txt new file mode 100644 index 00000000000..ecd42e076bd --- /dev/null +++ b/.riot/requirements/2dd0811.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/2dd0811.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +graphql-core==3.2.4 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/3ab519c.txt b/.riot/requirements/3ab519c.txt new file 
mode 100644 index 00000000000..fd80ad8e698 --- /dev/null +++ b/.riot/requirements/3ab519c.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/3ab519c.in +# +anyio==4.6.0 +asgiref==3.8.1 +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +h11==0.14.0 +httpcore==1.0.6 +httpx==0.27.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/3b804dc.txt b/.riot/requirements/3b804dc.txt new file mode 100644 index 00000000000..aa60e7c9491 --- /dev/null +++ b/.riot/requirements/3b804dc.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/3b804dc.in +# +anyio==4.6.0 +asgiref==3.8.1 +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +h11==0.14.0 +httpcore==1.0.6 +httpx==0.27.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/3c3f295.txt b/.riot/requirements/3c3f295.txt new file mode 100644 index 00000000000..c97658e408e --- /dev/null +++ b/.riot/requirements/3c3f295.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/3c3f295.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch8==8.0.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 
+packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/3dd53da.txt b/.riot/requirements/3dd53da.txt new file mode 100644 index 00000000000..088ac0ddd7e --- /dev/null +++ b/.riot/requirements/3dd53da.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/3dd53da.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +dnspython==2.7.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +mongoengine==0.29.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymongo==4.8.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/3f1be84.txt b/.riot/requirements/3f1be84.txt new file mode 100644 index 00000000000..fb754701b3b --- /dev/null +++ b/.riot/requirements/3f1be84.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/3f1be84.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch8==8.15.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/1edf426.txt b/.riot/requirements/4132bce.txt similarity index 70% rename from .riot/requirements/1edf426.txt rename to .riot/requirements/4132bce.txt index 56a5eb28b4d..b27023913a3 100644 --- a/.riot/requirements/1edf426.txt +++ b/.riot/requirements/4132bce.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile --allow-unsafe --no-annotate 
.riot/requirements/1edf426.in +# pip-compile --no-annotate .riot/requirements/4132bce.in # attrs==24.2.0 -coverage[toml]==7.6.4 -gevent==24.11.1 +coverage[toml]==7.6.9 +gevent==23.9.1 greenlet==3.1.1 hypothesis==6.45.0 iniconfig==2.0.0 @@ -14,13 +14,13 @@ mock==5.1.0 opentracing==2.4.0 packaging==24.2 pluggy==1.5.0 -pytest==8.3.3 +pytest==8.3.4 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.16.0 sortedcontainers==2.4.0 zope-event==5.0 -zope-interface==7.1.1 +zope-interface==7.2 # The following packages are considered to be unsafe in a requirements file: -setuptools==75.3.0 +# setuptools diff --git a/.riot/requirements/44eeaa9.txt b/.riot/requirements/44eeaa9.txt new file mode 100644 index 00000000000..138f4161595 --- /dev/null +++ b/.riot/requirements/44eeaa9.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/44eeaa9.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aiosignal==1.3.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +multidict==6.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-aiohttp==1.0.5 +pytest-asyncio==0.23.7 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +yarl==1.13.1 diff --git a/.riot/requirements/4fd1520.txt b/.riot/requirements/4fd1520.txt new file mode 100644 index 00000000000..88c1fc5703a --- /dev/null +++ b/.riot/requirements/4fd1520.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/4fd1520.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +decorator==5.1.1 +dogpile-cache==1.3.3 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pbr==6.1.0 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 
+pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +stevedore==5.3.0 diff --git a/.riot/requirements/5b922fc.txt b/.riot/requirements/5b922fc.txt new file mode 100644 index 00000000000..ff7fa5e6ba6 --- /dev/null +++ b/.riot/requirements/5b922fc.txt @@ -0,0 +1,45 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/5b922fc.in +# +asgiref==3.8.1 +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==2.1.1 +click==7.1.2 +coverage[toml]==7.6.1 +flask==1.1.4 +gevent==24.2.1 +greenlet==3.1.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +itsdangerous==1.1.0 +jinja2==2.11.3 +markupsafe==2.0.1 +mock==5.1.0 +opentelemetry-api==1.0.0 +opentelemetry-instrumentation==0.19b0 +opentelemetry-instrumentation-flask==0.19b0 +opentelemetry-instrumentation-wsgi==0.19b0 +opentelemetry-util-http==0.19b0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.28.1 +sortedcontainers==2.4.0 +urllib3==1.26.20 +werkzeug==1.0.1 +wrapt==1.16.0 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/6cf373b.txt b/.riot/requirements/6cf373b.txt new file mode 100644 index 00000000000..e69fda1f1ed --- /dev/null +++ b/.riot/requirements/6cf373b.txt @@ -0,0 +1,19 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/6cf373b.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/70e034f.txt 
b/.riot/requirements/70e034f.txt new file mode 100644 index 00000000000..12950d5019e --- /dev/null +++ b/.riot/requirements/70e034f.txt @@ -0,0 +1,24 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/70e034f.in +# +attrs==24.2.0 +cattrs==22.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +molten==1.0.2 +mypy-extensions==1.0.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +typing-extensions==3.10.0.2 +typing-inspect==0.6.0 diff --git a/.riot/requirements/74ccb83.txt b/.riot/requirements/74ccb83.txt new file mode 100644 index 00000000000..9a3462b41cd --- /dev/null +++ b/.riot/requirements/74ccb83.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/74ccb83.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/788c304.txt b/.riot/requirements/788c304.txt new file mode 100644 index 00000000000..36e1cd013d9 --- /dev/null +++ b/.riot/requirements/788c304.txt @@ -0,0 +1,27 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/788c304.in +# +anyio==4.6.0 +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +h11==0.14.0 +httpcore==1.0.6 +httpx==0.27.2 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 
+pytest-randomly==3.15.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/7a40e08.txt b/.riot/requirements/7a40e08.txt new file mode 100644 index 00000000000..a770877b6ee --- /dev/null +++ b/.riot/requirements/7a40e08.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/7a40e08.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elasticsearch7==7.13.4 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/921bc6c.txt b/.riot/requirements/7bbf828.txt similarity index 65% rename from .riot/requirements/921bc6c.txt rename to .riot/requirements/7bbf828.txt index fd44244070f..e1c39713bce 100644 --- a/.riot/requirements/921bc6c.txt +++ b/.riot/requirements/7bbf828.txt @@ -1,21 +1,21 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.13 # by the following command: # -# pip-compile --allow-unsafe --no-annotate .riot/requirements/921bc6c.in +# pip-compile --allow-unsafe --no-annotate .riot/requirements/7bbf828.in # annotated-types==0.7.0 -anyio==4.6.2.post1 +anyio==4.7.0 attrs==24.2.0 -boto3==1.35.62 -botocore==1.35.62 +boto3==1.35.78 +botocore==1.35.78 certifi==2024.8.30 -coverage[toml]==7.6.7 -fastapi==0.115.5 +coverage[toml]==7.6.9 +fastapi==0.115.6 h11==0.14.0 httpcore==1.0.7 httpretty==1.1.4 -httpx==0.27.2 +httpx==0.28.1 hypothesis==6.45.0 idna==3.10 iniconfig==2.0.0 @@ -25,22 +25,22 @@ msgpack==1.1.0 opentracing==2.4.0 packaging==24.2 pluggy==1.5.0 -pydantic==2.9.2 -pydantic-core==2.23.4 -pytest==8.3.3 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-randomly==3.16.0 
python-dateutil==2.9.0.post0 -s3transfer==0.10.3 -six==1.16.0 +s3transfer==0.10.4 +six==1.17.0 sniffio==1.3.1 sortedcontainers==2.4.0 -starlette==0.41.2 +starlette==0.41.3 structlog==24.4.0 typing-extensions==4.12.2 urllib3==2.2.3 -wheel==0.45.0 +wheel==0.45.1 # The following packages are considered to be unsafe in a requirements file: -setuptools==75.5.0 +setuptools==75.6.0 diff --git a/.riot/requirements/8ce955f.txt b/.riot/requirements/8ce955f.txt new file mode 100644 index 00000000000..6a3a0e63588 --- /dev/null +++ b/.riot/requirements/8ce955f.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/8ce955f.in +# +anyio==4.6.0 +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +h11==0.14.0 +httpcore==0.16.3 +httpx==0.23.3 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +rfc3986[idna2008]==1.5.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/91fe586.txt b/.riot/requirements/91fe586.txt new file mode 100644 index 00000000000..46d48acec17 --- /dev/null +++ b/.riot/requirements/91fe586.txt @@ -0,0 +1,25 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/91fe586.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +requests-mock==1.12.1 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/9a07d4a.txt b/.riot/requirements/9a07d4a.txt new file mode 100644 index 
00000000000..027306e2816 --- /dev/null +++ b/.riot/requirements/9a07d4a.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/9a07d4a.in +# +amqp==5.2.0 +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +kombu==5.4.2 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +tzdata==2024.2 +vine==5.1.0 diff --git a/.riot/requirements/9a5c0d9.txt b/.riot/requirements/9a5c0d9.txt new file mode 100644 index 00000000000..edab275315a --- /dev/null +++ b/.riot/requirements/9a5c0d9.txt @@ -0,0 +1,32 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/9a5c0d9.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +gevent==24.2.1 +greenlet==3.1.1 +gunicorn==23.0.0 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.2.3 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/a0cc2a4.txt b/.riot/requirements/a0cc2a4.txt new file mode 100644 index 00000000000..f724ecdac7a --- /dev/null +++ b/.riot/requirements/a0cc2a4.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/a0cc2a4.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 
+pymemcache==3.5.2 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +six==1.16.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/a9f396a.txt b/.riot/requirements/a9f396a.txt new file mode 100644 index 00000000000..4505eee48b0 --- /dev/null +++ b/.riot/requirements/a9f396a.txt @@ -0,0 +1,31 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/a9f396a.in +# +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aiohttp-jinja2==1.6 +aiosignal==1.3.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +frozenlist==1.4.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +multidict==6.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-aiohttp==1.0.5 +pytest-asyncio==0.23.7 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +yarl==1.13.1 diff --git a/.riot/requirements/ae8bd25.txt b/.riot/requirements/ae8bd25.txt new file mode 100644 index 00000000000..f0736d28cfc --- /dev/null +++ b/.riot/requirements/ae8bd25.txt @@ -0,0 +1,26 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ae8bd25.in +# +asgiref==3.8.1 +attrs==24.2.0 +coverage[toml]==7.6.1 +django==4.2.16 +django-configurations==2.5.1 +djangorestframework==3.15.2 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-django[testing]==3.10.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +six==1.16.0 +sortedcontainers==2.4.0 +sqlparse==0.5.1 diff --git a/.riot/requirements/b29075f.txt b/.riot/requirements/b29075f.txt new file mode 100644 index 00000000000..d070fd9e2f2 --- /dev/null +++ b/.riot/requirements/b29075f.txt @@ -0,0 +1,38 @@ +# +# This file is autogenerated by pip-compile with 
Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/b29075f.in +# +annotated-types==0.7.0 +attrs==24.2.0 +blinker==1.8.2 +certifi==2024.8.30 +charset-normalizer==3.3.2 +click==8.1.7 +coverage[toml]==7.6.1 +flask==3.0.3 +flask-openapi3==4.0.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.5.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pydantic==2.9.2 +pydantic-core==2.23.4 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +typing-extensions==4.12.2 +urllib3==1.26.20 +werkzeug==3.0.4 +zipp==3.20.2 diff --git a/.riot/requirements/b403d9d.txt b/.riot/requirements/b403d9d.txt new file mode 100644 index 00000000000..1cb46c6afb0 --- /dev/null +++ b/.riot/requirements/b403d9d.txt @@ -0,0 +1,49 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/b403d9d.in +# +aiobotocore==2.3.1 +aiohappyeyeballs==2.4.3 +aiohttp==3.10.9 +aioitertools==0.12.0 +aiosignal==1.3.1 +attrs==24.2.0 +botocore==1.24.21 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch==8.15.1 +events==0.5 +frozenlist==1.4.1 +gevent==24.2.1 +greenlet==3.1.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jmespath==1.0.1 +mock==5.1.0 +multidict==6.1.0 +opensearch-py==2.7.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pynamodb==5.5.1 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +requests==2.32.3 +six==1.16.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 +wrapt==1.16.0 +yarl==1.13.1 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git 
a/.riot/requirements/bc64f49.txt b/.riot/requirements/bc64f49.txt new file mode 100644 index 00000000000..ab6f8840549 --- /dev/null +++ b/.riot/requirements/bc64f49.txt @@ -0,0 +1,35 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/bc64f49.in +# +attrs==24.2.0 +autocommand==2.2.2 +cheroot==10.0.1 +cherrypy==18.10.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +jaraco-collections==5.1.0 +jaraco-context==6.0.1 +jaraco-functools==4.1.0 +jaraco-text==4.0.0 +mock==5.1.0 +more-itertools==8.10.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +portend==3.2.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-dateutil==2.9.0.post0 +six==1.16.0 +sortedcontainers==2.4.0 +tempora==5.7.0 +zc-lockfile==3.0.post1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/bc7a1f4.txt b/.riot/requirements/bc7a1f4.txt new file mode 100644 index 00000000000..a73a0ac6da4 --- /dev/null +++ b/.riot/requirements/bc7a1f4.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/bc7a1f4.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +elasticsearch1==1.10.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/bcbec2a.txt b/.riot/requirements/bcbec2a.txt new file mode 100644 index 00000000000..665c0aadc1a --- /dev/null +++ b/.riot/requirements/bcbec2a.txt @@ -0,0 +1,46 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate 
.riot/requirements/bcbec2a.in +# +annotated-types==0.7.0 +anyio==4.7.0 +attrs==24.2.0 +boto3==1.35.78 +botocore==1.35.78 +certifi==2024.8.30 +coverage[toml]==7.6.9 +fastapi==0.115.6 +h11==0.14.0 +httpcore==1.0.7 +httpretty==1.1.4 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +jmespath==1.0.1 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pydantic==2.10.3 +pydantic-core==2.27.1 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 +python-dateutil==2.9.0.post0 +s3transfer==0.10.4 +six==1.17.0 +sniffio==1.3.1 +sortedcontainers==2.4.0 +starlette==0.41.3 +structlog==24.4.0 +typing-extensions==4.12.2 +urllib3==2.2.3 +wheel==0.45.1 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.6.0 diff --git a/.riot/requirements/bebdd41.txt b/.riot/requirements/bebdd41.txt new file mode 100644 index 00000000000..c0918e4e15a --- /dev/null +++ b/.riot/requirements/bebdd41.txt @@ -0,0 +1,19 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/bebdd41.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/c1351c9.txt b/.riot/requirements/c1351c9.txt new file mode 100644 index 00000000000..10e97c081a4 --- /dev/null +++ b/.riot/requirements/c1351c9.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c1351c9.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.23.7 
+pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +redis==5.1.1 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/c4d4455.txt b/.riot/requirements/c4d4455.txt new file mode 100644 index 00000000000..1a8b9f970ef --- /dev/null +++ b/.riot/requirements/c4d4455.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c4d4455.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/c77bbb6.txt b/.riot/requirements/c77bbb6.txt new file mode 100644 index 00000000000..3f53bcba5e6 --- /dev/null +++ b/.riot/requirements/c77bbb6.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c77bbb6.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==2.1.1 +click==8.1.7 +coverage[toml]==7.6.1 +deprecated==1.2.14 +flask==2.1.3 +gevent==24.2.1 +greenlet==3.1.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.4.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.0.1 +mock==5.1.0 +opentelemetry-api==1.27.0 +opentelemetry-instrumentation==0.48b0 +opentelemetry-instrumentation-flask==0.48b0 +opentelemetry-instrumentation-wsgi==0.48b0 +opentelemetry-semantic-conventions==0.48b0 +opentelemetry-util-http==0.48b0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.28.1 +sortedcontainers==2.4.0 +urllib3==1.26.20 +werkzeug==2.1.2 +wrapt==1.16.0 +zipp==3.20.2 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are 
considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/c8b476b.txt b/.riot/requirements/c8b476b.txt new file mode 100644 index 00000000000..d8fd4322d7f --- /dev/null +++ b/.riot/requirements/c8b476b.txt @@ -0,0 +1,32 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/c8b476b.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +gevent==24.2.1 +greenlet==3.1.1 +gunicorn==20.0.4 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==2.2.3 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/d5098dd.txt b/.riot/requirements/d5098dd.txt new file mode 100644 index 00000000000..bb4ade61f8a --- /dev/null +++ b/.riot/requirements/d5098dd.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/d5098dd.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elasticsearch7==7.17.12 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/d7dfbc2.txt b/.riot/requirements/d7dfbc2.txt new file mode 100644 index 00000000000..2bee6eee691 --- /dev/null +++ b/.riot/requirements/d7dfbc2.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate 
.riot/requirements/d7dfbc2.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +dnspython==2.7.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +mongoengine==0.29.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pymongo==4.10.1 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/d81ad99.txt b/.riot/requirements/d81ad99.txt new file mode 100644 index 00000000000..3efb0a138c2 --- /dev/null +++ b/.riot/requirements/d81ad99.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/d81ad99.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/db78045.txt b/.riot/requirements/db78045.txt new file mode 100644 index 00000000000..7a92cc52123 --- /dev/null +++ b/.riot/requirements/db78045.txt @@ -0,0 +1,21 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/db78045.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +elasticsearch2==2.5.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/dbc6a48.txt b/.riot/requirements/dbc6a48.txt new file mode 100644 index 00000000000..e29a7f2eeee --- /dev/null +++ b/.riot/requirements/dbc6a48.txt @@ -0,0 +1,35 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --no-annotate .riot/requirements/dbc6a48.in +# 
+amqp==5.3.1 +attrs==24.2.0 +billiard==4.2.1 +celery[redis]==5.4.0 +click==8.1.7 +click-didyoumean==0.3.1 +click-plugins==1.1.1 +click-repl==0.3.0 +coverage[toml]==7.6.9 +hypothesis==6.45.0 +iniconfig==2.0.0 +kombu==5.4.2 +mock==5.1.0 +more-itertools==8.10.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +prompt-toolkit==3.0.48 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 +python-dateutil==2.9.0.post0 +redis==5.2.1 +six==1.17.0 +sortedcontainers==2.4.0 +tzdata==2024.2 +vine==5.1.0 +wcwidth==0.2.13 diff --git a/.riot/requirements/dbeb1d7.txt b/.riot/requirements/dbeb1d7.txt new file mode 100644 index 00000000000..bbde6777f1c --- /dev/null +++ b/.riot/requirements/dbeb1d7.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/dbeb1d7.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/ddd8721.txt b/.riot/requirements/ddd8721.txt new file mode 100644 index 00000000000..baa4f15e9af --- /dev/null +++ b/.riot/requirements/ddd8721.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ddd8721.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/dedea98.txt b/.riot/requirements/dedea98.txt new file mode 100644 index 
00000000000..dca66df78da --- /dev/null +++ b/.riot/requirements/dedea98.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/dedea98.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +structlog==24.4.0 diff --git a/.riot/requirements/df7a937.txt b/.riot/requirements/df7a937.txt new file mode 100644 index 00000000000..35a49fc7ae3 --- /dev/null +++ b/.riot/requirements/df7a937.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/df7a937.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/e06abee.txt b/.riot/requirements/e06abee.txt new file mode 100644 index 00000000000..e7be89f2738 --- /dev/null +++ b/.riot/requirements/e06abee.txt @@ -0,0 +1,38 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e06abee.in +# +annotated-types==0.7.0 +attrs==24.2.0 +blinker==1.8.2 +certifi==2024.8.30 +charset-normalizer==3.3.2 +click==8.1.7 +coverage[toml]==7.6.1 +flask==3.0.3 +flask-openapi3==4.0.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.5.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pydantic==2.9.2 +pydantic-core==2.23.4 +pytest==8.3.3 +pytest-cov==5.0.0 
+pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +typing-extensions==4.12.2 +urllib3==1.26.20 +werkzeug==3.0.4 +zipp==3.20.2 diff --git a/.riot/requirements/e20152c.txt b/.riot/requirements/e20152c.txt new file mode 100644 index 00000000000..3aeacecfdcd --- /dev/null +++ b/.riot/requirements/e20152c.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e20152c.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/e2bf559.txt b/.riot/requirements/e2bf559.txt new file mode 100644 index 00000000000..cef46e50c2d --- /dev/null +++ b/.riot/requirements/e2bf559.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/e2bf559.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elastic-transport==8.15.0 +elasticsearch==8.15.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==2.2.3 diff --git a/.riot/requirements/ee48b16.txt b/.riot/requirements/ee48b16.txt new file mode 100644 index 00000000000..116921f222d --- /dev/null +++ b/.riot/requirements/ee48b16.txt @@ -0,0 +1,22 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ee48b16.in +# +attrs==24.2.0 +certifi==2024.8.30 +coverage[toml]==7.6.1 +elasticsearch==7.13.4 +hypothesis==6.45.0 +iniconfig==2.0.0 
+mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/.riot/requirements/f20c964.txt b/.riot/requirements/f20c964.txt new file mode 100644 index 00000000000..ab4cf486d17 --- /dev/null +++ b/.riot/requirements/f20c964.txt @@ -0,0 +1,30 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f20c964.in +# +attrs==24.2.0 +blinker==1.8.2 +cachelib==0.9.0 +click==8.1.7 +coverage[toml]==7.6.1 +flask==3.0.3 +flask-caching==2.3.0 +hypothesis==6.45.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.1.5 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +python-memcached==1.62 +redis==5.1.1 +sortedcontainers==2.4.0 +werkzeug==3.0.4 diff --git a/.riot/requirements/f339e99.txt b/.riot/requirements/f339e99.txt new file mode 100644 index 00000000000..b300c0bc5b4 --- /dev/null +++ b/.riot/requirements/f339e99.txt @@ -0,0 +1,19 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f339e99.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/f33b994.txt b/.riot/requirements/f33b994.txt new file mode 100644 index 00000000000..28facac819d --- /dev/null +++ b/.riot/requirements/f33b994.txt @@ -0,0 +1,23 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f33b994.in +# 
+attrs==24.2.0 +click==8.1.7 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +redis==5.1.1 +rq==1.16.2 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/f46a802.txt b/.riot/requirements/f46a802.txt new file mode 100644 index 00000000000..46033d5a506 --- /dev/null +++ b/.riot/requirements/f46a802.txt @@ -0,0 +1,20 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f46a802.in +# +attrs==24.2.0 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +msgpack==1.1.0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +sortedcontainers==2.4.0 diff --git a/.riot/requirements/f4fafb3.txt b/.riot/requirements/f4fafb3.txt new file mode 100644 index 00000000000..09db801e27b --- /dev/null +++ b/.riot/requirements/f4fafb3.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f4fafb3.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==2.1.1 +click==8.1.7 +coverage[toml]==7.6.1 +deprecated==1.2.14 +flask==2.1.3 +gevent==24.2.1 +greenlet==3.1.1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.0.0 +iniconfig==2.0.0 +itsdangerous==2.2.0 +jinja2==3.1.4 +markupsafe==2.0.1 +mock==5.1.0 +opentelemetry-api==1.26.0 +opentelemetry-instrumentation==0.47b0 +opentelemetry-instrumentation-flask==0.47b0 +opentelemetry-instrumentation-wsgi==0.47b0 +opentelemetry-semantic-conventions==0.47b0 +opentelemetry-util-http==0.47b0 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-asyncio==0.21.1 +pytest-cov==5.0.0 +pytest-mock==3.14.0 
+pytest-randomly==3.15.0 +requests==2.28.1 +sortedcontainers==2.4.0 +urllib3==1.26.20 +werkzeug==2.1.2 +wrapt==1.16.0 +zipp==3.20.2 +zope-event==5.0 +zope-interface==7.0.3 + +# The following packages are considered to be unsafe in a requirements file: +setuptools==75.1.0 diff --git a/.riot/requirements/fbee8ab.txt b/.riot/requirements/fbee8ab.txt new file mode 100644 index 00000000000..df12821215c --- /dev/null +++ b/.riot/requirements/fbee8ab.txt @@ -0,0 +1,25 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/fbee8ab.in +# +attrs==24.2.0 +certifi==2024.8.30 +charset-normalizer==3.3.2 +coverage[toml]==7.6.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.0.0 +mock==5.1.0 +opensearch-py[requests]==2.0.1 +opentracing==2.4.0 +packaging==24.1 +pluggy==1.5.0 +pytest==8.3.3 +pytest-cov==5.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.15.0 +requests==2.32.3 +sortedcontainers==2.4.0 +urllib3==1.26.20 diff --git a/ddtrace/appsec/_iast/_taint_tracking/__init__.py b/ddtrace/appsec/_iast/_taint_tracking/__init__.py index a6bad81f64c..839f4b3537f 100644 --- a/ddtrace/appsec/_iast/_taint_tracking/__init__.py +++ b/ddtrace/appsec/_iast/_taint_tracking/__init__.py @@ -1,10 +1,14 @@ from io import BytesIO from io import StringIO import itertools +from typing import TYPE_CHECKING # noqa:F401 from typing import Any -from typing import Sequence from typing import Tuple + +if TYPE_CHECKING: # pragma: no cover + from typing import Sequence # noqa:F401 + from ddtrace.internal._unpatched import _threading as threading from ddtrace.internal.logger import get_logger @@ -263,7 +267,9 @@ def trace_calls_and_returns(frame, event, arg): threading.settrace(trace_calls_and_returns) -def copy_ranges_to_string(pyobject: str, ranges: Sequence[TaintRange]) -> str: +def copy_ranges_to_string(pyobject, ranges): + # type: (str, Sequence[TaintRange]) -> str + # NB this function uses comment-based 
type annotation because TaintRange is conditionally imported if not isinstance(pyobject, IAST.TAINTEABLE_TYPES): # type: ignore[misc] return pyobject @@ -297,7 +303,9 @@ def copy_ranges_to_string(pyobject: str, ranges: Sequence[TaintRange]) -> str: # Given a list of ranges, try to match them with the iterable and return a new iterable with a new range applied that # matched the original one Source. If no range matches, take the Source from the first one. -def copy_ranges_to_iterable_with_strings(iterable: Sequence[str], ranges: Sequence[TaintRange]) -> Sequence[str]: +def copy_ranges_to_iterable_with_strings(iterable, ranges): + # type: (Sequence[str], Sequence[TaintRange]) -> Sequence[str] + # NB this function uses comment-based type annotation because TaintRange is conditionally imported iterable_type = type(iterable) new_result = [] diff --git a/ddtrace/debugging/_expressions.py b/ddtrace/debugging/_expressions.py index ccab7549d8f..32b87017cdf 100644 --- a/ddtrace/debugging/_expressions.py +++ b/ddtrace/debugging/_expressions.py @@ -63,7 +63,9 @@ def _is_identifier(name: str) -> bool: def short_circuit_instrs(op: str, label: Label) -> List[Instr]: value = "FALSE" if op == "and" else "TRUE" - if PY >= (3, 12): + if PY >= (3, 13): + return [Instr("COPY", 1), Instr("TO_BOOL"), Instr(f"POP_JUMP_IF_{value}", label), Instr("POP_TOP")] + elif PY >= (3, 12): return [Instr("COPY", 1), Instr(f"POP_JUMP_IF_{value}", label), Instr("POP_TOP")] return [Instr(f"JUMP_IF_{value}_OR_POP", label)] @@ -145,6 +147,9 @@ def _compile_direct_predicate(self, ast: DDASTType) -> Optional[List[Instr]]: value.append(Instr("LOAD_FAST", "_locals")) value.append(IN_OPERATOR_INSTR) else: + if PY >= (3, 13): + # UNARY_NOT requires a boolean value + value.append(Instr("TO_BOOL")) value.append(Instr("UNARY_NOT")) return value @@ -250,17 +255,18 @@ def _compile_direct_operation(self, ast: DDASTType) -> Optional[List[Instr]]: return None def _call_function(self, func: Callable, *args: List[Instr]) 
-> List[Instr]: - if PY < (3, 11): - return [Instr("LOAD_CONST", func)] + list(chain(*args)) + [Instr("CALL_FUNCTION", len(args))] - elif PY >= (3, 12): + if PY >= (3, 13): + return [Instr("LOAD_CONST", func), Instr("PUSH_NULL")] + list(chain(*args)) + [Instr("CALL", len(args))] + if PY >= (3, 12): return [Instr("PUSH_NULL"), Instr("LOAD_CONST", func)] + list(chain(*args)) + [Instr("CALL", len(args))] + if PY >= (3, 11): + return ( + [Instr("PUSH_NULL"), Instr("LOAD_CONST", func)] + + list(chain(*args)) + + [Instr("PRECALL", len(args)), Instr("CALL", len(args))] + ) - # Python 3.11 - return ( - [Instr("PUSH_NULL"), Instr("LOAD_CONST", func)] - + list(chain(*args)) - + [Instr("PRECALL", len(args)), Instr("CALL", len(args))] - ) + return [Instr("LOAD_CONST", func)] + list(chain(*args)) + [Instr("CALL_FUNCTION", len(args))] def _compile_arg_operation(self, ast: DDASTType) -> Optional[List[Instr]]: # arg_operation => {"": []} diff --git a/ddtrace/internal/_threads.cpp b/ddtrace/internal/_threads.cpp index 152b7b0da6c..d775544827b 100644 --- a/ddtrace/internal/_threads.cpp +++ b/ddtrace/internal/_threads.cpp @@ -20,8 +20,13 @@ class GILGuard public: inline GILGuard() { - if (!_Py_IsFinalizing()) +#if PY_VERSION_HEX >= 0x030d0000 + if (!Py_IsFinalizing()) { +#else + if (!_Py_IsFinalizing()) { +#endif _state = PyGILState_Ensure(); + } } inline ~GILGuard() { @@ -42,13 +47,23 @@ class AllowThreads public: inline AllowThreads() { - if (!_Py_IsFinalizing()) +#if PY_VERSION_HEX >= 0x30d0000 + if (!Py_IsFinalizing()) { +#else + if (!_Py_IsFinalizing()) { +#endif _state = PyEval_SaveThread(); + } } inline ~AllowThreads() { - if (!_Py_IsFinalizing()) +#if PY_VERSION_HEX >= 0x30d0000 + if (!Py_IsFinalizing()) { +#else + if (!_Py_IsFinalizing()) { +#endif PyEval_RestoreThread(_state); + } } private: @@ -266,8 +281,13 @@ PeriodicThread_start(PeriodicThread* self, PyObject* args) } } - if (_Py_IsFinalizing()) +#if PY_VERSION_HEX >= 0x30d0000 + if (Py_IsFinalizing()) { +#else + if 
(_Py_IsFinalizing()) { +#endif break; + } if (PeriodicThread__periodic(self)) { // Error @@ -278,8 +298,15 @@ PeriodicThread_start(PeriodicThread* self, PyObject* args) // Run the shutdown callback if there was no error and we are not // at Python shutdown. - if (!self->_atexit && !error && self->_on_shutdown != Py_None && !_Py_IsFinalizing()) - PeriodicThread__on_shutdown(self); + if (!self->_atexit && !error && self->_on_shutdown != Py_None) { +#if PY_VERSION_HEX >= 0x30d0000 + if (!Py_IsFinalizing()) { +#else + if (!_Py_IsFinalizing()) { +#endif + PeriodicThread__on_shutdown(self); + } + } // Notify the join method that the thread has stopped self->_stopped->set(); @@ -418,9 +445,14 @@ PeriodicThread_dealloc(PeriodicThread* self) // Since the native thread holds a strong reference to this object, we // can only get here if the thread has actually stopped. - if (_Py_IsFinalizing()) +#if PY_VERSION_HEX >= 0x30d0000 + if (Py_IsFinalizing()) { +#else + if (_Py_IsFinalizing()) { +#endif // Do nothing. We are about to terminate and release resources anyway. return; + } // If we are trying to stop from the same thread, then we are still running. // This should happen rarely, so we don't worry about the memory leak this diff --git a/ddtrace/internal/injection.py b/ddtrace/internal/injection.py index d6fa2715ec7..787e0160e66 100644 --- a/ddtrace/internal/injection.py +++ b/ddtrace/internal/injection.py @@ -25,8 +25,25 @@ class InvalidLine(Exception): """ +# DEV: This is the bytecode equivalent of +# >>> hook(arg) +# Additionally, we must discard the return value (top of the stack) to restore +# the stack to the state prior to the call. 
+ INJECTION_ASSEMBLY = Assembly() -if PY >= (3, 12): +if PY >= (3, 14): + raise NotImplementedError("Python >= 3.14 is not supported yet") +elif PY >= (3, 13): + INJECTION_ASSEMBLY.parse( + r""" + load_const {hook} + push_null + load_const {arg} + call 1 + pop_top + """ + ) +elif PY >= (3, 12): INJECTION_ASSEMBLY.parse( r""" push_null @@ -91,15 +108,11 @@ def _inject_hook(code: Bytecode, hook: HookType, lineno: int, arg: Any) -> None: if not locs: raise InvalidLine("Line %d does not exist or is either blank or a comment" % lineno) - # DEV: This is the bytecode equivalent of - # >>> hook(arg) - # Additionally, we must discard the return value (top of the stack) to - # restore the stack to the state prior to the call. for i in locs: code[i:i] = INJECTION_ASSEMBLY.bind(dict(hook=hook, arg=arg), lineno=lineno) -_INJECT_HOOK_OPCODE_POS = 0 if PY < (3, 11) else 1 +_INJECT_HOOK_OPCODE_POS = 1 if (3, 11) <= PY < (3, 13) else 0 _INJECT_ARG_OPCODE_POS = 1 if PY < (3, 11) else 2 diff --git a/ddtrace/internal/wrapping/__init__.py b/ddtrace/internal/wrapping/__init__.py index dae0c183ac0..83598e1911c 100644 --- a/ddtrace/internal/wrapping/__init__.py +++ b/ddtrace/internal/wrapping/__init__.py @@ -144,6 +144,8 @@ def wrap_bytecode(wrapper, wrapped): bc.Instr("RESUME", 0, lineno=lineno - 1), bc.Instr("PUSH_NULL", lineno=lineno), ] + if PY >= (3, 13): + instrs[1], instrs[2] = instrs[2], instrs[1] if code.co_cellvars: instrs[0:0] = [Instr("MAKE_CELL", bc.CellVar(_), lineno=lineno) for _ in code.co_cellvars] diff --git a/ddtrace/internal/wrapping/context.py b/ddtrace/internal/wrapping/context.py index 138f542720e..c6b4ee896e2 100644 --- a/ddtrace/internal/wrapping/context.py +++ b/ddtrace/internal/wrapping/context.py @@ -70,7 +70,55 @@ CONTEXT_RETURN = Assembly() CONTEXT_FOOT = Assembly() -if sys.version_info >= (3, 12): +if sys.version_info >= (3, 14): + raise NotImplementedError("Python >= 3.14 is not supported yet") +elif sys.version_info >= (3, 13): + CONTEXT_HEAD.parse( + r""" 
+ load_const {context_enter} + push_null + call 0 + pop_top + """ + ) + CONTEXT_RETURN.parse( + r""" + push_null + load_const {context_return} + swap 3 + call 1 + """ + ) + + CONTEXT_RETURN_CONST = Assembly() + CONTEXT_RETURN_CONST.parse( + r""" + load_const {context_return} + push_null + load_const {value} + call 1 + """ + ) + + CONTEXT_FOOT.parse( + r""" + try @_except lasti + push_exc_info + load_const {context_exit} + push_null + call 0 + pop_top + reraise 2 + tried + + _except: + copy 3 + pop_except + reraise 1 + """ + ) + +elif sys.version_info >= (3, 12): CONTEXT_HEAD.parse( r""" push_null diff --git a/ddtrace/profiling/collector/stack.pyx b/ddtrace/profiling/collector/stack.pyx index f3758d13989..c7ba1ec3e83 100644 --- a/ddtrace/profiling/collector/stack.pyx +++ b/ddtrace/profiling/collector/stack.pyx @@ -157,7 +157,11 @@ from cpython.ref cimport Py_DECREF cdef extern from "": PyObject* _PyThread_CurrentFrames() -IF PY_VERSION_HEX >= 0x030b0000: +IF PY_VERSION_HEX >= 0x30d0000: + cdef extern from "": + PyObject* _PyThread_CurrentExceptions() + +ELIF PY_VERSION_HEX >= 0x030b0000: cdef extern from "": PyObject* _PyThread_CurrentExceptions() diff --git a/docker-compose.yml b/docker-compose.yml index cf3738c2dbe..cf40a4a256d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -152,6 +152,10 @@ services: - "127.0.0.1:5433:5433" testrunner: + # DEV uncomment to test local changes to the Dockerfile + # build: + # context: ./docker + # dockerfile: Dockerfile image: ghcr.io/datadog/dd-trace-py/testrunner:47c7b5287da25643e46652e6d222a40a52f2382a@sha256:3a02dafeff9cd72966978816d1b39b54f5517af4049396923b95c8452f604269 command: bash environment: diff --git a/docker/.python-version b/docker/.python-version index decc1955c11..9924540f9a4 100644 --- a/docker/.python-version +++ b/docker/.python-version @@ -4,4 +4,4 @@ 3.9 3.10 3.11 -3.13-dev +3.13 diff --git a/docker/Dockerfile b/docker/Dockerfile index 79f207724db..8ff9be89e48 100644 --- a/docker/Dockerfile +++ 
b/docker/Dockerfile @@ -1,7 +1,7 @@ # DEV: Use `debian:slim` instead of an `alpine` image to support installing wheels from PyPI # this drastically improves test execution time since python dependencies don't all # have to be built from source all the time (grpcio takes forever to install) -FROM debian:buster-20221219-slim +FROM debian:bookworm-slim ARG TARGETARCH ARG HATCH_VERSION=1.12.0 @@ -34,7 +34,6 @@ RUN apt-get update \ gnupg \ jq \ libbz2-dev \ - libenchant-dev \ libffi-dev \ liblzma-dev \ libmemcached-dev \ @@ -47,9 +46,7 @@ RUN apt-get update \ libsqlite3-dev \ libsqliteodbc \ libssh-dev \ - libssl-dev \ patch \ - python-openssl\ unixodbc-dev \ wget \ zlib1g-dev \ @@ -61,7 +58,7 @@ RUN apt-get install -y --no-install-recommends nodejs npm \ # MariaDB is a dependency for tests RUN curl https://mariadb.org/mariadb_release_signing_key.pgp | gpg --dearmor > /etc/apt/trusted.gpg.d/mariadb.gpg \ - && echo "deb [arch=amd64,arm64] https://mirror.mariadb.org/repo/11.rolling/debian/ buster main" > /etc/apt/sources.list.d/mariadb.list \ + && echo "deb [arch=amd64,arm64] https://mirror.mariadb.org/repo/11.rolling/debian/ bookworm main" > /etc/apt/sources.list.d/mariadb.list \ && apt-get update \ && apt-get install -y --no-install-recommends libmariadb-dev libmariadb-dev-compat @@ -71,7 +68,7 @@ RUN if [ "$TARGETARCH" = "amd64" ]; \ then \ curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > microsoft.gpg \ && mv microsoft.gpg /etc/apt/trusted.gpg.d/microsoft.gpg \ - && echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-debian-buster-prod buster main" > /etc/apt/sources.list.d/dotnetdev.list \ + && echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-debian-bookworm-prod bookworm main" > /etc/apt/sources.list.d/dotnetdev.list \ && apt-get update \ && apt-get install -y --no-install-recommends azure-functions-core-tools-4=4.0.6280-1; \ fi @@ -93,7 +90,7 @@ RUN curl https://sh.rustup.rs -sSf | \ sh -s -- 
--default-toolchain stable -y # Install pyenv and necessary Python versions -RUN git clone --depth 1 --branch v2.4.2 https://github.com/pyenv/pyenv "${PYENV_ROOT}" \ +RUN git clone --depth 1 --branch v2.4.22 https://github.com/pyenv/pyenv "${PYENV_ROOT}" \ && cd /root \ && pyenv local | xargs -L 1 pyenv install \ && cd - diff --git a/docs/versioning.rst b/docs/versioning.rst index 0972213f51c..fdd71f8de08 100644 --- a/docs/versioning.rst +++ b/docs/versioning.rst @@ -109,17 +109,17 @@ Supported runtimes * - Linux - x86-64, i686, AArch64 - CPython - - 3.7-3.12 + - 3.7-3.13 - ``>=2.0,<3`` * - MacOS - Intel, Apple Silicon - CPython - - 3.7-3.12 + - 3.7-3.13 - ``>=2.0,<3`` * - Windows - 64bit, 32bit - CPython - - 3.7-3.12 + - 3.7-3.13 - ``>=2.0,<3`` * - Linux - x86-64, i686, AArch64 diff --git a/hatch.toml b/hatch.toml index f3a3c2cee36..7dae1538613 100644 --- a/hatch.toml +++ b/hatch.toml @@ -172,7 +172,7 @@ extra-dependencies = [ ] [[envs.integration_test.matrix]] -python = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +python = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] [envs.integration_test.env-vars] _DD_CIVISIBILITY_USE_CI_CONTEXT_PROVIDER = "1" @@ -294,7 +294,7 @@ test = [ ] [[envs.appsec_iast_native.matrix]] -python = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +python = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] ## ASM FastAPI @@ -364,7 +364,7 @@ test = [ ] [[envs.appsec_aggregated_leak_testing.matrix]] -python = ["3.10", "3.11", "3.12"] +python = ["3.10", "3.11", "3.12", "3.13"] @@ -468,7 +468,7 @@ pytest = ["~=6.0", "~=7.0"] [[envs.pytest_plugin_v2.matrix]] -python = ["3.9", "3.10", "3.12"] +python = ["3.9", "3.10", "3.12", "3.13"] pytest = ["~=6.0", "~=7.0", "~=8.0"] [envs.snapshot_viewer] diff --git a/lib-injection/dl_wheels.py b/lib-injection/dl_wheels.py index e10d8e53e0e..81c5715611d 100755 --- a/lib-injection/dl_wheels.py +++ b/lib-injection/dl_wheels.py @@ -16,6 +16,7 @@ ./dl_wheels.py --help """ + import argparse import 
itertools import os @@ -41,9 +42,9 @@ ) # Supported Python versions lists all python versions that can install at least one version of the ddtrace library. -supported_versions = ["2.7", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +supported_versions = ["2.7", "3.6", "3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] supported_arches = ["aarch64", "x86_64", "i686"] -supported_platforms = ["musllinux_1_1", "manylinux2014"] +supported_platforms = ["musllinux_1_2", "manylinux2014"] parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( diff --git a/lib-injection/sources/sitecustomize.py b/lib-injection/sources/sitecustomize.py index 7d28a3c4d4a..0f87b770edd 100644 --- a/lib-injection/sources/sitecustomize.py +++ b/lib-injection/sources/sitecustomize.py @@ -264,7 +264,7 @@ def _inject(): except Exception: _log("user-installed ddtrace not found, configuring application to use injection site-packages") - current_platform = "manylinux2014" if _get_clib() == "gnu" else "musllinux_1_1" + current_platform = "manylinux2014" if _get_clib() == "gnu" else "musllinux_1_2" _log("detected platform %s" % current_platform, level="debug") pkgs_path = os.path.join(SCRIPT_DIR, "ddtrace_pkgs") diff --git a/pyproject.toml b/pyproject.toml index 9c8ff26d223..3c83cfc5067 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,13 +23,16 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ] dependencies = [ - "bytecode>=0.15.0; python_version>='3.12'", + "bytecode>=0.16.0; python_version>='3.13.0'", + "bytecode>=0.15.0; python_version~='3.12.0'", "bytecode>=0.14.0; python_version~='3.11.0'", "bytecode>=0.13.0; python_version<'3.11'", "envier~=0.5", "importlib_metadata<=6.5.0; python_version<'3.8'", + "legacy-cgi>=2.0.0; python_version>='3.13.0'", "opentelemetry-api>=1", "protobuf>=3", "typing_extensions", diff --git 
a/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml b/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml new file mode 100644 index 00000000000..837858691fe --- /dev/null +++ b/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Makes the library compatible with Python 3.13 diff --git a/riotfile.py b/riotfile.py index 86d6ed5bf76..6db9102786f 100644 --- a/riotfile.py +++ b/riotfile.py @@ -17,7 +17,8 @@ (3, 10), (3, 11), (3, 12), -] + (3, 13), +] # type: List[Tuple[int, int]] def version_to_str(version: Tuple[int, int]) -> str: @@ -70,9 +71,9 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT """Helper to select python versions from the list of versions we support >>> select_pys() - ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] + ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] >>> select_pys(min_version='3') - ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] + ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] >>> select_pys(max_version='3') [] >>> select_pys(min_version='3.7', max_version='3.9') @@ -142,7 +143,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="appsec_iast", - pys=select_pys(), + pys=select_pys(max_version="3.12"), command="pytest -v {cmdargs} tests/appsec/iast/", pkgs={ "requests": latest, @@ -164,7 +165,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="appsec_iast_memcheck", - pys=select_pys(min_version="3.9"), + pys=select_pys(min_version="3.9", max_version="3.12"), command="pytest {cmdargs} --memray --stacks=35 tests/appsec/iast_memcheck/", pkgs={ "requests": latest, @@ -263,7 +264,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), # Flask 3.x.x Venv( - pys=select_pys(min_version="3.8"), + pys=select_pys(min_version="3.8", max_version="3.12"), pkgs={ "flask": "~=3.0", "langchain": "==0.0.354", @@ -396,7 +397,7 @@ def 
select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "msgpack": [latest], "pytest-randomly": latest, }, - pys=select_pys(), + pys=select_pys(max_version="3.12"), venvs=[ Venv( name="datastreams-latest", @@ -520,7 +521,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT command="pytest {cmdargs} --no-cov tests/commands/test_runner.py", venvs=[ Venv( - pys=select_pys(), + pys=select_pys(max_version="3.12"), pkgs={ "redis": latest, "gevent": latest, @@ -606,7 +607,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="falcon", command="pytest {cmdargs} tests/contrib/falcon", - pys=select_pys(min_version="3.7"), + pys=select_pys(min_version="3.7", max_version="3.12"), pkgs={ "falcon": [ "~=3.0.0", @@ -828,7 +829,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( # django started supporting psycopg3 in 4.2 for versions >3.1.8 - pys=select_pys(min_version="3.8"), + pys=select_pys(min_version="3.8", max_version="3.12"), pkgs={ "django": ["~=4.2"], "psycopg": latest, @@ -919,7 +920,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, ), Venv( - pys=select_pys(min_version="3.12"), + pys="3.12", pkgs={ "sqlalchemy": latest, "django": latest, @@ -1208,7 +1209,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pkgs={"psycopg2-binary": "~=2.8.0"}, ), Venv( - pys=select_pys(min_version="3.7"), + pys=select_pys(min_version="3.7", max_version="3.12"), # psycopg2-binary added support for Python 3.9/3.10 in 2.9.1 # psycopg2-binary added support for Python 3.11 in 2.9.2 pkgs={"psycopg2-binary": ["~=2.9.2", latest]}, @@ -1232,7 +1233,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, ), Venv( - pys=select_pys(min_version="3.12"), + pys=select_pys(min_version="3.12", max_version="3.12"), pkgs={ "pytest-asyncio": "==0.23.7", }, @@ 
-1312,7 +1313,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pkgs={"starlette": ["~=0.21.0", "~=0.33.0", latest]}, ), Venv( - pys=select_pys(min_version="3.12"), + pys="3.12", pkgs={"starlette": latest}, ), ], @@ -1335,7 +1336,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, venvs=[ Venv( - pys=select_pys(min_version="3.7"), + pys=select_pys(min_version="3.7", max_version="3.12"), pkgs={ "sqlalchemy": ["~=1.3.0", latest], "psycopg2-binary": latest, @@ -1436,7 +1437,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, venvs=[ Venv( - pys=select_pys(min_version="3.8"), + pys=select_pys(min_version="3.8", max_version="3.12"), pkgs={"botocore": "==1.34.49", "boto3": "==1.34.49"}, ), ], @@ -1505,7 +1506,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pkgs={"pymysql": "~=0.10"}, ), Venv( - pys=select_pys(min_version="3.7"), + pys=select_pys(min_version="3.7", max_version="3.12"), pkgs={ "pymysql": [ "~=1.0", @@ -1561,7 +1562,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, ), Venv( - pys=select_pys(min_version="3.12"), + pys=select_pys(min_version="3.12", max_version="3.12"), pkgs={"aiobotocore": latest}, ), ], @@ -1584,14 +1585,14 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( # fastapi added support for Python 3.11 in 0.86.0 - pys=select_pys(min_version="3.11"), + pys=select_pys(min_version="3.11", max_version="3.12"), pkgs={"fastapi": ["~=0.86.0", latest], "anyio": ">=3.4.0,<4.0"}, ), ], ), Venv( name="aiomysql", - pys=select_pys(min_version="3.7"), + pys=select_pys(min_version="3.7", max_version="3.12"), command="pytest {cmdargs} tests/contrib/aiomysql", pkgs={ "pytest-randomly": latest, @@ -1638,7 +1639,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ], ), Venv( - 
pys=select_pys(min_version="3.10"), + pys=select_pys(min_version="3.10", max_version="3.12"), pkgs={ "pytest": [ "~=6.0", @@ -1719,7 +1720,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ], ), Venv( - pys=select_pys(min_version="3.10"), + pys=select_pys(min_version="3.10", max_version="3.12"), pkgs={ "pytest-bdd": [ ">=4.0,<5.0", @@ -1800,7 +1801,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( # grpcio added support for Python 3.12 in 1.59 - pys=select_pys(min_version="3.12"), + pys=select_pys(min_version="3.12", max_version="3.12"), pkgs={ "grpcio": ["~=1.59.0", latest], "pytest-asyncio": "==0.23.7", @@ -2002,7 +2003,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, venvs=[ Venv( - pys=select_pys(min_version="3.7"), + pys=select_pys(min_version="3.7", max_version="3.12"), pkgs={ "aiopg": ["~=1.0", "~=1.4.0"], }, @@ -2247,7 +2248,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT }, ), Venv( - pys=select_pys(min_version="3.12"), + pys="3.12", pkgs={ "sanic": [latest], "sanic-testing": "~=22.3.0", @@ -2322,7 +2323,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pkgs={"asyncpg": ["~=0.27", latest]}, ), Venv( - pys=select_pys(min_version="3.12"), + pys=select_pys(min_version="3.12", max_version="3.12"), pkgs={"asyncpg": [latest]}, ), ], @@ -2355,7 +2356,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT # sqlite3 is tied to the Python version and is not installable via pip # To test a range of versions without updating Python, we use Linux only pysqlite3-binary package # Remove pysqlite3-binary on Python 3.9+ locally on non-linux machines - Venv(pys=select_pys(min_version="3.9"), pkgs={"pysqlite3-binary": [latest]}), + Venv(pys=select_pys(min_version="3.9", max_version="3.12"), pkgs={"pysqlite3-binary": [latest]}), 
Venv(pys=select_pys(max_version="3.8"), pkgs={"importlib-metadata": latest}), ], ), @@ -2420,7 +2421,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( name="consul", - pys=select_pys(), + pys=select_pys(max_version="3.12"), command="pytest {cmdargs} tests/contrib/consul", pkgs={ "python-consul": [ @@ -2534,8 +2535,8 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pkgs={"gevent": latest}, ), Venv( - pys=select_pys(min_version="3.12"), - pkgs={"gevent": latest}, + pys="3.12", + pkgs={"gevent": "~=23.9.0"}, ), ], ), @@ -2557,7 +2558,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( # pyodbc added support for Python 3.11 in 4.0.35 - pys=select_pys(min_version="3.11"), + pys=select_pys(min_version="3.11", max_version="3.12"), pkgs={"pyodbc": [latest]}, ), ], @@ -2624,7 +2625,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( # tornado added support for Python 3.10 in 6.2 - pys=select_pys(min_version="3.10"), + pys=select_pys(min_version="3.10", max_version="3.12"), pkgs={"tornado": ["==6.2", "==6.3.1"]}, ), ], @@ -2640,7 +2641,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), Venv( # mysqlclient added support for Python 3.9/3.10 in 2.1 - pys=select_pys(min_version="3.9"), + pys=select_pys(min_version="3.9", max_version="3.12"), pkgs={"mysqlclient": ["~=2.1", latest]}, ), ], @@ -2709,7 +2710,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "cohere": latest, "anthropic": "==0.26.0", }, - pys=select_pys(min_version="3.9"), + pys=select_pys(min_version="3.9", max_version="3.12"), ), Venv( pkgs={ @@ -2727,14 +2728,14 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "botocore": latest, "cohere": latest, }, - pys=select_pys(min_version="3.9"), + pys=select_pys(min_version="3.9", max_version="3.12"), 
), ], ), Venv( name="anthropic", command="pytest {cmdargs} tests/contrib/anthropic", - pys=select_pys(min_version="3.8"), + pys=select_pys(min_version="3.8", max_version="3.12"), pkgs={ "pytest-asyncio": latest, "vcrpy": latest, @@ -2744,7 +2745,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="google_generativeai", command="pytest {cmdargs} tests/contrib/google_generativeai", - pys=select_pys(min_version="3.9"), + pys=select_pys(min_version="3.9", max_version="3.12"), pkgs={ "pytest-asyncio": latest, "google-generativeai": [latest], @@ -2756,7 +2757,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="vertexai", command="pytest {cmdargs} tests/contrib/vertexai", - pys=select_pys(min_version="3.9"), + pys=select_pys(min_version="3.9", max_version="3.12"), pkgs={ "pytest-asyncio": latest, "vertexai": [latest], @@ -2820,7 +2821,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT pkgs={"confluent-kafka": ["~=1.9.2", latest]}, ), # confluent-kafka added support for Python 3.11 in 2.0.2 - Venv(pys=select_pys(min_version="3.11"), pkgs={"confluent-kafka": latest}), + Venv(pys=select_pys(min_version="3.11", max_version="3.12"), pkgs={"confluent-kafka": latest}), ], ), ], @@ -2858,7 +2859,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT Venv( name="ci_visibility", command="pytest --no-ddtrace {cmdargs} tests/ci_visibility", - pys=select_pys(), + pys=select_pys(max_version="3.12"), pkgs={ "msgpack": latest, "coverage": latest, @@ -3003,7 +3004,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), # Python 3.12 Venv( - pys=select_pys(min_version="3.12"), + pys="3.12", pkgs={"uwsgi": latest}, venvs=[ Venv( @@ -3143,7 +3144,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT ), # Python 3.12 Venv( - pys=select_pys(min_version="3.12"), + pys="3.12", 
pkgs={"uwsgi": latest}, venvs=[ Venv( diff --git a/setup.py b/setup.py index 13b0cb4a4f0..74e8f8187d7 100644 --- a/setup.py +++ b/setup.py @@ -527,7 +527,7 @@ def get_exts_for(name): sources=[ "ddtrace/appsec/_iast/_stacktrace.c", ], - extra_compile_args=debug_compile_args, + extra_compile_args=extra_compile_args + debug_compile_args, ) ) @@ -553,7 +553,7 @@ def get_exts_for(name): ) # Echion doesn't build on 3.7, so just skip it outright for now - if sys.version_info >= (3, 8): + if sys.version_info >= (3, 8) and sys.version_info < (3, 13): ext_modules.append( CMakeExtension( "ddtrace.internal.datadog.profiling.stack_v2._stack_v2", diff --git a/src/core/Cargo.lock b/src/core/Cargo.lock index 27f510e5ddc..f840798f96e 100644 --- a/src/core/Cargo.lock +++ b/src/core/Cargo.lock @@ -14,12 +14,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" -[[package]] -name = "bitflags" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" - [[package]] name = "bytes" version = "1.6.1" @@ -46,7 +40,7 @@ version = "0.1.0" dependencies = [ "datadog-ddsketch", "pyo3", - "pyo3-build-config", + "pyo3-build-config 0.21.2", ] [[package]] @@ -57,9 +51,9 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "indoc" @@ -82,16 +76,6 @@ version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" -[[package]] -name = "lock_api" -version = 
"0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg", - "scopeguard", -] - [[package]] name = "memoffset" version = "0.9.1" @@ -107,29 +91,6 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "parking_lot" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - [[package]] name = "portable-atomic" version = "1.6.0" @@ -170,17 +131,17 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.21.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" +checksum = "15ee168e30649f7f234c3d49ef5a7a6cbf5134289bc46c29ff3155fa3221c225" dependencies = [ "cfg-if", "indoc", "libc", "memoffset", - "parking_lot", + "once_cell", "portable-atomic", - "pyo3-build-config", + "pyo3-build-config 0.22.3", "pyo3-ffi", "pyo3-macros", "unindent", @@ -196,21 +157,31 @@ dependencies = [ "target-lexicon", ] +[[package]] +name = "pyo3-build-config" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e61cef80755fe9e46bb8a0b8f20752ca7676dcc07a5277d8b7768c6172e529b3" +dependencies = [ + "once_cell", + "target-lexicon", +] + [[package]] name = "pyo3-ffi" -version = "0.21.2" +version = "0.22.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" +checksum = "67ce096073ec5405f5ee2b8b31f03a68e02aa10d5d4f565eca04acc41931fa1c" dependencies = [ "libc", - "pyo3-build-config", + "pyo3-build-config 0.22.3", ] [[package]] name = "pyo3-macros" -version = "0.21.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" +checksum = "2440c6d12bc8f3ae39f1e775266fa5122fd0c8891ce7520fa6048e683ad3de28" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -220,13 +191,13 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.21.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" +checksum = "1be962f0e06da8f8465729ea2cb71a416d2257dff56cbe40a70d3e62a93ae5d1" dependencies = [ "heck", "proc-macro2", - "pyo3-build-config", + "pyo3-build-config 0.22.3", "quote", "syn 2.0.61", ] @@ -240,27 +211,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "redox_syscall" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" -dependencies = [ - "bitflags", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" - [[package]] name = "syn" version = "1.0.109" @@ -300,67 +250,3 @@ name = "unindent" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" - -[[package]] -name = "windows-targets" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" - -[[package]] -name = 
"windows_x86_64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 3353bc8b504..94eeb6d7a3e 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -9,7 +9,7 @@ strip = "debuginfo" opt-level = 3 [dependencies] -pyo3 = { version = "0.21.2", features = ["extension-module"] } +pyo3 = { version = "0.22.3", features = ["extension-module"] } datadog-ddsketch = { git = "https://github.com/DataDog/libdatadog", rev = "v14.3.1" } [build-dependencies] diff --git a/tests/contrib/futures/test_propagation.py b/tests/contrib/futures/test_propagation.py index d4d5beb8946..037aebd9ce5 100644 --- a/tests/contrib/futures/test_propagation.py +++ b/tests/contrib/futures/test_propagation.py @@ -1,4 +1,5 @@ import concurrent.futures +import sys import time import pytest @@ -406,6 +407,7 @@ def fn(): assert spans[1].parent_id == spans[0].span_id +@pytest.mark.skipif(sys.version_info > (3, 12), reason="Fails on 3.13") @pytest.mark.subprocess(ddtrace_run=True, timeout=5) def test_concurrent_futures_with_gevent(): """Check compatibility between the integration and gevent""" diff --git a/tests/internal/crashtracker/test_crashtracker.py b/tests/internal/crashtracker/test_crashtracker.py index ed338ce95bb..a4074745f83 100644 --- a/tests/internal/crashtracker/test_crashtracker.py +++ b/tests/internal/crashtracker/test_crashtracker.py @@ -506,6 +506,7 @@ def test_crashtracker_user_tags_envvar(run_python_code_in_subprocess): @pytest.mark.skipif(not sys.platform.startswith("linux"), reason="Linux only") +@pytest.mark.skipif(sys.version_info > (3, 12), reason="Fails on 3.13") def test_crashtracker_set_tag_profiler_config(run_python_code_in_subprocess): port, sock = utils.crashtracker_receiver_bind() assert sock diff --git a/tests/internal/symbol_db/test_symbols.py b/tests/internal/symbol_db/test_symbols.py index 
a97f6c5bcee..56fa45b3edc 100644 --- a/tests/internal/symbol_db/test_symbols.py +++ b/tests/internal/symbol_db/test_symbols.py @@ -1,5 +1,6 @@ from importlib.machinery import ModuleSpec from pathlib import Path +import sys from types import ModuleType import typing as t @@ -22,6 +23,7 @@ def foo(a, b, c=None): assert {s.name for s in symbols if s.symbol_type == SymbolType.LOCAL} == {"loc"} +@pytest.mark.skipif(sys.version_info > (3, 12), reason="fails on 3.13") def test_symbols_class(): class Sup: pass diff --git a/tests/internal/test_forksafe.py b/tests/internal/test_forksafe.py index e9c5a42c9ef..f9a32f460c5 100644 --- a/tests/internal/test_forksafe.py +++ b/tests/internal/test_forksafe.py @@ -1,5 +1,6 @@ from collections import Counter import os +import sys import pytest @@ -299,6 +300,7 @@ def fn(): assert exit_code == 42 +@pytest.mark.skipif(sys.version_info > (3, 12), reason="fails on 3.13") @pytest.mark.subprocess( out=lambda _: Counter(_) == {"C": 3, "T": 4}, err=None, diff --git a/tests/internal/test_injection.py b/tests/internal/test_injection.py index 3b74c589d62..871726620a8 100644 --- a/tests/internal/test_injection.py +++ b/tests/internal/test_injection.py @@ -1,4 +1,5 @@ from contextlib import contextmanager +import sys import mock import pytest @@ -205,6 +206,7 @@ def test_inject_in_loop(): assert hook.call_count == n +@pytest.mark.skipif(sys.version_info > (3, 12), reason="Fails on 3.13") def test_inject_in_generator(): lo = next(iter(linenos(generator_target))) hook = mock.Mock() diff --git a/tests/internal/test_wrapping.py b/tests/internal/test_wrapping.py index d27eadac43b..7c9a071545d 100644 --- a/tests/internal/test_wrapping.py +++ b/tests/internal/test_wrapping.py @@ -95,6 +95,7 @@ def f(a, b, c=None): assert not channel1 and not channel2 +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") def test_wrap_generator(): channel = [] @@ -116,6 +117,7 @@ def g(): assert list(g()) == list(range(10)) == channel 
+@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") def test_wrap_generator_send(): def wrapper(f, args, kwargs): return f(*args, **kwargs) @@ -142,6 +144,7 @@ def g(): assert list(range(10)) == channel +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") def test_wrap_generator_throw_close(): def wrapper_maker(channel): def wrapper(f, args, kwargs): @@ -215,6 +218,7 @@ def f(): assert [frame.f_code.co_name for frame in f()[:4]] == ["f", "wrapper", "f", "test_wrap_stack"] +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") @pytest.mark.asyncio async def test_wrap_async_context_manager_exception_on_exit(): def wrapper(f, args, kwargs): @@ -231,6 +235,7 @@ async def g(): await acm.__aexit__(ValueError, None, None) +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") def test_wrap_generator_yield_from(): channel = [] @@ -304,6 +309,7 @@ def wrapper(f, args, kwargs): assert f(1, path="bar", foo="baz") == (1, (), "bar", {"foo": "baz"}) +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") @pytest.mark.asyncio async def test_async_generator(): async def stream(): @@ -340,6 +346,7 @@ async def agwrapper(f, args, kwargs): assert awrapper_called +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") @pytest.mark.asyncio async def test_wrap_async_generator_send(): def wrapper(f, args, kwargs): @@ -372,6 +379,7 @@ async def consume(): await consume() +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") @pytest.mark.asyncio async def test_double_async_for_with_exception(): channel = None @@ -416,6 +424,7 @@ async def stream(): b"".join([_ async for _ in s]) +@pytest.mark.skipif(sys.version_info > (3, 12), reason="segfault on 3.13") @pytest.mark.asyncio async def test_wrap_async_generator_throw_close(): channel = [] From 1501bff342c55fca79d2687e75f1f1b3382d2fb9 Mon Sep 17 00:00:00 2001 From: David Sanchez 
<838104+sanchda@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:10:38 -0500 Subject: [PATCH 60/78] chore(profiling): refactor fetch libdatadog (#11747) This patch converts fetch libdatadog to a pure cmake implementation and updates the way the `dd_wrapper` is called so it should only be built once. In current-main with an empty `build` directory: ``` Performance counter stats for './build_standalone.sh -- -- --': 63,864.14 msec task-clock:u # 1.005 CPUs utilized 0 context-switches:u # 0.000 /sec 0 cpu-migrations:u # 0.000 /sec 2,853,585 page-faults:u # 44.682 K/sec cycles:u instructions:u branches:u branch-misses:u 63.534837508 seconds time elapsed 53.238101000 seconds user 10.718485000 seconds sys ``` After applying this patch: ``` Performance counter stats for './build_standalone.sh -- -- --': 33,262.53 msec task-clock:u # 0.985 CPUs utilized 0 context-switches:u # 0.000 /sec 0 cpu-migrations:u # 0.000 /sec 1,638,895 page-faults:u # 49.272 K/sec cycles:u instructions:u branches:u branch-misses:u 33.757125924 seconds time elapsed 27.434846000 seconds user 5.887714000 seconds sys ``` So--a small difference, but a good difference. 
## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: Taegyun Kim --- .../datadog/profiling/build_standalone.sh | 8 +- .../profiling/cmake/FindLibdatadog.cmake | 103 ++++++++++++++++-- .../profiling/cmake/tools/fetch_libdatadog.sh | 100 ----------------- .../cmake/tools/libdatadog_checksums.txt | 5 - .../profiling/crashtracker/CMakeLists.txt | 11 +- .../profiling/dd_wrapper/CMakeLists.txt | 17 ++- .../datadog/profiling/ddup/CMakeLists.txt | 13 +-- .../datadog/profiling/stack_v2/CMakeLists.txt | 11 +- 8 files changed, 125 insertions(+), 143 deletions(-) delete mode 100755 
ddtrace/internal/datadog/profiling/cmake/tools/fetch_libdatadog.sh delete mode 100644 ddtrace/internal/datadog/profiling/cmake/tools/libdatadog_checksums.txt diff --git a/ddtrace/internal/datadog/profiling/build_standalone.sh b/ddtrace/internal/datadog/profiling/build_standalone.sh index 286f6d179a2..beeda4f21b4 100755 --- a/ddtrace/internal/datadog/profiling/build_standalone.sh +++ b/ddtrace/internal/datadog/profiling/build_standalone.sh @@ -103,8 +103,8 @@ cmake_args=( -DPython3_ROOT_DIR=$(python3 -c "import sysconfig; print(sysconfig.get_config_var('prefix'))") ) -# Initial build targets; no matter what, dd_wrapper is the base dependency, so it's always built -targets=("dd_wrapper") +# Initial build targets; start out empty +targets=() set_cc() { if [ -z "${CC:-}" ]; then @@ -333,7 +333,9 @@ add_target() { targets+=("crashtracker") ;; dd_wrapper) - # We always build dd_wrapper, so no need to add it to the list + # `dd_wrapper` is a dependency of other targets, but the overall structure is weird when it's given explicitly + # so we only include it when it's called explicitly + targets+=("dd_wrapper") ;; stack_v2) targets+=("stack_v2") diff --git a/ddtrace/internal/datadog/profiling/cmake/FindLibdatadog.cmake b/ddtrace/internal/datadog/profiling/cmake/FindLibdatadog.cmake index 6e103fe7d70..3a96fbeb353 100644 --- a/ddtrace/internal/datadog/profiling/cmake/FindLibdatadog.cmake +++ b/ddtrace/internal/datadog/profiling/cmake/FindLibdatadog.cmake @@ -1,27 +1,106 @@ -# Only add this project if Datadog::Profiling is not already defined +# Only proceed if Datadog::Profiling (provided by libdatadog) isn't already defined if(TARGET Datadog::Profiling) return() endif() -include(ExternalProject) -set(TAG_LIBDATADOG - "v14.3.1" - CACHE STRING "libdatadog github tag") +# Set the FetchContent paths early +set(FETCHCONTENT_BASE_DIR + "${CMAKE_CURRENT_BINARY_DIR}/_deps" + CACHE PATH "FetchContent base directory") +set(FETCHCONTENT_DOWNLOADS_DIR + 
"${FETCHCONTENT_BASE_DIR}/downloads" + CACHE PATH "FetchContent downloads directory") -set(Datadog_BUILD_DIR ${CMAKE_BINARY_DIR}/libdatadog) -set(Datadog_ROOT ${Datadog_BUILD_DIR}/libdatadog-${TAG_LIBDATADOG}) +include_guard(GLOBAL) +include(FetchContent) -message(STATUS "${CMAKE_CURRENT_LIST_DIR}/tools/fetch_libdatadog.sh ${TAG_LIBDATADOG} ${Datadog_ROOT}") -execute_process(COMMAND "${CMAKE_CURRENT_LIST_DIR}/tools/fetch_libdatadog.sh" ${TAG_LIBDATADOG} ${Datadog_ROOT} - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} COMMAND_ERROR_IS_FATAL ANY) +# Set version if not already set +if(NOT DEFINED TAG_LIBDATADOG) + set(TAG_LIBDATADOG + "v14.3.1" + CACHE STRING "libdatadog github tag") +endif() + +if(NOT DEFINED DD_CHECKSUMS) + set(DD_CHECKSUMS + "57f83aff275628bb1af89c22bb4bd696726daf2a9e09b6cd0d966b29e65a7ad6 libdatadog-aarch64-alpine-linux-musl.tar.gz" + "2be2efa98dfc32f109abdd79242a8e046a7a300c77634135eb293e000ecd4a4c libdatadog-aarch64-apple-darwin.tar.gz" + "36db8d50ccabb71571158ea13835c0f1d05d30b32135385f97c16343cfb6ddd4 libdatadog-aarch64-unknown-linux-gnu.tar.gz" + "2f61fd21cf2f8147743e414b4a8c77250a17be3aecc42a69ffe54f0a603d5c92 libdatadog-x86_64-alpine-linux-musl.tar.gz" + "f01f05600591063eba4faf388f54c155ab4e6302e5776c7855e3734955f7daf7 libdatadog-x86_64-unknown-linux-gnu.tar.gz") +endif() + +# Determine platform-specific tarball name in a way that conforms to the libdatadog naming scheme in Github releases +if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64") + set(DD_ARCH "aarch64") +elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64") + set(DD_ARCH "x86_64") +else() + message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}") +endif() + +if(APPLE) + set(DD_PLATFORM "apple-darwin") +elseif(UNIX) + execute_process( + COMMAND ldd --version + OUTPUT_VARIABLE LDD_OUTPUT + ERROR_VARIABLE LDD_OUTPUT + OUTPUT_STRIP_TRAILING_WHITESPACE) + if(LDD_OUTPUT MATCHES "musl") + set(DD_PLATFORM "alpine-linux-musl") + else() + set(DD_PLATFORM 
"unknown-linux-gnu") + endif() +else() + message(FATAL_ERROR "Unsupported operating system") +endif() + +set(DD_TARBALL "libdatadog-${DD_ARCH}-${DD_PLATFORM}.tar.gz") + +# Make sure we can get the checksum for the tarball +foreach(ENTRY IN LISTS DD_CHECKSUMS) + if(ENTRY MATCHES "^([a-fA-F0-9]+) ${DD_TARBALL}$") + set(DD_HASH "${CMAKE_MATCH_1}") + break() + endif() +endforeach() + +if(NOT DEFINED DD_HASH) + message(FATAL_ERROR "Could not find checksum for ${DD_TARBALL}") +endif() + +# Clean up any existing downloads if they exist +set(TARBALL_PATH "${FETCHCONTENT_DOWNLOADS_DIR}/${DD_TARBALL}") +if(EXISTS "${TARBALL_PATH}") + file(SHA256 "${TARBALL_PATH}" EXISTING_HASH) + if(NOT EXISTING_HASH STREQUAL DD_HASH) + file(REMOVE "${TARBALL_PATH}") + # Also remove the subbuild directory to force a fresh download + file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/_deps/libdatadog-subbuild") + endif() +endif() + +# Use FetchContent to download and extract the library +FetchContent_Declare( + libdatadog + URL "https://github.com/DataDog/libdatadog/releases/download/${TAG_LIBDATADOG}/${DD_TARBALL}" + URL_HASH SHA256=${DD_HASH} + DOWNLOAD_DIR "${FETCHCONTENT_DOWNLOADS_DIR}" SOURCE_DIR "${FETCHCONTENT_BASE_DIR}/libdatadog-src") + +# Make the content available +FetchContent_MakeAvailable(libdatadog) +# Set up paths +get_filename_component(Datadog_ROOT "${libdatadog_SOURCE_DIR}" ABSOLUTE) set(Datadog_DIR "${Datadog_ROOT}/cmake") -# Prefer static library to shared library +# Configure library preferences (static over shared) set(CMAKE_FIND_LIBRARY_SUFFIXES_BACKUP ${CMAKE_FIND_LIBRARY_SUFFIXES}) set(CMAKE_FIND_LIBRARY_SUFFIXES .a) +# Find the package find_package(Datadog REQUIRED) -# Restore CMAKE_FIND_LIBRARY_SUFFIXES +# Restore library preferences set(CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES_BACKUP}) diff --git a/ddtrace/internal/datadog/profiling/cmake/tools/fetch_libdatadog.sh b/ddtrace/internal/datadog/profiling/cmake/tools/fetch_libdatadog.sh deleted file mode 
100755 index a1e55066089..00000000000 --- a/ddtrace/internal/datadog/profiling/cmake/tools/fetch_libdatadog.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash -# http://redsymbol.net/articles/unofficial-bash-strict-mode/ -set -euox pipefail -IFS=$'\n\t' - -usage() { - echo "Usage :" - echo "$0 " - echo "" - echo "Example" - echo " $0 v0.7.0-rc.1 ./vendor" -} - -if [ $# != 2 ] || [ "$1" == "-h" ]; then - usage - exit 1 -fi - -SCRIPTPATH=$(readlink -f "$0") -SCRIPTDIR=$(dirname "$SCRIPTPATH") - -OS_NAME=$(uname -s) -MARCH=$(uname -m) - -TAG_LIBDATADOG=$1 -TARGET_EXTRACT=$2 - -CHECKSUM_FILE=${SCRIPTDIR}/libdatadog_checksums.txt - -# if os is darwin, set distribution to apple-darwin and march to aarch64 -if [[ "$OS_NAME" == "Darwin" ]]; then - DISTRIBUTION="apple-darwin" - # if march is arm64 set it to aarch64 - if [[ "$MARCH" == "arm64" ]]; then - MARCH="aarch64" - else - echo "Unsupported architecture $MARCH for $OS_NAME" - exit 1 - fi -elif [[ "$OS_NAME" == "Linux" ]]; then - # Test for musl - MUSL_LIBC=$(ldd /bin/ls | grep 'musl' | head -1 | cut -d ' ' -f1 || true) - if [[ -n ${MUSL_LIBC-""} ]]; then - DISTRIBUTION="alpine-linux-musl" - else - DISTRIBUTION="unknown-linux-gnu" - fi -else - echo "Unsupported OS $OS_NAME" - exit 1 -fi - -# https://github.com/DataDog/libdatadog/releases/download/v0.7.0-rc.1/libdatadog-aarch64-alpine-linux-musl.tar.gz -TAR_LIBDATADOG=libdatadog-${MARCH}-${DISTRIBUTION}.tar.gz -GITHUB_URL_LIBDATADOG=https://github.com/DataDog/libdatadog/releases/download/${TAG_LIBDATADOG}/${TAR_LIBDATADOG} - -SHA256_LIBDATADOG="blank" -while IFS=' ' read -r checksum filename; do - if [ "$filename" == "$TAR_LIBDATADOG" ]; then - SHA256_LIBDATADOG="$checksum $filename" - break - fi -done < "$CHECKSUM_FILE" - -if [ "$SHA256_LIBDATADOG" == "blank" ]; then - echo "Could not find checksum for ${TAR_LIBDATADOG} in ${CHECKSUM_FILE}" - exit 1 -else - echo "Using libdatadog sha256: ${SHA256_LIBDATADOG}" -fi - -mkdir -p "$TARGET_EXTRACT" || true -cd "$TARGET_EXTRACT" 
- -if [[ -e "${TAR_LIBDATADOG}" ]]; then - already_present=1 -else - already_present=0 - echo "Downloading libdatadog ${GITHUB_URL_LIBDATADOG}..." - if command -v curl > /dev/null 2>&1; then - curl -fsSLO "${GITHUB_URL_LIBDATADOG}" - elif command -v wget > /dev/null 2>&1; then - wget -q -O "${GITHUB_URL_LIBDATADOG##*/}" "${GITHUB_URL_LIBDATADOG}" - else - echo "Error: neither curl nor wget is available." >&2 - exit 1 - fi -fi - -echo "Checking libdatadog sha256" -if ! echo "${SHA256_LIBDATADOG}" | sha256sum -c -; then - echo "Error validating libdatadog SHA256" - echo "Please clear $TARGET_EXTRACT before restarting" - exit 1 -fi - -if [[ $already_present -eq 0 || ! -f "cmake/DatadogConfig.cmake" ]]; then - echo "Extracting ${TAR_LIBDATADOG}" - tar xf "${TAR_LIBDATADOG}" --strip-components=1 --no-same-owner -fi diff --git a/ddtrace/internal/datadog/profiling/cmake/tools/libdatadog_checksums.txt b/ddtrace/internal/datadog/profiling/cmake/tools/libdatadog_checksums.txt deleted file mode 100644 index ca856e996ae..00000000000 --- a/ddtrace/internal/datadog/profiling/cmake/tools/libdatadog_checksums.txt +++ /dev/null @@ -1,5 +0,0 @@ -57f83aff275628bb1af89c22bb4bd696726daf2a9e09b6cd0d966b29e65a7ad6 libdatadog-aarch64-alpine-linux-musl.tar.gz -2be2efa98dfc32f109abdd79242a8e046a7a300c77634135eb293e000ecd4a4c libdatadog-aarch64-apple-darwin.tar.gz -36db8d50ccabb71571158ea13835c0f1d05d30b32135385f97c16343cfb6ddd4 libdatadog-aarch64-unknown-linux-gnu.tar.gz -2f61fd21cf2f8147743e414b4a8c77250a17be3aecc42a69ffe54f0a603d5c92 libdatadog-x86_64-alpine-linux-musl.tar.gz -f01f05600591063eba4faf388f54c155ab4e6302e5776c7855e3734955f7daf7 libdatadog-x86_64-unknown-linux-gnu.tar.gz diff --git a/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt b/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt index 2ae02df66f2..c23a3e3ddce 100644 --- a/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt +++ 
b/ddtrace/internal/datadog/profiling/crashtracker/CMakeLists.txt @@ -10,12 +10,11 @@ message(STATUS "Building extension: ${EXTENSION_NAME}") # Get the cmake modules for this project list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/../cmake") -# Includes -include(FetchContent) -include(ExternalProject) -include(FindLibdatadog) - -add_subdirectory(../dd_wrapper ${CMAKE_CURRENT_BINARY_DIR}/../dd_wrapper_build) +# Having a common target in a subdirectory like this is a hack and a mistake, but it's fiddly to change it so we haven't +# been able to. Instead, make sure that the binary path set in the subdirectory is stable *as a string* in order to make +# sure the caches work. +get_filename_component(DD_WRAPPER_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/../dd_wrapper_build ABSOLUTE) +add_subdirectory(../dd_wrapper ${DD_WRAPPER_BUILD_DIR}) find_package(Python3 COMPONENTS Interpreter Development) diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/CMakeLists.txt b/ddtrace/internal/datadog/profiling/dd_wrapper/CMakeLists.txt index 809569d8493..c427abdcfbc 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/CMakeLists.txt @@ -12,15 +12,24 @@ get_filename_component(dd_wrapper_BUILD_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../ddtr list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../cmake") # Includes -include(FetchContent) -include(ExternalProject) -include(FindLibdatadog) include(AnalysisFunc) include(FindClangtidy) include(FindCppcheck) include(FindInfer) include(CheckSymbolExists) +# Load libdatadog +include(FindLibdatadog) + +# Since this file is currently only loaded as a subdirectory, we need to propagate certain libdatadog variables up to +# the parent scope. 
+set(Datadog_INCLUDE_DIRS + ${Datadog_INCLUDE_DIRS} + PARENT_SCOPE) +set(Datadog_LIBRARIES + ${Datadog_LIBRARIES} + PARENT_SCOPE) + set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) @@ -51,7 +60,7 @@ target_include_directories(dd_wrapper PRIVATE include ${Datadog_INCLUDE_DIRS}) target_link_libraries(dd_wrapper PRIVATE ${Datadog_LIBRARIES} Threads::Threads) -# Figure out the suffix. Try to approximate the cpython way of doing things. C library +# Figure out the suffix. Try to approximate the cpython way of doing things. check_symbol_exists(__GLIBC__ "features.h" HAVE_GLIBC) check_symbol_exists(__MUSL__ "features.h" HAVE_MUSL) diff --git a/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt b/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt index fe92fac3952..6a4cb4e8803 100644 --- a/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/ddup/CMakeLists.txt @@ -13,14 +13,11 @@ message(STATUS "Building extension: ${EXTENSION_NAME}") # Get the cmake modules for this project list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/../cmake") -# Includes -include(FetchContent) -include(ExternalProject) -include(FindLibdatadog) - -# Technically, this should be its own project which we `include()`, but I don't want to deal with that when so many -# things may yet be factored differently. -add_subdirectory(../dd_wrapper ${CMAKE_CURRENT_BINARY_DIR}/../dd_wrapper_build) +# Having a common target in a subdirectory like this is a hack and a mistake, but it's fiddly to change it so we haven't +# been able to. Instead, make sure that the binary path set in the subdirectory is stable *as a string* in order to make +# sure the caches work. 
+get_filename_component(DD_WRAPPER_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/../dd_wrapper_build ABSOLUTE) +add_subdirectory(../dd_wrapper ${DD_WRAPPER_BUILD_DIR}) find_package(Python3 COMPONENTS Interpreter Development) diff --git a/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt b/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt index 69788494920..77952e09d41 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/stack_v2/CMakeLists.txt @@ -11,16 +11,17 @@ message(STATUS "Building extension: ${EXTENSION_NAME}") # Custom cmake modules are in the parent directory list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../cmake") +# Having a common target in a subdirectory like this is a hack and a mistake, but it's fiddly to change it so we haven't +# been able to. Instead, make sure that the binary path set in the subdirectory is stable *as a string* in order to make +# sure the caches work. +get_filename_component(DD_WRAPPER_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/../dd_wrapper_build ABSOLUTE) +add_subdirectory(../dd_wrapper ${DD_WRAPPER_BUILD_DIR}) + # Includes include(FetchContent) -include(ExternalProject) include(AnalysisFunc) include(FindCppcheck) -# dd_wrapper should be its own project at one point, if the current design is kept, but whether or not we keep that -# design is unknown. Hack it for now. 
-add_subdirectory(../dd_wrapper ${CMAKE_CURRENT_BINARY_DIR}/../dd_wrapper_build) - find_package(Python3 COMPONENTS Interpreter Development) # Make sure we have necessary Python variables From 28132911ed31192f5d1ea0e78aee30b0f26890c7 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 19 Dec 2024 09:39:39 +0100 Subject: [PATCH 61/78] ci: enable standalone sca system tests (#11769) CI: Enables [system tests for Standalone SCA billing](https://github.com/DataDog/system-tests/pull/3690) to run on dd-trace-py's CI ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .github/workflows/system-tests.yml | 4 ++++ 1 file changed, 4 
insertions(+) diff --git a/.github/workflows/system-tests.yml b/.github/workflows/system-tests.yml index ce795db4fe2..ccf6c6501d9 100644 --- a/.github/workflows/system-tests.yml +++ b/.github/workflows/system-tests.yml @@ -213,6 +213,10 @@ jobs: if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' run: ./run.sh IAST_STANDALONE + - name: Run SCA_STANDALONE + if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' + run: ./run.sh SCA_STANDALONE + - name: Run APPSEC_RUNTIME_ACTIVATION if: always() && steps.docker_load.outcome == 'success' && matrix.scenario == 'appsec-1' run: ./run.sh APPSEC_RUNTIME_ACTIVATION From b632a714bae6ab1d5703e1834d31cf9b31ceed27 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Thu, 19 Dec 2024 16:13:52 +0000 Subject: [PATCH 62/78] chore(er): correct exception ID field name (#11737) We correct the name of the field that is expected to carry the exception ID. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is 
easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_exception/replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ddtrace/debugging/_exception/replay.py b/ddtrace/debugging/_exception/replay.py index 3a54bce6f51..5b9f1a9f330 100644 --- a/ddtrace/debugging/_exception/replay.py +++ b/ddtrace/debugging/_exception/replay.py @@ -170,7 +170,7 @@ class SpanExceptionSnapshot(Snapshot): @property def data(self) -> t.Dict[str, t.Any]: data = super().data - data.update({"exception-id": str(self.exc_id)}) + data.update({"exceptionId": str(self.exc_id)}) return data From f483beb207f3670318ac01e8b314c02c0de0c070 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Thu, 19 Dec 2024 11:28:24 -0500 Subject: [PATCH 63/78] ci: do not use datadog-ci binary (#11789) --- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4105e2d5eb0..748942af278 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -93,7 +93,6 @@ check_new_flaky_tests: stage: quality-gate extends: .testrunner script: - - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" && chmod +x /usr/local/bin/datadog-ci - export DD_SITE=datadoghq.com - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name ci.${CI_PROJECT_NAME}.dd-api-key-qualitygate --with-decryption --query "Parameter.Value" --out text) - export DD_APP_KEY=$(aws ssm get-parameter --region us-east-1 --name ci.${CI_PROJECT_NAME}.dd-app-key-qualitygate --with-decryption --query "Parameter.Value" --out text) @@ -101,4 +100,4 @@ 
check_new_flaky_tests: except: - main - '[0-9].[0-9]*' - - 'mq-working-branch**' \ No newline at end of file + - 'mq-working-branch**' From 0035bfee97f544c650ee51ba4279cc3df5c99c82 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Thu, 19 Dec 2024 16:45:54 +0000 Subject: [PATCH 64/78] chore(di): capture exception chain (#11771) We augment the exception fields with known exception chaining attributes to allow capturing exception chaining relations. The fields need to be added manually because they are part of the BaseException built-in fields and are not included in the object's __dict__ attribute. ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance 
policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_signal/utils.py | 9 +++++++++ tests/debugging/exception/test_replay.py | 4 ++-- tests/debugging/test_debugger.py | 20 +++++++++++++++++++- tests/debugging/test_encoding.py | 16 +++++++++++++++- 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/ddtrace/debugging/_signal/utils.py b/ddtrace/debugging/_signal/utils.py index b2e5d8e285b..09b319598ef 100644 --- a/ddtrace/debugging/_signal/utils.py +++ b/ddtrace/debugging/_signal/utils.py @@ -304,6 +304,15 @@ def capture_value( } fields = get_fields(value) + + # Capture exception chain for exceptions + if _isinstance(value, BaseException): + for attr in ("args", "__cause__", "__context__", "__suppress_context__"): + try: + fields[attr] = object.__getattribute__(value, attr) + except AttributeError: + pass + captured_fields = { n: ( capture_value(v, level=level - 1, maxlen=maxlen, maxsize=maxsize, maxfields=maxfields, stopping_cond=cond) diff --git a/tests/debugging/exception/test_replay.py b/tests/debugging/exception/test_replay.py index 8b9a2a7d830..54baeb8b826 100644 --- a/tests/debugging/exception/test_replay.py +++ b/tests/debugging/exception/test_replay.py @@ -161,8 +161,8 @@ def b_chain(bar): m = 4 try: a(bar % m) - except ValueError: - raise KeyError("chain it") + except ValueError as exc: + raise KeyError("chain it") from exc def c(foo=42): with self.trace("c"): diff --git a/tests/debugging/test_debugger.py b/tests/debugging/test_debugger.py index ed337c27f1e..0cc65bc43cf 100644 --- a/tests/debugging/test_debugger.py +++ b/tests/debugging/test_debugger.py @@ -210,7 +210,25 @@ def test_debugger_function_probe_on_function_with_exception(): return_capture = snapshot_data["captures"]["return"] assert return_capture["arguments"] == {} - assert return_capture["locals"] == {"@exception": {"fields": {}, "type": "Exception"}} + assert return_capture["locals"] == { + "@exception": { + "type": "Exception", + 
"fields": { + "args": { + "type": "tuple", + "elements": [ + {"type": "str", "value": "'Hello'"}, + {"type": "str", "value": "'world!'"}, + {"type": "int", "value": "42"}, + ], + "size": 3, + }, + "__cause__": {"type": "NoneType", "isNull": True}, + "__context__": {"type": "NoneType", "isNull": True}, + "__suppress_context__": {"type": "bool", "value": "False"}, + }, + } + } assert return_capture["throwable"]["message"] == "'Hello', 'world!', 42" assert return_capture["throwable"]["type"] == "Exception" diff --git a/tests/debugging/test_encoding.py b/tests/debugging/test_encoding.py index c06e5000ed8..c22851f1112 100644 --- a/tests/debugging/test_encoding.py +++ b/tests/debugging/test_encoding.py @@ -191,7 +191,21 @@ def _(): exc = context.pop("throwable") assert context["arguments"] == {} - assert context["locals"] == {"@exception": {"type": "Exception", "fields": {}}} + assert context["locals"] == { + "@exception": { + "type": "Exception", + "fields": { + "args": { + "type": "tuple", + "elements": [{"type": "str", "value": "'test'"}, {"type": "str", "value": "'me'"}], + "size": 2, + }, + "__cause__": {"type": "NoneType", "isNull": True}, + "__context__": {"type": "NoneType", "isNull": True}, + "__suppress_context__": {"type": "bool", "value": "False"}, + }, + } + } assert exc["message"] == "'test', 'me'" assert exc["type"] == "Exception" From 315a48f6f23dd2901533a63ecfff4d1d11daee03 Mon Sep 17 00:00:00 2001 From: "Gabriele N. Tornetta" Date: Thu, 19 Dec 2024 16:46:19 +0000 Subject: [PATCH 65/78] chore(er): include exception hash (#11772) We include the span tag that carries the exception hash, according to the RFC. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/debugging/_exception/replay.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ddtrace/debugging/_exception/replay.py b/ddtrace/debugging/_exception/replay.py index 5b9f1a9f330..080b4cbfc61 100644 --- a/ddtrace/debugging/_exception/replay.py +++ b/ddtrace/debugging/_exception/replay.py @@ -40,7 +40,8 @@ CAPTURE_TRACE_TAG = "_dd.debug.error.trace_captured" # unique exception id -EXCEPTION_ID_TAG = "_dd.debug.error.exception_id" +EXCEPTION_HASH_TAG = "_dd.debug.error.exception_hash" +EXCEPTION_ID_TAG = "_dd.debug.error.exception_capture_id" # link to 
matching snapshot for every frame in the traceback FRAME_SNAPSHOT_ID_TAG = "_dd.debug.error.%d.snapshot_id" @@ -80,9 +81,8 @@ def exception_chain_ident(chain: ExceptionChain) -> int: return h -def limit_exception(chain: ExceptionChain) -> bool: +def limit_exception(exc_ident: int) -> bool: try: - exc_ident = exception_chain_ident(chain) hg = EXCEPTION_IDENT_LIMITER.get(exc_ident) if hg is None: # We haven't seen this exception yet, or it's been evicted @@ -218,7 +218,8 @@ def on_span_exception( # No exceptions to capture return - if limit_exception(chain): + exc_ident = exception_chain_ident(chain) + if limit_exception(exc_ident): # We have seen this exception recently return @@ -272,6 +273,7 @@ def on_span_exception( _tb = _tb.tb_next span.set_tag_str(DEBUG_INFO_TAG, "true") + span.set_tag_str(EXCEPTION_HASH_TAG, str(exc_ident)) span.set_tag_str(EXCEPTION_ID_TAG, str(exc_id)) @classmethod From e9dbe4f74e5ebf607c51ba17f854fc3c2b219f64 Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Thu, 19 Dec 2024 13:02:51 -0500 Subject: [PATCH 66/78] ci: wait for dependent services before running tests (#11780) --- .gitlab/tests.yml | 13 +++-- .riot/requirements/151d7b0.txt | 41 ++++++++++++++ .riot/requirements/1805689.txt | 37 ------------- hatch.toml | 2 +- riotfile.py | 1 + scripts/gen_gitlab_config.py | 21 ++++---- tests/suitespec.yml | 6 +++ tests/wait-for-services.py | 98 ++++++++++++++++++++++++++-------- 8 files changed, 145 insertions(+), 74 deletions(-) create mode 100644 .riot/requirements/151d7b0.txt delete mode 100644 .riot/requirements/1805689.txt diff --git a/.gitlab/tests.yml b/.gitlab/tests.yml index ce1fb8fd0ad..d38a22cf0ff 100644 --- a/.gitlab/tests.yml +++ b/.gitlab/tests.yml @@ -1,7 +1,7 @@ stages: - precheck - - hatch - riot + - hatch variables: RIOT_RUN_CMD: riot -P -v run --exitfirst --pass-env -s @@ -30,6 +30,9 @@ variables: parallel: 4 # DEV: This is the max retries that GitLab currently allows for retry: 2 + before_script: + - !reference 
[.testrunner, before_script] + - pip install riot==0.20.1 script: - export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --ddtrace" - export _DD_CIVISIBILITY_USE_CI_CONTEXT_PROVIDER=true @@ -51,7 +54,7 @@ variables: services: - !reference [.services, testagent] before_script: - - !reference [.testrunner, before_script] + - !reference [.test_base_hatch, before_script] # DEV: All job variables get shared with services, setting `DD_TRACE_AGENT_URL` on the testagent will tell it to forward all requests to the # agent at that host. Therefore setting this as a variable will cause recursive requests to the testagent - export DD_TRACE_AGENT_URL="http://testagent:9126" @@ -88,12 +91,14 @@ build_base_venvs: - !reference [.services, ddagent] # DEV: This is the max retries that GitLab currently allows for retry: 2 - script: + before_script: + - !reference [.testrunner, before_script] - pip install riot==0.20.1 - unset DD_SERVICE - unset DD_ENV - unset DD_TAGS - unset DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED + script: - | hashes=( $(riot list --hash-only "${SUITE_NAME}" | sort | ./.gitlab/ci-split-input.sh) ) if [[ ${#hashes[@]} -eq 0 ]]; then @@ -116,7 +121,7 @@ build_base_venvs: - !reference [.test_base_riot, services] - !reference [.services, testagent] before_script: - - !reference [.testrunner, before_script] + - !reference [.test_base_riot, before_script] # DEV: All job variables get shared with services, setting `DD_TRACE_AGENT_URL` on the testagent will tell it to forward all requests to the # agent at that host. 
Therefore setting this as a variable will cause recursive requests to the testagent - export DD_TRACE_AGENT_URL="http://testagent:9126" diff --git a/.riot/requirements/151d7b0.txt b/.riot/requirements/151d7b0.txt new file mode 100644 index 00000000000..9593b418017 --- /dev/null +++ b/.riot/requirements/151d7b0.txt @@ -0,0 +1,41 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/151d7b0.in +# +amqp==2.6.1 +attrs==24.3.0 +cassandra-driver==3.29.2 +certifi==2024.12.14 +charset-normalizer==3.4.0 +click==8.1.7 +coverage[toml]==7.6.9 +exceptiongroup==1.2.2 +future==1.0.0 +geomet==0.2.1.post1 +hypothesis==6.45.0 +idna==3.10 +importlib-metadata==8.5.0 +iniconfig==2.0.0 +kombu==4.2.2.post1 +mock==5.1.0 +mysql-connector-python==9.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +psycopg2-binary==2.9.10 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 +python-dateutil==2.9.0.post0 +pytz==2024.2 +requests==2.32.3 +six==1.17.0 +sortedcontainers==2.4.0 +tomli==2.2.1 +urllib3==2.2.3 +vertica-python==0.6.14 +vine==1.3.0 +zipp==3.21.0 diff --git a/.riot/requirements/1805689.txt b/.riot/requirements/1805689.txt deleted file mode 100644 index e76e16e1946..00000000000 --- a/.riot/requirements/1805689.txt +++ /dev/null @@ -1,37 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --no-annotate .riot/requirements/1805689.in -# -amqp==2.6.1 -attrs==23.1.0 -cassandra-driver==3.28.0 -click==8.1.7 -coverage[toml]==7.3.4 -exceptiongroup==1.2.0 -future==0.18.3 -geomet==0.2.1.post1 -hypothesis==6.45.0 -importlib-metadata==7.0.0 -iniconfig==2.0.0 -kombu==4.2.2.post1 -mock==5.1.0 -mysql-connector-python==8.2.0 -opentracing==2.4.0 -packaging==23.2 -pluggy==1.3.0 -protobuf==4.21.12 -psycopg2-binary==2.9.9 -pytest==7.4.3 -pytest-cov==4.1.0 -pytest-mock==3.12.0 -pytest-randomly==3.15.0 
-python-dateutil==2.8.2 -pytz==2023.3.post1 -six==1.16.0 -sortedcontainers==2.4.0 -tomli==2.0.1 -vertica-python==0.6.14 -vine==1.3.0 -zipp==3.17.0 diff --git a/hatch.toml b/hatch.toml index 7dae1538613..ff11ec3f743 100644 --- a/hatch.toml +++ b/hatch.toml @@ -124,7 +124,7 @@ extra-dependencies = [ ] [envs.slotscheck.scripts] -_ = [ +test = [ "python -m slotscheck -v ddtrace/", ] diff --git a/riotfile.py b/riotfile.py index 6db9102786f..9b1ba5497e7 100644 --- a/riotfile.py +++ b/riotfile.py @@ -586,6 +586,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "vertica-python": ">=0.6.0,<0.7.0", "kombu": ">=4.2.0,<4.3.0", "pytest-randomly": latest, + "requests": latest, }, ), Venv( diff --git a/scripts/gen_gitlab_config.py b/scripts/gen_gitlab_config.py index c868b0f1c86..22b236ddfc5 100644 --- a/scripts/gen_gitlab_config.py +++ b/scripts/gen_gitlab_config.py @@ -15,7 +15,7 @@ class JobSpec: runner: str pattern: t.Optional[str] = None snapshot: bool = False - services: t.Optional[t.Set[str]] = None + services: t.Optional[t.List[str]] = None env: t.Optional[t.Dict[str, str]] = None parallelism: t.Optional[int] = None retry: t.Optional[int] = None @@ -32,16 +32,25 @@ def __str__(self) -> str: lines.append(f"{self.name}:") lines.append(f" extends: {base}") - if self.services: + services = set(self.services or []) + if services: lines.append(" services:") - _services = [f"!reference [.services, {_}]" for _ in self.services] + _services = [f"!reference [.services, {_}]" for _ in services] if self.snapshot: _services.insert(0, f"!reference [{base}, services]") for service in _services: lines.append(f" - {service}") + wait_for: t.Set[str] = services.copy() + if self.snapshot: + wait_for.add("testagent") + if wait_for: + lines.append(" before_script:") + lines.append(f" - !reference [{base}, before_script]") + lines.append(f" - riot -v run -s --pass-env wait -- {' '.join(wait_for)}") + env = self.env if not env or "SUITE_NAME" not in env: env = 
env or {} @@ -89,7 +98,6 @@ def gen_required_suites() -> None: TESTS_GEN.write_text( (GITLAB / "tests.yml").read_text().replace(r"{{services.yml}}", (GITLAB / "services.yml").read_text()) ) - # Generate the list of suites to run with TESTS_GEN.open("a") as f: for suite in required_suites: @@ -159,11 +167,6 @@ def check(name: str, command: str, paths: t.Set[str]) -> None: command="hatch run meta-testing:meta-testing", paths={"**conftest.py"}, ) - check( - name="slotscheck", - command="hatch run slotscheck:_", - paths={"**.py"}, - ) # ----------------------------------------------------------------------------- diff --git a/tests/suitespec.yml b/tests/suitespec.yml index c6a89720676..4b13005d662 100644 --- a/tests/suitespec.yml +++ b/tests/suitespec.yml @@ -195,6 +195,12 @@ suites: - tests/cache/* runner: riot snapshot: true + slotscheck: + parallelism: 1 + paths: + - 'ddtrace/**/*.py' + runner: hatch + snapshot: false profile: env: DD_TRACE_AGENT_URL: '' diff --git a/tests/wait-for-services.py b/tests/wait-for-services.py index 2f3fc29e7b3..048cb6948a8 100644 --- a/tests/wait-for-services.py +++ b/tests/wait-for-services.py @@ -1,10 +1,16 @@ +import logging +import os import sys import time +import typing as t from cassandra.cluster import Cluster from cassandra.cluster import NoHostAvailable from contrib.config import CASSANDRA_CONFIG +from contrib.config import ELASTICSEARCH_CONFIG +from contrib.config import HTTPBIN_CONFIG from contrib.config import MYSQL_CONFIG +from contrib.config import OPENSEARCH_CONFIG from contrib.config import POSTGRES_CONFIG from contrib.config import RABBITMQ_CONFIG from contrib.config import VERTICA_CONFIG @@ -12,72 +18,83 @@ import mysql.connector from psycopg2 import OperationalError from psycopg2 import connect +import requests import vertica_python -def try_until_timeout(exception): +logging.basicConfig(level=logging.INFO) +log = logging.getLogger(__name__) + + +def try_until_timeout(exception, tries: int = 100, timeout: float = 
0.2, args: t.Optional[t.Dict[str, t.Any]] = None): """Utility decorator that tries to call a check until there is a timeout. The default timeout is about 20 seconds. """ + if not args: + args = {} def wrap(fn): - def wrapper(*args, **kwargs): + def wrapper(**kwargs): err = None - for _ in range(100): + _kwargs = args.copy() + _kwargs.update(kwargs) + + for i in range(tries): try: - fn() + log.info("Attempt %d: %s(%r)", i, fn.__name__, _kwargs) + fn(**_kwargs) except exception as e: err = e - time.sleep(0.2) + time.sleep(timeout) else: break else: if err: raise err + log.info("Succeeded: %s", fn.__name__) return wrapper return wrap -@try_until_timeout(OperationalError) -def check_postgres(): - conn = connect(**POSTGRES_CONFIG) +@try_until_timeout(OperationalError, args={"pg_config": POSTGRES_CONFIG}) +def check_postgres(pg_config): + conn = connect(**pg_config) try: conn.cursor().execute("SELECT 1;") finally: conn.close() -@try_until_timeout(NoHostAvailable) -def check_cassandra(): - with Cluster(**CASSANDRA_CONFIG).connect() as conn: +@try_until_timeout(NoHostAvailable, args={"cassandra_config": CASSANDRA_CONFIG}) +def check_cassandra(cassandra_config): + with Cluster(**cassandra_config).connect() as conn: conn.execute("SELECT now() FROM system.local") -@try_until_timeout(Exception) -def check_mysql(): - conn = mysql.connector.connect(**MYSQL_CONFIG) +@try_until_timeout(Exception, args={"mysql_config": MYSQL_CONFIG}) +def check_mysql(mysql_config): + conn = mysql.connector.connect(**mysql_config) try: conn.cursor().execute("SELECT 1;") finally: conn.close() -@try_until_timeout(Exception) -def check_vertica(): - conn = vertica_python.connect(**VERTICA_CONFIG) +@try_until_timeout(Exception, args={"vertica_config": VERTICA_CONFIG}) +def check_vertica(vertica_config): + conn = vertica_python.connect(**vertica_config) try: conn.cursor().execute("SELECT 1;") finally: conn.close() -@try_until_timeout(Exception) -def check_rabbitmq(): - url = 
"amqp://{user}:{password}@{host}:{port}//".format(**RABBITMQ_CONFIG) +@try_until_timeout(Exception, args={"url": "amqp://{user}:{password}@{host}:{port}//".format(**RABBITMQ_CONFIG)}) +def check_rabbitmq(url): conn = kombu.Connection(url) try: conn.connect() @@ -85,17 +102,52 @@ def check_rabbitmq(): conn.release() +@try_until_timeout(Exception, args={"url": os.environ.get("DD_TRACE_AGENT_URL", "http://localhost:8126")}) +def check_agent(url): + if not url.endswith("/"): + url += "/" + + res = requests.get(url) + if res.status_code not in (404, 200): + raise Exception("Agent not ready") + + +@try_until_timeout(Exception, args={"url": "http://{host}:{port}/".format(**ELASTICSEARCH_CONFIG)}) +def check_elasticsearch(url): + requests.get(url).raise_for_status() + + +@try_until_timeout( + Exception, tries=120, timeout=1, args={"url": "http://{host}:{port}/".format(**OPENSEARCH_CONFIG)} +) # 2 minutes, OpenSearch is slow to start +def check_opensearch(url): + requests.get(url).raise_for_status() + + +@try_until_timeout(Exception, args={"url": "http://{host}:{port}/".format(**HTTPBIN_CONFIG)}) +def check_httpbin(url): + requests.get(url).raise_for_status() + + if __name__ == "__main__": check_functions = { "cassandra": check_cassandra, - "postgres": check_postgres, + "ddagent": check_agent, + "elasticsearch": check_elasticsearch, + "httpbin_local": check_httpbin, "mysql": check_mysql, - "vertica": check_vertica, + "opensearch": check_opensearch, + "postgres": check_postgres, "rabbitmq": check_rabbitmq, + "testagent": check_agent, + "vertica": check_vertica, } if len(sys.argv) >= 2: for service in sys.argv[1:]: - check_functions[service]() + if service not in check_functions: + log.warning("Unknown service: %s", service) + else: + check_functions[service]() else: print("usage: python {} SERVICE_NAME".format(sys.argv[0])) sys.exit(1) From 89d82c3f11305f0ae4025fa5f7349342846b1bd2 Mon Sep 17 00:00:00 2001 From: Emmett Butler <723615+emmettbutler@users.noreply.github.com> 
Date: Thu, 19 Dec 2024 10:15:25 -0800 Subject: [PATCH 67/78] docs: add details to the release note about 3.13 (#11792) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change lists everything that is currently known not to work with Python 3.13 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [ ] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: Vítor De Araújo --- .../notes/threethirteen-d40d659d8939fe5e.yaml | 50 ++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml b/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml index 
837858691fe..3a229695abd 100644 --- a/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml +++ b/releasenotes/notes/threethirteen-d40d659d8939fe5e.yaml @@ -1,4 +1,52 @@ --- upgrade: - | - Makes the library compatible with Python 3.13 + Makes the library compatible with Python 3.13. + + The following limitations currently apply to support for Python 3.13: + - ``ddtrace`` is not supported on Windows with Python 3.13 + - Appsec Threat Detection is not tested against Django, Flask, or FastAPI with 3.13 + - Automatic Service Naming is not tested with 3.13 + - The ``ddtrace-run`` entrypoint is not tested with 3.13 + - The following products are not tested with 3.13: + - Code Coverage + - Appsec IAST + - Data Streams Monitoring + - CI Visibility + - Continuous Profiling + - The following integrations are not tested with 3.13: + - aiobotocore + - aiomysql + - aiopg + - anthropic + - asyncpg + - avro + - botocore + - confluent-kafka + - consul + - django + - falcon + - fastapi + - freezegun + - gevent + - google_generativeai + - grpcio + - gunicorn + - langchain + - mysqlclient + - opentracing + - protobuf + - psycopg + - psycopg2 + - pymysql + - pyodbc + - pytest + - pytest-bdd + - pytest-benchmark + - sanic + - selenium + - sqlalchemy + - sqlite3 + - starlette + - tornado + - vertexai From 79069a3b41828cf194c279dcfc6b155a64f8b080 Mon Sep 17 00:00:00 2001 From: Munir Abdinur Date: Thu, 19 Dec 2024 13:31:41 -0500 Subject: [PATCH 68/78] fix(library): catch exceptions raised while enabling ddtrace integrations (#11759) ## Description - Improves the error message generated when `ddtrace` failed to patch/enable an integration. - Ensure patching modules and sub-modules are wrapped in a try-except. The ddtrace library should not crash an application if an integration can not be patched. 
## Motivation Prevent issues like this: https://github.com/DataDog/dd-trace-py/issues/11603 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/_monkey.py | 16 +++++++++------- ...efactor-patch-error-ssi-1a2e9fe206d6d6df.yaml | 4 ++++ tests/telemetry/test_telemetry.py | 6 ++---- 3 files changed, 15 insertions(+), 11 deletions(-) create mode 100644 releasenotes/notes/refactor-patch-error-ssi-1a2e9fe206d6d6df.yaml diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py index 8dd83558c83..488211e46b1 100644 --- a/ddtrace/_monkey.py +++ b/ddtrace/_monkey.py @@ -173,17 +173,22 @@ def on_import(hook): path = "%s.%s" % (prefix, module) 
try: imported_module = importlib.import_module(path) + imported_module.patch() + if hasattr(imported_module, "patch_submodules"): + imported_module.patch_submodules(patch_indicator) except Exception as e: if raise_errors: raise - error_msg = "failed to import ddtrace module %r when patching on import" % (path,) - log.error(error_msg, exc_info=True) - telemetry.telemetry_writer.add_integration(module, False, PATCH_MODULES.get(module) is True, error_msg) + log.error( + "failed to enable ddtrace support for %s: %s", + module, + str(e), + ) + telemetry.telemetry_writer.add_integration(module, False, PATCH_MODULES.get(module) is True, str(e)) telemetry.telemetry_writer.add_count_metric( "tracers", "integration_errors", 1, (("integration_name", module), ("error_type", type(e).__name__)) ) else: - imported_module.patch() if hasattr(imported_module, "get_versions"): versions = imported_module.get_versions() for name, v in versions.items(): @@ -196,9 +201,6 @@ def on_import(hook): module, True, PATCH_MODULES.get(module) is True, "", version=version ) - if hasattr(imported_module, "patch_submodules"): - imported_module.patch_submodules(patch_indicator) - return on_import diff --git a/releasenotes/notes/refactor-patch-error-ssi-1a2e9fe206d6d6df.yaml b/releasenotes/notes/refactor-patch-error-ssi-1a2e9fe206d6d6df.yaml new file mode 100644 index 00000000000..8afc2e7595f --- /dev/null +++ b/releasenotes/notes/refactor-patch-error-ssi-1a2e9fe206d6d6df.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Integrations: Improved error handling for exceptions raised during the startup of ddtrace integrations. This reduces the likelihood of the ddtrace library raising unhandled exceptions. 
\ No newline at end of file diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index d767090f6d2..558e9961afc 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -243,14 +243,12 @@ def test_handled_integration_error(test_agent_session, run_python_code_in_subpro _, stderr, status, _ = run_python_code_in_subprocess(code, env=env) assert status == 0, stderr - expected_stderr = b"failed to import" - assert expected_stderr in stderr + assert b"failed to enable ddtrace support for sqlite3" in stderr integrations_events = test_agent_session.get_events("app-integrations-change", subprocess=True) assert len(integrations_events) == 1 assert ( - integrations_events[0]["payload"]["integrations"][0]["error"] - == "failed to import ddtrace module 'ddtrace.contrib.sqlite3' when patching on import" + integrations_events[0]["payload"]["integrations"][0]["error"] == "module 'sqlite3' has no attribute 'connect'" ) # Get metric containing the integration error From 90dbdd33c362001325eb15ead8fa6e664ecb1753 Mon Sep 17 00:00:00 2001 From: Emmett Butler <723615+emmettbutler@users.noreply.github.com> Date: Thu, 19 Dec 2024 10:41:45 -0800 Subject: [PATCH 69/78] chore: enable tests under 3.13 for ddtrace-run (#11793) Enable tests of ddtrace-run against Py3.13, with Profiling enablement skipped because Profiling doesn't support 3.13 yet. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .riot/requirements/afc1791.txt | 27 +++++++++++++++++++ .../313-ddtracerun-e34ef8d7496091b3.yaml | 4 +++ riotfile.py | 2 +- tests/commands/test_runner.py | 1 + 4 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 .riot/requirements/afc1791.txt create mode 100644 releasenotes/notes/313-ddtracerun-e34ef8d7496091b3.yaml diff --git a/.riot/requirements/afc1791.txt b/.riot/requirements/afc1791.txt new file mode 100644 index 00000000000..2a3cfd4447d --- /dev/null +++ b/.riot/requirements/afc1791.txt @@ -0,0 +1,27 @@ +# +# This file is autogenerated by pip-compile with Python 
3.13 +# by the following command: +# +# pip-compile --no-annotate .riot/requirements/afc1791.in +# +attrs==24.3.0 +coverage[toml]==7.6.9 +gevent==24.11.1 +greenlet==3.1.1 +hypothesis==6.45.0 +iniconfig==2.0.0 +mock==5.1.0 +opentracing==2.4.0 +packaging==24.2 +pluggy==1.5.0 +pytest==8.3.4 +pytest-cov==6.0.0 +pytest-mock==3.14.0 +pytest-randomly==3.16.0 +redis==5.2.1 +sortedcontainers==2.4.0 +zope-event==5.0 +zope-interface==7.2 + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/releasenotes/notes/313-ddtracerun-e34ef8d7496091b3.yaml b/releasenotes/notes/313-ddtracerun-e34ef8d7496091b3.yaml new file mode 100644 index 00000000000..50cf1a7d196 --- /dev/null +++ b/releasenotes/notes/313-ddtracerun-e34ef8d7496091b3.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Enables tests of the ``ddtrace-run`` entrypoint with Python 3.13 diff --git a/riotfile.py b/riotfile.py index 9b1ba5497e7..e7a078a5425 100644 --- a/riotfile.py +++ b/riotfile.py @@ -521,7 +521,7 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT command="pytest {cmdargs} --no-cov tests/commands/test_runner.py", venvs=[ Venv( - pys=select_pys(max_version="3.12"), + pys=select_pys(), pkgs={ "redis": latest, "gevent": latest, diff --git a/tests/commands/test_runner.py b/tests/commands/test_runner.py index 8c5dd0bd7f8..b6ad3cbd755 100644 --- a/tests/commands/test_runner.py +++ b/tests/commands/test_runner.py @@ -229,6 +229,7 @@ def test_debug_mode(self): assert b"debug mode has been enabled for the ddtrace logger" in p.stderr.read() +@pytest.mark.skipif(sys.version_info > (3, 12), reason="Profiling unsupported with 3.13") def test_env_profiling_enabled(monkeypatch): """DD_PROFILING_ENABLED allows enabling the global profiler.""" # Off by default From 099247ef10b10f4b879f825e45929511b09a3668 Mon Sep 17 00:00:00 2001 From: kyle Date: Thu, 19 Dec 2024 14:37:06 -0500 Subject: [PATCH 70/78] chore(llmobs): refactor trace processor tests 
(#11784) The trace processor tests intermingled business logic with the implementation of the trace processor. To separate them out, we introduce a few testing fixtures useful for dealing with tracing and capturing llm obs events. This reduces the amount of mocking to zero and allows us to test more realistically. There's a bit of a nasty hack to make sure the configs are updated in llmobs modules that grab a reference to it but I'll follow up to clean that up as well. This reduces most of the tests from this: ```python def test_input_parameters_are_set(): """Test that input parameters are set on the span event if they are present on the span.""" dummy_tracer = DummyTracer() mock_llmobs_span_writer = mock.MagicMock() with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: llm_span._set_ctx_item(SPAN_KIND, "llm") llm_span._set_ctx_item(INPUT_PARAMETERS, {"key": "value"}) tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) assert tp._llmobs_span_event(llm_span)[0]["meta"]["input"]["parameters"] == {"key": "value"} ``` to this: ```python def test_input_parameters_are_set(tracer, llmobs_events): """Test that input parameters are set on the span event if they are present on the span.""" with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: llm_span._set_ctx_item(const.SPAN_KIND, "llm") llm_span._set_ctx_item(const.INPUT_PARAMETERS, {"key": "value"}) assert llmobs_events[0]["meta"]["input"]["parameters"] == {"key": "value"} ``` --- tests/llmobs/conftest.py | 47 +++ tests/llmobs/test_llmobs.py | 254 +++++++++++++ tests/llmobs/test_llmobs_trace_processor.py | 373 -------------------- 3 files changed, 301 insertions(+), 373 deletions(-) create mode 100644 tests/llmobs/test_llmobs.py diff --git a/tests/llmobs/conftest.py b/tests/llmobs/conftest.py index 0b0ce8b7964..a7d467b3985 100644 --- a/tests/llmobs/conftest.py +++ b/tests/llmobs/conftest.py @@ 
-6,6 +6,7 @@ from ddtrace.internal.utils.http import Response from ddtrace.llmobs import LLMObs as llmobs_service from ddtrace.llmobs._evaluators.ragas.faithfulness import RagasFaithfulnessEvaluator +from ddtrace.llmobs._writer import LLMObsSpanWriter from tests.llmobs._utils import logs_vcr from tests.utils import DummyTracer from tests.utils import override_env @@ -212,3 +213,49 @@ def mock_ragas_evaluator(mock_llmobs_eval_metric_writer, ragas): LLMObsMockRagas.return_value = 1.0 yield RagasFaithfulnessEvaluator patcher.stop() + + +@pytest.fixture +def tracer(): + return DummyTracer() + + +@pytest.fixture +def llmobs_env(): + return { + "DD_API_KEY": "", + "DD_LLMOBS_ML_APP": "unnamed-ml-app", + } + + +class TestLLMObsSpanWriter(LLMObsSpanWriter): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.events = [] + + def enqueue(self, event): + self.events.append(event) + + +@pytest.fixture +def llmobs_span_writer(): + yield TestLLMObsSpanWriter(interval=1.0, timeout=1.0) + + +@pytest.fixture +def llmobs(monkeypatch, tracer, llmobs_env, llmobs_span_writer): + for env, val in llmobs_env.items(): + monkeypatch.setenv(env, val) + + # TODO: remove once rest of tests are moved off of global config tampering + with override_global_config(dict(_llmobs_ml_app=llmobs_env.get("DD_LLMOBS_ML_APP"))): + llmobs_service.enable(_tracer=tracer) + llmobs_service._instance._llmobs_span_writer = llmobs_span_writer + llmobs_service._instance._trace_processor._span_writer = llmobs_span_writer + yield llmobs + llmobs_service.disable() + + +@pytest.fixture +def llmobs_events(llmobs, llmobs_span_writer): + return llmobs_span_writer.events diff --git a/tests/llmobs/test_llmobs.py b/tests/llmobs/test_llmobs.py new file mode 100644 index 00000000000..1bae7efe9ed --- /dev/null +++ b/tests/llmobs/test_llmobs.py @@ -0,0 +1,254 @@ +import mock +import pytest + +from ddtrace.ext import SpanTypes +from ddtrace.llmobs import _constants as const +from 
ddtrace.llmobs._utils import _get_llmobs_parent_id +from ddtrace.llmobs._utils import _get_session_id +from tests.llmobs._utils import _expected_llmobs_llm_span_event + + +@pytest.fixture +def mock_logs(): + with mock.patch("ddtrace.llmobs._trace_processor.log") as mock_logs: + yield mock_logs + + +class TestMLApp: + @pytest.mark.parametrize("llmobs_env", [{"DD_LLMOBS_ML_APP": ""}]) + def test_tag_defaults_to_env_var(self, tracer, llmobs_env, llmobs_events): + """Test that no ml_app defaults to the environment variable DD_LLMOBS_ML_APP.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + assert "ml_app:" in llmobs_events[0]["tags"] + + @pytest.mark.parametrize("llmobs_env", [{"DD_LLMOBS_ML_APP": ""}]) + def test_tag_overrides_env_var(self, tracer, llmobs_env, llmobs_events): + """Test that when ml_app is set on the span, it overrides the environment variable DD_LLMOBS_ML_APP.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.ML_APP, "test-ml-app") + assert "ml_app:test-ml-app" in llmobs_events[0]["tags"] + + def test_propagates_ignore_non_llmobs_spans(self, tracer, llmobs_events): + """ + Test that when ml_app is not set, we propagate from nearest LLMObs ancestor + even if there are non-LLMObs spans in between. 
+ """ + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.ML_APP, "test-ml-app") + with tracer.trace("child_span"): + with tracer.trace("llm_grandchild_span", span_type=SpanTypes.LLM) as grandchild_span: + grandchild_span._set_ctx_item(const.SPAN_KIND, "llm") + with tracer.trace("great_grandchild_span", span_type=SpanTypes.LLM) as great_grandchild_span: + great_grandchild_span._set_ctx_item(const.SPAN_KIND, "llm") + assert len(llmobs_events) == 3 + for llmobs_event in llmobs_events: + assert "ml_app:test-ml-app" in llmobs_event["tags"] + + +def test_set_correct_parent_id(tracer): + """Test that the parent_id is set as the span_id of the nearest LLMObs span in the span's ancestor tree.""" + with tracer.trace("root"): + with tracer.trace("llm_span", span_type=SpanTypes.LLM) as llm_span: + pass + assert _get_llmobs_parent_id(llm_span) is None + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: + with tracer.trace("child_span") as child_span: + with tracer.trace("llm_span", span_type=SpanTypes.LLM) as grandchild_span: + pass + assert _get_llmobs_parent_id(root_span) is None + assert _get_llmobs_parent_id(child_span) == str(root_span.span_id) + assert _get_llmobs_parent_id(grandchild_span) == str(root_span.span_id) + + +class TestSessionId: + def test_propagate_from_ancestors(self, tracer): + """ + Test that session_id is propagated from the nearest LLMObs span in the span's ancestor tree + if no session_id is not set on the span itself. 
+ """ + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: + root_span._set_ctx_item(const.SESSION_ID, "test_session_id") + with tracer.trace("child_span"): + with tracer.trace("llm_span", span_type=SpanTypes.LLM) as llm_span: + pass + assert _get_session_id(llm_span) == "test_session_id" + + def test_if_set_manually(self, tracer): + """Test that session_id is extracted from the span if it is already set manually.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: + root_span._set_ctx_item(const.SESSION_ID, "test_session_id") + with tracer.trace("child_span"): + with tracer.trace("llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SESSION_ID, "test_different_session_id") + assert _get_session_id(llm_span) == "test_different_session_id" + + def test_propagates_ignore_non_llmobs_spans(self, tracer, llmobs_events): + """ + Test that when session_id is not set, we propagate from nearest LLMObs ancestor + even if there are non-LLMObs spans in between. 
+ """ + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.SESSION_ID, "session-123") + with tracer.trace("child_span"): + with tracer.trace("llm_grandchild_span", span_type=SpanTypes.LLM) as grandchild_span: + grandchild_span._set_ctx_item(const.SPAN_KIND, "llm") + with tracer.trace("great_grandchild_span", span_type=SpanTypes.LLM) as great_grandchild_span: + great_grandchild_span._set_ctx_item(const.SPAN_KIND, "llm") + + llm_event, grandchild_event, great_grandchild_event = llmobs_events + assert llm_event["session_id"] == "session-123" + assert grandchild_event["session_id"] == "session-123" + assert great_grandchild_event["session_id"] == "session-123" + + +def test_input_value_is_set(tracer, llmobs_events): + """Test that input value is set on the span event if they are present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.INPUT_VALUE, "value") + assert llmobs_events[0]["meta"]["input"]["value"] == "value" + + +def test_input_messages_are_set(tracer, llmobs_events): + """Test that input messages are set on the span event if they are present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.INPUT_MESSAGES, [{"content": "message", "role": "user"}]) + assert llmobs_events[0]["meta"]["input"]["messages"] == [{"content": "message", "role": "user"}] + + +def test_input_parameters_are_set(tracer, llmobs_events): + """Test that input parameters are set on the span event if they are present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.INPUT_PARAMETERS, {"key": "value"}) + assert 
llmobs_events[0]["meta"]["input"]["parameters"] == {"key": "value"} + + +def test_output_messages_are_set(tracer, llmobs_events): + """Test that output messages are set on the span event if they are present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.OUTPUT_MESSAGES, [{"content": "message", "role": "user"}]) + assert llmobs_events[0]["meta"]["output"]["messages"] == [{"content": "message", "role": "user"}] + + +def test_output_value_is_set(tracer, llmobs_events): + """Test that output value is set on the span event if they are present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.OUTPUT_VALUE, "value") + assert llmobs_events[0]["meta"]["output"]["value"] == "value" + + +def test_prompt_is_set(tracer, llmobs_events): + """Test that prompt is set on the span event if they are present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.INPUT_PROMPT, {"variables": {"var1": "var2"}}) + assert llmobs_events[0]["meta"]["input"]["prompt"] == {"variables": {"var1": "var2"}} + + +def test_prompt_is_not_set_for_non_llm_spans(tracer, llmobs_events): + """Test that prompt is NOT set on the span event if the span is not an LLM span.""" + with tracer.trace("task_span", span_type=SpanTypes.LLM) as task_span: + task_span._set_ctx_item(const.SPAN_KIND, "task") + task_span._set_ctx_item(const.INPUT_VALUE, "ival") + task_span._set_ctx_item(const.INPUT_PROMPT, {"variables": {"var1": "var2"}}) + assert llmobs_events[0]["meta"]["input"].get("prompt") is None + + +def test_metadata_is_set(tracer, llmobs_events): + """Test that metadata is set on the span event if it is present on the span.""" + with 
tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.METADATA, {"key": "value"}) + assert llmobs_events[0]["meta"]["metadata"] == {"key": "value"} + + +def test_metrics_are_set(tracer, llmobs_events): + """Test that metadata is set on the span event if it is present on the span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.METRICS, {"tokens": 100}) + assert llmobs_events[0]["metrics"] == {"tokens": 100} + + +def test_langchain_span_name_is_set_to_class_name(tracer, llmobs_events): + """Test span names for langchain auto-instrumented spans is set correctly.""" + with tracer.trace(const.LANGCHAIN_APM_SPAN_NAME, resource="expected_name", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + assert llmobs_events[0]["name"] == "expected_name" + + +def test_error_is_set(tracer, llmobs_events): + """Test that error is set on the span event if it is present on the span.""" + with pytest.raises(ValueError): + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + raise ValueError("error") + span_event = llmobs_events[0] + assert span_event["meta"]["error.message"] == "error" + assert "ValueError" in span_event["meta"]["error.type"] + assert 'raise ValueError("error")' in span_event["meta"]["error.stack"] + + +def test_model_provider_defaults_to_custom(tracer, llmobs_events): + """Test that model provider defaults to "custom" if not provided.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.MODEL_NAME, "model_name") + span_event = llmobs_events[0] + assert span_event["meta"]["model_name"] == "model_name" + assert span_event["meta"]["model_provider"] == "custom" 
+ + +def test_model_not_set_if_not_llm_kind_span(tracer, llmobs_events): + """Test that model name and provider are not set if non-LLM span.""" + with tracer.trace("root_workflow_span", span_type=SpanTypes.LLM) as span: + span._set_ctx_item(const.SPAN_KIND, "workflow") + span._set_ctx_item(const.MODEL_NAME, "model_name") + span_event = llmobs_events[0] + assert "model_name" not in span_event["meta"] + assert "model_provider" not in span_event["meta"] + + +def test_model_and_provider_are_set(tracer, llmobs_events): + """Test that model and provider are set on the span event if they are present on the LLM-kind span.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + llm_span._set_ctx_item(const.SPAN_KIND, "llm") + llm_span._set_ctx_item(const.MODEL_NAME, "model_name") + llm_span._set_ctx_item(const.MODEL_PROVIDER, "model_provider") + span_event = llmobs_events[0] + assert span_event["meta"]["model_name"] == "model_name" + assert span_event["meta"]["model_provider"] == "model_provider" + + +def test_malformed_span_logs_error_instead_of_raising(mock_logs, tracer, llmobs_events): + """Test that trying to create a span event from a malformed span will log an error instead of crashing.""" + with tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: + # span does not have SPAN_KIND tag + pass + mock_logs.error.assert_called_once_with( + "Error generating LLMObs span event for span %s, likely due to malformed span", llm_span + ) + assert len(llmobs_events) == 0 + + +def test_processor_only_creates_llmobs_span_event(tracer, llmobs_events): + """Test that the LLMObsTraceProcessor only creates LLMObs span events for LLM span types.""" + with tracer.trace("root_llm_span", service="tests.llmobs", span_type=SpanTypes.LLM) as root_span: + root_span._set_ctx_item(const.SPAN_KIND, "llm") + with tracer.trace("child_span"): + with tracer.trace("llm_span", span_type=SpanTypes.LLM) as grandchild_span: +
grandchild_span._set_ctx_item(const.SPAN_KIND, "llm") + expected_grandchild_llmobs_span = _expected_llmobs_llm_span_event(grandchild_span, "llm") + expected_grandchild_llmobs_span["parent_id"] = str(root_span.span_id) + + assert len(llmobs_events) == 2 + assert llmobs_events[0] == _expected_llmobs_llm_span_event(root_span, "llm") + assert llmobs_events[1] == expected_grandchild_llmobs_span diff --git a/tests/llmobs/test_llmobs_trace_processor.py b/tests/llmobs/test_llmobs_trace_processor.py index 8eb4c4d6fb3..b55286d49c8 100644 --- a/tests/llmobs/test_llmobs_trace_processor.py +++ b/tests/llmobs/test_llmobs_trace_processor.py @@ -1,36 +1,12 @@ import mock -import pytest from ddtrace._trace.span import Span from ddtrace.ext import SpanTypes -from ddtrace.llmobs._constants import INPUT_MESSAGES -from ddtrace.llmobs._constants import INPUT_PARAMETERS -from ddtrace.llmobs._constants import INPUT_PROMPT -from ddtrace.llmobs._constants import INPUT_VALUE -from ddtrace.llmobs._constants import LANGCHAIN_APM_SPAN_NAME -from ddtrace.llmobs._constants import METADATA -from ddtrace.llmobs._constants import METRICS -from ddtrace.llmobs._constants import ML_APP -from ddtrace.llmobs._constants import MODEL_NAME -from ddtrace.llmobs._constants import MODEL_PROVIDER -from ddtrace.llmobs._constants import OUTPUT_MESSAGES -from ddtrace.llmobs._constants import OUTPUT_VALUE -from ddtrace.llmobs._constants import SESSION_ID from ddtrace.llmobs._constants import SPAN_KIND from ddtrace.llmobs._trace_processor import LLMObsTraceProcessor -from ddtrace.llmobs._utils import _get_llmobs_parent_id -from ddtrace.llmobs._utils import _get_session_id -from tests.llmobs._utils import _expected_llmobs_llm_span_event -from tests.utils import DummyTracer from tests.utils import override_global_config -@pytest.fixture -def mock_logs(): - with mock.patch("ddtrace.llmobs._trace_processor.log") as mock_logs: - yield mock_logs - - def test_processor_returns_all_traces_by_default(): """Test that the 
LLMObsTraceProcessor returns all traces by default.""" trace_filter = LLMObsTraceProcessor(llmobs_span_writer=mock.MagicMock()) @@ -58,352 +34,3 @@ def test_processor_returns_none_in_agentless_mode(): root_llm_span._set_ctx_item(SPAN_KIND, "llm") trace1 = [root_llm_span] assert trace_filter.process_trace(trace1) is None - - -def test_processor_creates_llmobs_span_event(): - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - mock_llmobs_span_writer = mock.MagicMock() - trace_filter = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - root_llm_span = Span(name="root", span_type=SpanTypes.LLM) - root_llm_span._set_ctx_item(SPAN_KIND, "llm") - trace = [root_llm_span] - trace_filter.process_trace(trace) - assert mock_llmobs_span_writer.enqueue.call_count == 1 - mock_llmobs_span_writer.assert_has_calls( - [mock.call.enqueue(_expected_llmobs_llm_span_event(root_llm_span, "llm", tags={"service": ""}))] - ) - - -def test_processor_only_creates_llmobs_span_event(): - """Test that the LLMObsTraceProcessor only creates LLMObs span events for LLM span types.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - trace_filter = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: - root_span._set_ctx_item(SPAN_KIND, "llm") - with dummy_tracer.trace("child_span") as child_span: - with dummy_tracer.trace("llm_span", span_type=SpanTypes.LLM) as grandchild_span: - grandchild_span._set_ctx_item(SPAN_KIND, "llm") - trace = [root_span, child_span, grandchild_span] - expected_grandchild_llmobs_span = _expected_llmobs_llm_span_event(grandchild_span, "llm") - expected_grandchild_llmobs_span["parent_id"] = str(root_span.span_id) - trace_filter.process_trace(trace) - assert mock_llmobs_span_writer.enqueue.call_count == 2 - mock_llmobs_span_writer.assert_has_calls( - [ - 
mock.call.enqueue(_expected_llmobs_llm_span_event(root_span, "llm")), - mock.call.enqueue(expected_grandchild_llmobs_span), - ] - ) - - -def test_set_correct_parent_id(): - """Test that the parent_id is set as the span_id of the nearest LLMObs span in the span's ancestor tree.""" - dummy_tracer = DummyTracer() - with dummy_tracer.trace("root"): - with dummy_tracer.trace("llm_span", span_type=SpanTypes.LLM) as llm_span: - pass - assert _get_llmobs_parent_id(llm_span) is None - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: - with dummy_tracer.trace("child_span") as child_span: - with dummy_tracer.trace("llm_span", span_type=SpanTypes.LLM) as grandchild_span: - pass - assert _get_llmobs_parent_id(root_span) is None - assert _get_llmobs_parent_id(child_span) == str(root_span.span_id) - assert _get_llmobs_parent_id(grandchild_span) == str(root_span.span_id) - - -def test_propagate_session_id_from_ancestors(): - """ - Test that session_id is propagated from the nearest LLMObs span in the span's ancestor tree - if no session_id is not set on the span itself. 
- """ - dummy_tracer = DummyTracer() - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: - root_span._set_ctx_item(SESSION_ID, "test_session_id") - with dummy_tracer.trace("child_span"): - with dummy_tracer.trace("llm_span", span_type=SpanTypes.LLM) as llm_span: - pass - assert _get_session_id(llm_span) == "test_session_id" - - -def test_session_id_if_set_manually(): - """Test that session_id is extracted from the span if it is already set manually.""" - dummy_tracer = DummyTracer() - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as root_span: - root_span._set_ctx_item(SESSION_ID, "test_session_id") - with dummy_tracer.trace("child_span"): - with dummy_tracer.trace("llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SESSION_ID, "test_different_session_id") - assert _get_session_id(llm_span) == "test_different_session_id" - - -def test_session_id_propagates_ignore_non_llmobs_spans(): - """ - Test that when session_id is not set, we propagate from nearest LLMObs ancestor - even if there are non-LLMObs spans in between. 
- """ - dummy_tracer = DummyTracer() - with override_global_config(dict(_llmobs_ml_app="")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(SESSION_ID, "session-123") - with dummy_tracer.trace("child_span"): - with dummy_tracer.trace("llm_grandchild_span", span_type=SpanTypes.LLM) as grandchild_span: - grandchild_span._set_ctx_item(SPAN_KIND, "llm") - with dummy_tracer.trace("great_grandchild_span", span_type=SpanTypes.LLM) as great_grandchild_span: - great_grandchild_span._set_ctx_item(SPAN_KIND, "llm") - tp = LLMObsTraceProcessor(dummy_tracer._writer) - llm_span_event, _ = tp._llmobs_span_event(llm_span) - grandchild_span_event, _ = tp._llmobs_span_event(grandchild_span) - great_grandchild_span_event, _ = tp._llmobs_span_event(great_grandchild_span) - assert llm_span_event["session_id"] == "session-123" - assert grandchild_span_event["session_id"] == "session-123" - assert great_grandchild_span_event["session_id"] == "session-123" - - -def test_ml_app_tag_defaults_to_env_var(): - """Test that no ml_app defaults to the environment variable DD_LLMOBS_ML_APP.""" - dummy_tracer = DummyTracer() - with override_global_config(dict(_llmobs_ml_app="")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - pass - tp = LLMObsTraceProcessor(dummy_tracer._writer) - span_event, _ = tp._llmobs_span_event(llm_span) - assert "ml_app:" in span_event["tags"] - - -def test_ml_app_tag_overrides_env_var(): - """Test that when ml_app is set on the span, it overrides the environment variable DD_LLMOBS_ML_APP.""" - dummy_tracer = DummyTracer() - with override_global_config(dict(_llmobs_ml_app="")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(ML_APP, "test-ml-app") - tp = LLMObsTraceProcessor(dummy_tracer._writer) 
- span_event, _ = tp._llmobs_span_event(llm_span) - assert "ml_app:test-ml-app" in span_event["tags"] - - -def test_ml_app_propagates_ignore_non_llmobs_spans(): - """ - Test that when ml_app is not set, we propagate from nearest LLMObs ancestor - even if there are non-LLMObs spans in between. - """ - dummy_tracer = DummyTracer() - with override_global_config(dict(_llmobs_ml_app="")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(ML_APP, "test-ml-app") - with dummy_tracer.trace("child_span"): - with dummy_tracer.trace("llm_grandchild_span", span_type=SpanTypes.LLM) as grandchild_span: - grandchild_span._set_ctx_item(SPAN_KIND, "llm") - with dummy_tracer.trace("great_grandchild_span", span_type=SpanTypes.LLM) as great_grandchild_span: - great_grandchild_span._set_ctx_item(SPAN_KIND, "llm") - tp = LLMObsTraceProcessor(dummy_tracer._writer) - llm_span_event, _ = tp._llmobs_span_event(llm_span) - grandchild_span_event, _ = tp._llmobs_span_event(grandchild_span) - great_grandchild_span_event, _ = tp._llmobs_span_event(great_grandchild_span) - assert "ml_app:test-ml-app" in llm_span_event["tags"] - assert "ml_app:test-ml-app" in grandchild_span_event["tags"] - assert "ml_app:test-ml-app" in great_grandchild_span_event["tags"] - - -def test_malformed_span_logs_error_instead_of_raising(mock_logs): - """Test that a trying to create a span event from a malformed span will log an error instead of crashing.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - # span does not have SPAN_KIND tag - pass - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - tp.process_trace([llm_span]) - mock_logs.error.assert_called_once_with( - "Error generating LLMObs span event for span %s, likely due to malformed span", llm_span - ) - 
mock_llmobs_span_writer.enqueue.assert_not_called() - - -def test_model_and_provider_are_set(): - """Test that model and provider are set on the span event if they are present on the LLM-kind span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(MODEL_NAME, "model_name") - llm_span._set_ctx_item(MODEL_PROVIDER, "model_provider") - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - span_event, _ = tp._llmobs_span_event(llm_span) - assert span_event["meta"]["model_name"] == "model_name" - assert span_event["meta"]["model_provider"] == "model_provider" - - -def test_model_provider_defaults_to_custom(): - """Test that model provider defaults to "custom" if not provided.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(MODEL_NAME, "model_name") - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - span_event, _ = tp._llmobs_span_event(llm_span) - assert span_event["meta"]["model_name"] == "model_name" - assert span_event["meta"]["model_provider"] == "custom" - - -def test_model_not_set_if_not_llm_kind_span(): - """Test that model name and provider not set if non-LLM span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_workflow_span", span_type=SpanTypes.LLM) as span: - span._set_ctx_item(SPAN_KIND, "workflow") - span._set_ctx_item(MODEL_NAME, "model_name") - tp = 
LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - span_event, _ = tp._llmobs_span_event(span) - assert "model_name" not in span_event["meta"] - assert "model_provider" not in span_event["meta"] - - -def test_input_messages_are_set(): - """Test that input messages are set on the span event if they are present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(INPUT_MESSAGES, [{"content": "message", "role": "user"}]) - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["meta"]["input"]["messages"] == [ - {"content": "message", "role": "user"} - ] - - -def test_input_value_is_set(): - """Test that input value is set on the span event if they are present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(INPUT_VALUE, "value") - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["meta"]["input"]["value"] == "value" - - -def test_input_parameters_are_set(): - """Test that input parameters are set on the span event if they are present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(INPUT_PARAMETERS, {"key": "value"}) - tp = 
LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["meta"]["input"]["parameters"] == {"key": "value"} - - -def test_output_messages_are_set(): - """Test that output messages are set on the span event if they are present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(OUTPUT_MESSAGES, [{"content": "message", "role": "user"}]) - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["meta"]["output"]["messages"] == [ - {"content": "message", "role": "user"} - ] - - -def test_output_value_is_set(): - """Test that output value is set on the span event if they are present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(OUTPUT_VALUE, "value") - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["meta"]["output"]["value"] == "value" - - -def test_prompt_is_set(): - """Test that prompt is set on the span event if they are present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(INPUT_PROMPT, {"variables": {"var1": "var2"}}) - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert 
tp._llmobs_span_event(llm_span)[0]["meta"]["input"]["prompt"] == {"variables": {"var1": "var2"}} - - -def test_prompt_is_not_set_for_non_llm_spans(): - """Test that prompt is NOT set on the span event if the span is not an LLM span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("task_span", span_type=SpanTypes.LLM) as task_span: - task_span._set_ctx_item(SPAN_KIND, "task") - task_span._set_ctx_item(INPUT_VALUE, "ival") - task_span._set_ctx_item(INPUT_PROMPT, {"variables": {"var1": "var2"}}) - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(task_span)[0]["meta"]["input"].get("prompt") is None - - -def test_metadata_is_set(): - """Test that metadata is set on the span event if it is present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(METADATA, {"key": "value"}) - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["meta"]["metadata"] == {"key": "value"} - - -def test_metrics_are_set(): - """Test that metadata is set on the span event if it is present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - llm_span._set_ctx_item(METRICS, {"tokens": 100}) - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["metrics"] == {"tokens": 100} - - -def 
test_langchain_span_name_is_set_to_class_name(): - """Test span names for langchain auto-instrumented spans is set correctly.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with dummy_tracer.trace(LANGCHAIN_APM_SPAN_NAME, resource="expected_name", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - assert tp._llmobs_span_event(llm_span)[0]["name"] == "expected_name" - - -def test_error_is_set(): - """Test that error is set on the span event if it is present on the span.""" - dummy_tracer = DummyTracer() - mock_llmobs_span_writer = mock.MagicMock() - with override_global_config(dict(_llmobs_ml_app="unnamed-ml-app")): - with pytest.raises(ValueError): - with dummy_tracer.trace("root_llm_span", span_type=SpanTypes.LLM) as llm_span: - llm_span._set_ctx_item(SPAN_KIND, "llm") - raise ValueError("error") - tp = LLMObsTraceProcessor(llmobs_span_writer=mock_llmobs_span_writer) - span_event, _ = tp._llmobs_span_event(llm_span) - assert span_event["meta"]["error.message"] == "error" - assert "ValueError" in span_event["meta"]["error.type"] - assert 'raise ValueError("error")' in span_event["meta"]["error.stack"] From d855c4a28824c15fd3afdbbe89315808efafdf07 Mon Sep 17 00:00:00 2001 From: David Sanchez <838104+sanchda@users.noreply.github.com> Date: Thu, 19 Dec 2024 14:43:12 -0500 Subject: [PATCH 71/78] fix(profiling): reset all profiling c++ mutexes on fork (#11768) I'm not sure why it took so long to surface this defect, but it turns out that stack v2 can deadlock applications because not all mutexes are reset. The repro in #11762 appears to be pretty durable. I need to investigate it a bit more in order to distill it down into a native stress test we can use moving forward. In practice, this patch suppresses the noted behavior in the repro. 
## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: Taegyun Kim --- .../profiling/dd_wrapper/include/uploader_builder.hpp | 2 ++ .../datadog/profiling/dd_wrapper/src/code_provenance.cpp | 3 +-- .../datadog/profiling/dd_wrapper/src/ddup_interface.cpp | 1 + .../internal/datadog/profiling/dd_wrapper/src/profile.cpp | 2 +- .../internal/datadog/profiling/dd_wrapper/src/sample.cpp | 1 - .../internal/datadog/profiling/dd_wrapper/src/uploader.cpp | 3 ++- .../datadog/profiling/dd_wrapper/src/uploader_builder.cpp | 7 +++++++ .../internal/datadog/profiling/stack_v2/src/sampler.cpp | 5 +++++ 
.../datadog/profiling/stack_v2/src/thread_span_links.cpp | 4 +--- .../fix-profiling-native-mutices-62440b5a3d9d6c4b.yaml | 5 +++++ 10 files changed, 25 insertions(+), 8 deletions(-) create mode 100644 releasenotes/notes/fix-profiling-native-mutices-62440b5a3d9d6c4b.yaml diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader_builder.hpp b/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader_builder.hpp index 62ee6aad853..7077096c744 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader_builder.hpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/include/uploader_builder.hpp @@ -43,6 +43,8 @@ class UploaderBuilder static void set_output_filename(std::string_view _output_filename); static std::variant build(); + + static void postfork_child(); }; } // namespace Datadog diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/code_provenance.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/code_provenance.cpp index 0a4a49a4ce5..f3147cd2034 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/code_provenance.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/code_provenance.cpp @@ -14,9 +14,8 @@ namespace Datadog { void Datadog::CodeProvenance::postfork_child() { - get_instance().mtx.~mutex(); // Destroy the mutex + // NB placement-new to re-init and leak the mutex because doing anything else is UB new (&get_instance().mtx) std::mutex(); // Recreate the mutex - get_instance().reset(); // Reset the state } void diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp index 5d3ef356c2a..9b52cbcaf6d 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/ddup_interface.cpp @@ -24,6 +24,7 @@ ddup_postfork_child() Datadog::Uploader::postfork_child(); Datadog::SampleManager::postfork_child(); 
Datadog::CodeProvenance::postfork_child(); + Datadog::UploaderBuilder::postfork_child(); } void diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp index 083ad1a655d..860f9c7cd3e 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/profile.cpp @@ -203,6 +203,6 @@ Datadog::Profile::collect(const ddog_prof_Sample& sample, int64_t endtime_ns) void Datadog::Profile::postfork_child() { - profile_mtx.unlock(); + new (&profile_mtx) std::mutex(); cycle_buffers(); } diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp index 1e7ca1b0217..4483a021803 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/sample.cpp @@ -408,7 +408,6 @@ Datadog::Sample::push_absolute_ns(int64_t _timestamp_ns) return true; } - bool Datadog::Sample::push_monotonic_ns(int64_t _monotonic_ns) { diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp index 375c2e09e9e..325771946d8 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader.cpp @@ -175,5 +175,6 @@ Datadog::Uploader::postfork_parent() void Datadog::Uploader::postfork_child() { - unlock(); + // NB placement-new to re-init and leak the mutex because doing anything else is UB + new (&upload_lock) std::mutex(); } diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader_builder.cpp b/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader_builder.cpp index 0661b7f217f..8ff5d45e7c2 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader_builder.cpp +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/src/uploader_builder.cpp @@ 
-186,3 +186,10 @@ Datadog::UploaderBuilder::build() return Datadog::Uploader{ output_filename, ddog_exporter }; } + +void +Datadog::UploaderBuilder::postfork_child() +{ + // NB placement-new to re-init and leak the mutex because doing anything else is UB + new (&tag_mutex) std::mutex(); +} diff --git a/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp b/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp index c05ae45477e..7ad9ad692b2 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp +++ b/ddtrace/internal/datadog/profiling/stack_v2/src/sampler.cpp @@ -67,6 +67,11 @@ _stack_v2_atfork_child() // so we don't even reveal this function to the user _set_pid(getpid()); ThreadSpanLinks::postfork_child(); + + // `thread_info_map_lock` and `task_link_map_lock` are global locks held in echion + // NB placement-new to re-init and leak the mutex because doing anything else is UB + new (&thread_info_map_lock) std::mutex; + new (&task_link_map_lock) std::mutex; } __attribute__((constructor)) void diff --git a/ddtrace/internal/datadog/profiling/stack_v2/src/thread_span_links.cpp b/ddtrace/internal/datadog/profiling/stack_v2/src/thread_span_links.cpp index c777ff8a510..6be43a04a42 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/src/thread_span_links.cpp +++ b/ddtrace/internal/datadog/profiling/stack_v2/src/thread_span_links.cpp @@ -53,10 +53,8 @@ ThreadSpanLinks::reset() void ThreadSpanLinks::postfork_child() { - // Explicitly destroy and reconstruct the mutex to avoid undefined behavior - get_instance().mtx.~mutex(); + // NB placement-new to re-init and leak the mutex because doing anything else is UB new (&get_instance().mtx) std::mutex(); - get_instance().reset(); } diff --git a/releasenotes/notes/fix-profiling-native-mutices-62440b5a3d9d6c4b.yaml b/releasenotes/notes/fix-profiling-native-mutices-62440b5a3d9d6c4b.yaml new file mode 100644 index 00000000000..40167a974c3 --- /dev/null +++ 
b/releasenotes/notes/fix-profiling-native-mutices-62440b5a3d9d6c4b.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + profiling: Fixes a bug where profiling mutexes were not cleared on fork in the child process. This could + cause deadlocks in certain configurations. From 983c84f5f0981e57a4e9125d2d3ab2367d0df8b5 Mon Sep 17 00:00:00 2001 From: David Sanchez <838104+sanchda@users.noreply.github.com> Date: Thu, 19 Dec 2024 14:44:31 -0500 Subject: [PATCH 72/78] fix(profiler): update memalloc guard (#11460) Previously, the memory allocation profiler would use Python's builtin thread-local storage interfaces in order to set and get the state of a thread-local guard. I've updated a few things here. * I think get/set idioms are slightly problematic for this type of code, since it pushes the responsibility of maintaining clean internal state up to the parent. A consequence of this is that the propagation of the underlying state _by value_ opens the door for race conditions if execution changes between contexts (unlikely here, but I think minimizing indirection is still cleaner). Accordingly, I've updated this to use native thread-local storage * Based on @nsrip-dd's observation, I widened the guard over `free()` operations. I believe this is correct, and if it isn't then the detriment is performance, not correctness. * I got rid of the PY37 failovers We don't have any reproductions for the defects that prompted this change, but I've been running a patched library in an environment that _does_ reproduce the behavior, and I haven't seen any defects. 1. I don't believe this patch is harmful, and if our memory allocation tests pass then I believe it should be fine. 2. I have a reason to believe this fixes a critical defect, which can cause crashes. 
## Checklist - [X] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/profiling/collector/_memalloc.c | 125 ++++++++---- ddtrace/profiling/collector/_memalloc_heap.c | 93 +++++++-- .../profiling/collector/_memalloc_reentrant.c | 3 + .../profiling/collector/_memalloc_reentrant.h | 186 +++++++++++++++--- ddtrace/profiling/collector/_memalloc_tb.c | 7 +- ddtrace/profiling/collector/_pymacro.h | 4 - ...ng-memalloc-segfault-5593ad951405a75d.yaml | 5 + setup.py | 5 +- 8 files changed, 340 insertions(+), 88 deletions(-) create mode 100644 ddtrace/profiling/collector/_memalloc_reentrant.c create mode 100644 
releasenotes/notes/fix-profiling-memalloc-segfault-5593ad951405a75d.yaml diff --git a/ddtrace/profiling/collector/_memalloc.c b/ddtrace/profiling/collector/_memalloc.c index 3b7f7db293f..3876517baaf 100644 --- a/ddtrace/profiling/collector/_memalloc.c +++ b/ddtrace/profiling/collector/_memalloc.c @@ -42,47 +42,95 @@ static PyObject* object_string = NULL; #define ALLOC_TRACKER_MAX_COUNT UINT64_MAX +// The data coordination primitives in this and related files are related to a crash we started seeing. +// We don't have a precise understanding of the causal factors within the runtime that lead to this condition, +// since the GIL alone was sufficient in the past for preventing this issue. +// We add an option here to _add_ a crash, in order to observe this condition in a future diagnostic iteration. +// **This option is _intended_ to crash the Python process** do not use without a good reason! +static char g_crash_on_mutex_pass_str[] = "_DD_PROFILING_MEMALLOC_CRASH_ON_MUTEX_PASS"; +static const char* g_truthy_values[] = { "1", "true", "yes", "on", "enable", "enabled", NULL }; // NB the sentinel NULL +static memlock_t g_memalloc_lock; + static alloc_tracker_t* global_alloc_tracker; +// This is a multiplatform way to define an operation to happen at static initialization time +static void +memalloc_init(void); + +#ifdef _MSC_VER +#pragma section(".CRT$XCU", read) +__declspec(allocate(".CRT$XCU")) void (*memalloc_init_func)(void) = memalloc_init; + +#elif defined(__GNUC__) || defined(__clang__) +__attribute__((constructor)) +#else +#error Unsupported compiler +#endif +static void +memalloc_init() +{ + // Check if we should crash the process on mutex pass + char* crash_on_mutex_pass_str = getenv(g_crash_on_mutex_pass_str); + bool crash_on_mutex_pass = false; + if (crash_on_mutex_pass_str) { + for (int i = 0; g_truthy_values[i]; i++) { + if (strcmp(crash_on_mutex_pass_str, g_truthy_values[i]) == 0) { + crash_on_mutex_pass = true; + break; + } + } + } + 
memlock_init(&g_memalloc_lock, crash_on_mutex_pass); +} + static void memalloc_add_event(memalloc_context_t* ctx, void* ptr, size_t size) { - /* Do not overflow; just ignore the new events if we ever reach that point */ - if (global_alloc_tracker->alloc_count >= ALLOC_TRACKER_MAX_COUNT) + uint64_t alloc_count = atomic_add_clamped(&global_alloc_tracker->alloc_count, 1, ALLOC_TRACKER_MAX_COUNT); + + /* Return if we've reached the maximum number of allocations */ + if (alloc_count == 0) return; - global_alloc_tracker->alloc_count++; + // Return if we can't take the guard + if (!memalloc_take_guard()) { + return; + } - /* Avoid loops */ - if (memalloc_get_reentrant()) + // In this implementation, the `global_alloc_tracker` isn't intrinsically protected. Before we read or modify, + // take the lock. The count of allocations is already forward-attributed elsewhere, so if we can't take the lock + // there's nothing to do. + if (!memlock_trylock(&g_memalloc_lock)) { return; + } /* Determine if we can capture or if we need to sample */ if (global_alloc_tracker->allocs.count < ctx->max_events) { - /* set a barrier so we don't loop as getting a traceback allocates memory */ - memalloc_set_reentrant(true); /* Buffer is not full, fill it */ traceback_t* tb = memalloc_get_traceback(ctx->max_nframe, ptr, size, ctx->domain); - memalloc_set_reentrant(false); - if (tb) + if (tb) { traceback_array_append(&global_alloc_tracker->allocs, tb); + } } else { /* Sampling mode using a reservoir sampling algorithm: replace a random * traceback with this one */ - uint64_t r = random_range(global_alloc_tracker->alloc_count); + uint64_t r = random_range(alloc_count); - if (r < ctx->max_events) { - /* set a barrier so we don't loop as getting a traceback allocates memory */ - memalloc_set_reentrant(true); + // In addition to event size, need to check that the tab is in a good state + if (r < ctx->max_events && global_alloc_tracker->allocs.tab != NULL) { /* Replace a random traceback with this one 
*/ traceback_t* tb = memalloc_get_traceback(ctx->max_nframe, ptr, size, ctx->domain); - memalloc_set_reentrant(false); + + // Need to check not only that the tb returned if (tb) { traceback_free(global_alloc_tracker->allocs.tab[r]); global_alloc_tracker->allocs.tab[r] = tb; } } } + + memlock_unlock(&g_memalloc_lock); + memalloc_yield_guard(); } static void @@ -98,12 +146,6 @@ memalloc_free(void* ctx, void* ptr) alloc->free(alloc->ctx, ptr); } -#ifdef _PY37_AND_LATER -Py_tss_t memalloc_reentrant_key = Py_tss_NEEDS_INIT; -#else -int memalloc_reentrant_key = -1; -#endif - static void* memalloc_alloc(int use_calloc, void* ctx, size_t nelem, size_t elsize) { @@ -233,7 +275,10 @@ memalloc_start(PyObject* Py_UNUSED(module), PyObject* args) global_memalloc_ctx.domain = PYMEM_DOMAIN_OBJ; - global_alloc_tracker = alloc_tracker_new(); + if (memlock_trylock(&g_memalloc_lock)) { + global_alloc_tracker = alloc_tracker_new(); + memlock_unlock(&g_memalloc_lock); + } PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &global_memalloc_ctx.pymem_allocator_obj); PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc); @@ -258,8 +303,11 @@ memalloc_stop(PyObject* Py_UNUSED(module), PyObject* Py_UNUSED(args)) PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &global_memalloc_ctx.pymem_allocator_obj); memalloc_tb_deinit(); - alloc_tracker_free(global_alloc_tracker); - global_alloc_tracker = NULL; + if (memlock_trylock(&g_memalloc_lock)) { + alloc_tracker_free(global_alloc_tracker); + global_alloc_tracker = NULL; + memlock_unlock(&g_memalloc_lock); + } memalloc_heap_tracker_deinit(); @@ -310,9 +358,15 @@ iterevents_new(PyTypeObject* type, PyObject* Py_UNUSED(args), PyObject* Py_UNUSE if (!iestate) return NULL; - iestate->alloc_tracker = global_alloc_tracker; /* reset the current traceback list */ - global_alloc_tracker = alloc_tracker_new(); + if (memlock_trylock(&g_memalloc_lock)) { + iestate->alloc_tracker = global_alloc_tracker; + global_alloc_tracker = alloc_tracker_new(); + memlock_unlock(&g_memalloc_lock); + } else { + 
Py_TYPE(iestate)->tp_free(iestate); + return NULL; + } iestate->seq_index = 0; PyObject* iter_and_count = PyTuple_New(3); @@ -326,8 +380,11 @@ iterevents_new(PyTypeObject* type, PyObject* Py_UNUSED(args), PyObject* Py_UNUSE static void iterevents_dealloc(IterEventsState* iestate) { - alloc_tracker_free(iestate->alloc_tracker); - Py_TYPE(iestate)->tp_free(iestate); + if (memlock_trylock(&g_memalloc_lock)) { + alloc_tracker_free(iestate->alloc_tracker); + Py_TYPE(iestate)->tp_free(iestate); + memlock_unlock(&g_memalloc_lock); + } } static PyObject* @@ -442,20 +499,6 @@ PyInit__memalloc(void) return NULL; } -#ifdef _PY37_AND_LATER - if (PyThread_tss_create(&memalloc_reentrant_key) != 0) { -#else - memalloc_reentrant_key = PyThread_create_key(); - if (memalloc_reentrant_key == -1) { -#endif -#ifdef MS_WINDOWS - PyErr_SetFromWindowsErr(0); -#else - PyErr_SetFromErrno(PyExc_OSError); -#endif - return NULL; - } - if (PyType_Ready(&MemallocIterEvents_Type) < 0) return NULL; Py_INCREF((PyObject*)&MemallocIterEvents_Type); diff --git a/ddtrace/profiling/collector/_memalloc_heap.c b/ddtrace/profiling/collector/_memalloc_heap.c index d6531d7b095..d2a5cc29eee 100644 --- a/ddtrace/profiling/collector/_memalloc_heap.c +++ b/ddtrace/profiling/collector/_memalloc_heap.c @@ -9,13 +9,13 @@ typedef struct { /* Granularity of the heap profiler in bytes */ - uint32_t sample_size; + uint64_t sample_size; /* Current sample size of the heap profiler in bytes */ - uint32_t current_sample_size; + uint64_t current_sample_size; /* Tracked allocations */ traceback_array_t allocs; /* Allocated memory counter in bytes */ - uint32_t allocated_memory; + uint64_t allocated_memory; /* True if the heap tracker is frozen */ bool frozen; /* Contains the ongoing heap allocation/deallocation while frozen */ @@ -26,8 +26,42 @@ typedef struct } freezer; } heap_tracker_t; +static char g_crash_on_mutex_pass_str[] = "_DD_PROFILING_MEMHEAP_CRASH_ON_MUTEX_PASS"; +static const char* g_truthy_values[] = { "1", 
"true", "yes", "on", "enable", "enabled", NULL }; // NB the sentinel NULL +static memlock_t g_memheap_lock; + static heap_tracker_t global_heap_tracker; +// This is a multiplatform way to define an operation to happen at static initialization time +static void +memheap_init(void); + +#ifdef _MSC_VER +#pragma section(".CRT$XCU", read) +__declspec(allocate(".CRT$XCU")) void (*memheap_init_func)(void) = memheap_init; + +#elif defined(__GNUC__) || defined(__clang__) +__attribute__((constructor)) +#else +#error Unsupported compiler +#endif +static void +memheap_init() +{ + // Check if we should crash the process on mutex pass + char* crash_on_mutex_pass_str = getenv(g_crash_on_mutex_pass_str); + bool crash_on_mutex_pass = false; + if (crash_on_mutex_pass_str) { + for (int i = 0; g_truthy_values[i]; i++) { + if (strcmp(crash_on_mutex_pass_str, g_truthy_values[i]) == 0) { + crash_on_mutex_pass = true; + break; + } + } + } + memlock_init(&g_memheap_lock, crash_on_mutex_pass); +} + static uint32_t heap_tracker_next_sample_size(uint32_t sample_size) { @@ -119,20 +153,30 @@ heap_tracker_thaw(heap_tracker_t* heap_tracker) void memalloc_heap_tracker_init(uint32_t sample_size) { - heap_tracker_init(&global_heap_tracker); - global_heap_tracker.sample_size = sample_size; - global_heap_tracker.current_sample_size = heap_tracker_next_sample_size(sample_size); + + if (memlock_trylock(&g_memheap_lock)) { + heap_tracker_init(&global_heap_tracker); + global_heap_tracker.sample_size = sample_size; + global_heap_tracker.current_sample_size = heap_tracker_next_sample_size(sample_size); + memlock_unlock(&g_memheap_lock); + } } void memalloc_heap_tracker_deinit(void) { - heap_tracker_wipe(&global_heap_tracker); + if (memlock_trylock(&g_memheap_lock)) { + heap_tracker_wipe(&global_heap_tracker); + memlock_unlock(&g_memheap_lock); + } } void memalloc_heap_untrack(void* ptr) { + if (!memlock_trylock(&g_memheap_lock)) { + return; + } if (global_heap_tracker.frozen) { /* Check that we still have 
space to store the free. If we don't have enough space, we ignore the untrack. That's sad as there is a change @@ -144,6 +188,8 @@ memalloc_heap_untrack(void* ptr) ptr_array_append(&global_heap_tracker.freezer.frees, ptr); } else heap_tracker_untrack_thawed(&global_heap_tracker, ptr); + + memlock_unlock(&g_memheap_lock); } /* Track a memory allocation in the heap profiler. @@ -157,26 +203,36 @@ memalloc_heap_track(uint16_t max_nframe, void* ptr, size_t size, PyMemAllocatorD return false; /* Check for overflow */ - global_heap_tracker.allocated_memory = Py_MIN(global_heap_tracker.allocated_memory + size, MAX_HEAP_SAMPLE_SIZE); + uint64_t res = atomic_add_clamped(&global_heap_tracker.allocated_memory, size, MAX_HEAP_SAMPLE_SIZE); + if (0 == res) + return false; + + // Take the lock + if (!memlock_trylock(&g_memheap_lock)) { + return false; + } /* Check if we have enough sample or not */ - if (global_heap_tracker.allocated_memory < global_heap_tracker.current_sample_size) + if (global_heap_tracker.allocated_memory < global_heap_tracker.current_sample_size) { + memlock_unlock(&g_memheap_lock); return false; + } /* Check if we can add more samples: the sum of the freezer + alloc tracker cannot be greater than what the alloc tracker can handle: when the alloc tracker is thawed, all the allocs in the freezer will be moved there!*/ - if ((global_heap_tracker.freezer.allocs.count + global_heap_tracker.allocs.count) >= TRACEBACK_ARRAY_MAX_COUNT) + if (global_heap_tracker.freezer.allocs.count + global_heap_tracker.allocs.count >= TRACEBACK_ARRAY_MAX_COUNT) { + memlock_unlock(&g_memheap_lock); return false; + } /* Avoid loops */ - if (memalloc_get_reentrant()) + if (!memalloc_take_guard()) { + memlock_unlock(&g_memheap_lock); return false; + } - memalloc_set_reentrant(true); traceback_t* tb = memalloc_get_traceback(max_nframe, ptr, global_heap_tracker.allocated_memory, domain); - memalloc_set_reentrant(false); - if (tb) { if (global_heap_tracker.frozen) 
traceback_array_append(&global_heap_tracker.freezer.allocs, tb); @@ -189,15 +245,23 @@ memalloc_heap_track(uint16_t max_nframe, void* ptr, size_t size, PyMemAllocatorD /* Compute the new target sample size */ global_heap_tracker.current_sample_size = heap_tracker_next_sample_size(global_heap_tracker.sample_size); + memalloc_yield_guard(); + memlock_unlock(&g_memheap_lock); return true; } + memalloc_yield_guard(); + memlock_unlock(&g_memheap_lock); return false; } PyObject* memalloc_heap() { + if (!memlock_trylock(&g_memheap_lock)) { + return NULL; + } + heap_tracker_freeze(&global_heap_tracker); PyObject* heap_list = PyList_New(global_heap_tracker.allocs.count); @@ -213,5 +277,6 @@ memalloc_heap() heap_tracker_thaw(&global_heap_tracker); + memlock_unlock(&g_memheap_lock); return heap_list; } diff --git a/ddtrace/profiling/collector/_memalloc_reentrant.c b/ddtrace/profiling/collector/_memalloc_reentrant.c new file mode 100644 index 00000000000..d360d19fb30 --- /dev/null +++ b/ddtrace/profiling/collector/_memalloc_reentrant.c @@ -0,0 +1,3 @@ +#include "_memalloc_reentrant.h" + +bool _MEMALLOC_ON_THREAD = false; diff --git a/ddtrace/profiling/collector/_memalloc_reentrant.h b/ddtrace/profiling/collector/_memalloc_reentrant.h index 5c8a552294e..cb4aa246961 100644 --- a/ddtrace/profiling/collector/_memalloc_reentrant.h +++ b/ddtrace/profiling/collector/_memalloc_reentrant.h @@ -1,50 +1,188 @@ #ifndef _DDTRACE_MEMALLOC_REENTRANT_H #define _DDTRACE_MEMALLOC_REENTRANT_H -#include "_pymacro.h" +#ifdef _WIN32 +#include +#else +#define _POSIX_C_SOURCE 200809L +#include +#include +#include +#include +#include +#endif #include +#include +#include -#ifndef _PY37_AND_LATER -#include +// Cross-platform macro for defining thread-local storage +// NB - we use dynamic-global on Linux because the others are problematic +#if defined(_MSC_VER) // Check for MSVC compiler +#define MEMALLOC_TLS __declspec(thread) +#elif defined(__GNUC__) || defined(__clang__) // GCC or Clang +#define 
MEMALLOC_TLS __attribute__((tls_model("global-dynamic"))) __thread +#else +#error "Unsupported compiler for thread-local storage" #endif +extern bool _MEMALLOC_ON_THREAD; + +// This is a saturating atomic add for 32- and 64-bit platforms. +// In order to implement the saturation logic, use a CAS loop. +// From the GCC docs: +// "‘__atomic’ builtins can be used with any integral scalar or pointer type that is 1, 2, 4, or 8 bytes in length" +// From the MSVC docs: +// "_InterlockedCompareExchange64 is available on x86 systems running on any Pentium architecture; it is not +// available on 386 or 486 architectures." +static inline uint64_t +atomic_add_clamped(uint64_t* target, uint64_t amount, uint64_t max) +{ + // In reality, there's virtually no scenario in which this deadlocks. Just the same, give it some arbitrarily high + // limit in order to prevent unpredicted deadlocks. 96 is chosen since it's the number of cores on the largest + // consumer CPU generally used by our customers. + int attempts = 96; + while (attempts--) { + uint64_t old_val = (volatile uint64_t) * target; -#ifdef _PY37_AND_LATER -extern Py_tss_t memalloc_reentrant_key; + // CAS loop + saturation check + uint64_t new_val = old_val + amount; + if (new_val > max || new_val < old_val) { + return 0; + } +#if defined(_MSC_VER) + uint64_t prev_val = + (uint64_t)InterlockedCompareExchange64((volatile LONG64*)target, (LONG64)new_val, (LONG64)old_val); + if (prev_val == old_val) { + return new_val; + } +#elif defined(__clang__) || defined(__GNUC__) + if (atomic_compare_exchange_strong_explicit( + (_Atomic uint64_t*)target, &old_val, new_val, memory_order_seq_cst, memory_order_seq_cst)) { + return new_val; + } #else -extern int memalloc_reentrant_key; +#error "Unsupported compiler for atomic operations" #endif + // If we reach here, CAS failed; another thread changed `target` + // Retry until success or until we detect max. 
+ } -/* Any non-NULL pointer can be used */ -#define _MEMALLOC_REENTRANT_VALUE Py_True + return 0; +} -static inline void -memalloc_set_reentrant(bool reentrant) +// Opaque lock type +typedef struct +{ +#ifdef _WIN32 + HANDLE mutex; +#else + pthread_mutex_t mutex; +#endif +} memlock_t; + +// Global setting; if a lock fails to be acquired, crash +static bool g_crash_on_mutex_pass = false; + +// Generic initializer +static inline bool +memlock_init(memlock_t* lock, bool crash_on_pass) +{ + if (!lock) + return false; + + g_crash_on_mutex_pass = crash_on_pass; + +#ifdef _WIN32 + lock->mutex = CreateMutex(NULL, FALSE, NULL); + return lock->mutex != NULL; +#else + // For POSIX systems, we make sure to use an ERRORCHECK type mutex, since it pushes some of the state checking + // down to the implementation. + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + return pthread_mutex_init(&lock->mutex, NULL) == 0; +#endif +} + +// Unlock function +static inline bool +memlock_unlock(memlock_t* lock) { - if (reentrant) -#ifdef _PY37_AND_LATER - PyThread_tss_set(&memalloc_reentrant_key, _MEMALLOC_REENTRANT_VALUE); + if (!lock) + return false; + +#ifdef _WIN32 + return ReleaseMutex(lock->mutex); #else - PyThread_set_key_value(memalloc_reentrant_key, _MEMALLOC_REENTRANT_VALUE); + return pthread_mutex_unlock(&lock->mutex) == 0; +#endif +} + +// trylock function +static inline bool +memlock_trylock(memlock_t* lock) +{ + if (!lock) + return false; + +#ifdef __linux__ + // On Linux, we need to make sure we didn't just fork + // pthreads will guarantee the lock is consistent, but we at least need to clear it + static pid_t my_pid = 0; + if (my_pid == 0) { + my_pid = getpid(); + } else if (my_pid != getpid()) { + // We've forked, so we need to free the lock + memlock_unlock(lock); + my_pid = getpid(); + } #endif - else -#ifdef _PY37_AND_LATER - PyThread_tss_set(&memalloc_reentrant_key, NULL); + +#ifdef _WIN32 + bool 
result = WAIT_OBJECT_0 == WaitForSingleObject(lock->mutex, 0); // 0ms timeout -> no wait #else - PyThread_set_key_value(memalloc_reentrant_key, NULL); + bool result = 0 == pthread_mutex_trylock(&lock->mutex); #endif + if (!result && g_crash_on_mutex_pass) { + // segfault + int* p = NULL; + *p = 0; + abort(); // should never reach here + } + + return result; } +// Cleanup function static inline bool -memalloc_get_reentrant(void) +memlock_destroy(memlock_t* lock) { -#ifdef _PY37_AND_LATER - if (PyThread_tss_get(&memalloc_reentrant_key)) + if (!lock) + return false; + +#ifdef _WIN32 + return CloseHandle(lock->mutex); #else - if (PyThread_get_key_value(memalloc_reentrant_key)) + return 0 == pthread_mutex_destroy(&lock->mutex); #endif - return true; +} - return false; +static inline bool +memalloc_take_guard() +{ + // Ordinarily, a process-wide semaphore would require a CAS, but since this is thread-local we can just set it. + if (_MEMALLOC_ON_THREAD) + return false; + _MEMALLOC_ON_THREAD = true; + return true; +} + +static inline void +memalloc_yield_guard(void) +{ + // Ideally, we'd actually capture the old state within an object and restore it, but since this is + // a coarse-grained lock, we just set it to false. 
+ _MEMALLOC_ON_THREAD = false; } #endif diff --git a/ddtrace/profiling/collector/_memalloc_tb.c b/ddtrace/profiling/collector/_memalloc_tb.c index ba79021f719..bb265fe08d5 100644 --- a/ddtrace/profiling/collector/_memalloc_tb.c +++ b/ddtrace/profiling/collector/_memalloc_tb.c @@ -87,6 +87,9 @@ memalloc_tb_deinit(void) void traceback_free(traceback_t* tb) { + if (!tb) + return; + for (uint16_t nframe = 0; nframe < tb->nframe; nframe++) { Py_DECREF(tb->frames[nframe].filename); Py_DECREF(tb->frames[nframe].name); @@ -197,11 +200,7 @@ memalloc_get_traceback(uint16_t max_nframe, void* ptr, size_t size, PyMemAllocat traceback->size = size; traceback->ptr = ptr; -#ifdef _PY37_AND_LATER traceback->thread_id = PyThread_get_thread_ident(); -#else - traceback->thread_id = tstate->thread_id; -#endif traceback->domain = domain; diff --git a/ddtrace/profiling/collector/_pymacro.h b/ddtrace/profiling/collector/_pymacro.h index e71ed6888b9..aa31c3d4cc1 100644 --- a/ddtrace/profiling/collector/_pymacro.h +++ b/ddtrace/profiling/collector/_pymacro.h @@ -13,8 +13,4 @@ #define _PY38 #endif -#if PY_VERSION_HEX >= 0x03070000 -#define _PY37_AND_LATER -#endif - #endif diff --git a/releasenotes/notes/fix-profiling-memalloc-segfault-5593ad951405a75d.yaml b/releasenotes/notes/fix-profiling-memalloc-segfault-5593ad951405a75d.yaml new file mode 100644 index 00000000000..8632b62af50 --- /dev/null +++ b/releasenotes/notes/fix-profiling-memalloc-segfault-5593ad951405a75d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where the memory allocation profiler can cause a segmentation fault due to + data races when accessing its own global data structures from multiple threads. 
diff --git a/setup.py b/setup.py index 74e8f8187d7..dfaa5f6bf97 100644 --- a/setup.py +++ b/setup.py @@ -510,8 +510,11 @@ def get_exts_for(name): "ddtrace/profiling/collector/_memalloc.c", "ddtrace/profiling/collector/_memalloc_tb.c", "ddtrace/profiling/collector/_memalloc_heap.c", + "ddtrace/profiling/collector/_memalloc_reentrant.c", ], - extra_compile_args=debug_compile_args, + extra_compile_args=debug_compile_args + ["-D_POSIX_C_SOURCE=200809L", "-std=c11"] + if CURRENT_OS != "Windows" + else ["/std:c11"], ), Extension( "ddtrace.internal._threads", From e8aab659df2df0769586856bdf9f3eaefcfbbb5b Mon Sep 17 00:00:00 2001 From: wantsui Date: Thu, 19 Dec 2024 15:06:32 -0500 Subject: [PATCH 73/78] fix(celery): stop closing prerun_span too soon to account for Celery chains scenario (#11498) We've made a few changes to handle celery context recently, including: https://github.com/DataDog/dd-trace-py/pull/10676 In particular the goal of https://github.com/DataDog/dd-trace-py/pull/10676 was to handle a scenario where a long running task may run into an exception, preventing it from closing. Unfortunately, this scenario did not account for cases where tasks are chained and may not close until later. See: https://github.com/DataDog/dd-trace-py/issues/11479 and https://github.com/DataDog/dd-trace-py/issues/11624 With this PR, the sample app in https://github.com/DataDog/dd-trace-py/issues/11479 would attach the celery specific span back to the root span. I also need to add tests for the chains scenario. 
Related to AIDM-494 ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- ddtrace/contrib/internal/celery/app.py | 11 ---- ddtrace/contrib/internal/celery/signals.py | 3 - ...-celery-closed-spans-34ff43868c1e33b8.yaml | 4 ++ tests/contrib/celery/run_tasks.py | 5 ++ tests/contrib/celery/tasks.py | 14 +++++ tests/contrib/celery/test_chained_task.py | 62 +++++++++++++++++++ 6 files changed, 85 insertions(+), 14 deletions(-) create mode 100644 releasenotes/notes/fix-celery-closed-spans-34ff43868c1e33b8.yaml create mode 100644 tests/contrib/celery/run_tasks.py create mode 100644 tests/contrib/celery/tasks.py create mode 100644 
tests/contrib/celery/test_chained_task.py diff --git a/ddtrace/contrib/internal/celery/app.py b/ddtrace/contrib/internal/celery/app.py index b61585097a7..42eed2cb468 100644 --- a/ddtrace/contrib/internal/celery/app.py +++ b/ddtrace/contrib/internal/celery/app.py @@ -133,10 +133,6 @@ def _traced_apply_async_inner(func, instance, args, kwargs): if task_span: task_span.set_exc_info(*sys.exc_info()) - prerun_span = core.get_item("prerun_span") - if prerun_span: - prerun_span.set_exc_info(*sys.exc_info()) - raise finally: task_span = core.get_item("task_span") @@ -147,11 +143,4 @@ def _traced_apply_async_inner(func, instance, args, kwargs): ) task_span.finish() - prerun_span = core.get_item("prerun_span") - if prerun_span: - log.debug( - "The task_postrun signal was not called, so manually closing span: %s", prerun_span._pprint() - ) - prerun_span.finish() - return _traced_apply_async_inner diff --git a/ddtrace/contrib/internal/celery/signals.py b/ddtrace/contrib/internal/celery/signals.py index 76f07ee7524..8f27fcc53b0 100644 --- a/ddtrace/contrib/internal/celery/signals.py +++ b/ddtrace/contrib/internal/celery/signals.py @@ -54,9 +54,6 @@ def trace_prerun(*args, **kwargs): service = config.celery["worker_service_name"] span = pin.tracer.trace(c.WORKER_ROOT_SPAN, service=service, resource=task.name, span_type=SpanTypes.WORKER) - # Store an item called "prerun span" in case task_postrun doesn't get called - core.set_item("prerun_span", span) - # set span.kind to the type of request being performed span.set_tag_str(SPAN_KIND, SpanKind.CONSUMER) diff --git a/releasenotes/notes/fix-celery-closed-spans-34ff43868c1e33b8.yaml b/releasenotes/notes/fix-celery-closed-spans-34ff43868c1e33b8.yaml new file mode 100644 index 00000000000..f16f7b36fed --- /dev/null +++ b/releasenotes/notes/fix-celery-closed-spans-34ff43868c1e33b8.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + tracing(celery): Fixes an issue where ``celery.apply`` spans from Celery prerun got closed too soon leading to span 
tags being missing. \ No newline at end of file diff --git a/tests/contrib/celery/run_tasks.py b/tests/contrib/celery/run_tasks.py new file mode 100644 index 00000000000..e91454ab5bb --- /dev/null +++ b/tests/contrib/celery/run_tasks.py @@ -0,0 +1,5 @@ +from tasks import fn_a +from tasks import fn_b + + +(fn_a.si() | fn_b.si()).delay() diff --git a/tests/contrib/celery/tasks.py b/tests/contrib/celery/tasks.py new file mode 100644 index 00000000000..a9dfc936ae4 --- /dev/null +++ b/tests/contrib/celery/tasks.py @@ -0,0 +1,14 @@ +from celery import Celery + + +app = Celery("tasks") + + +@app.task(name="tests.contrib.celery.tasks.fn_a") +def fn_a(): + return "a" + + +@app.task(name="tests.contrib.celery.tasks.fn_b") +def fn_b(): + return "b" diff --git a/tests/contrib/celery/test_chained_task.py b/tests/contrib/celery/test_chained_task.py new file mode 100644 index 00000000000..5fd0c543e72 --- /dev/null +++ b/tests/contrib/celery/test_chained_task.py @@ -0,0 +1,62 @@ +import os +import re +import subprocess +import time + +from celery import Celery + + +# Ensure that when we call Celery chains, the root span has celery specific span tags +# The test_integration.py setup doesn't perfectly mimic the condition of a worker process running. +# This test runs the worker as a side so we can check the tracer logs afterwards to ensure expected span results. 
+# See https://github.com/DataDog/dd-trace-py/issues/11479 +def test_task_chain_task_call_task(): + app = Celery("tasks") + + celery_worker_cmd = "ddtrace-run celery -A tasks worker -c 1 -l DEBUG -n uniquename1 -P solo" + celery_task_runner_cmd = "ddtrace-run python run_tasks.py" + + # The commands need to run from the directory where this test file lives + current_directory = str(os.path.dirname(__file__)) + + worker_process = subprocess.Popen( + celery_worker_cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setsid, + close_fds=True, + cwd=current_directory, + ) + + max_wait_time = 10 + waited_so_far = 0 + # {app.control.inspect().active() returns {'celery@uniquename1': []} when the worker is running} + while app.control.inspect().active() is None and waited_so_far < max_wait_time: + time.sleep(1) + waited_so_far += 1 + + # The task should only run after the Celery worker has sufficient time to start up + task_runner_process = subprocess.Popen( + celery_task_runner_cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + preexec_fn=os.setsid, + close_fds=True, + cwd=current_directory, + ) + + task_runner_process.wait() + # Kill the process so it starts to send traces to the Trace Agent + worker_process.kill() + worker_logs = worker_process.stderr.read() + + # Check that the root span was created with one of the Celery specific tags, such as celery.correlation_id + # Some versions of python seem to require escaping when using `re.search`: + old_pattern_match = r"resource=\\'tests.contrib.celery.tasks.fn_a\\' type=\\'worker\\' .* tags=.*correlation_id.*" + new_pattern_match = r"resource=\'tests.contrib.celery.tasks.fn_a\' type=\'worker\' .* tags=.*correlation_id.*" + + pattern_exists = ( + re.search(old_pattern_match, str(worker_logs)) is not None + or re.search(new_pattern_match, str(worker_logs)) is not None + ) + assert pattern_exists is not None From 3b4bd62c81651baf2c8c3af398295982a9a0ecf4 Mon Sep 17 00:00:00 2001 From: 
Quinna Halim Date: Thu, 19 Dec 2024 15:21:18 -0500 Subject: [PATCH 74/78] chore: output supported versions of integrations (#11372) - Creates a `Generate Supported Integration Versions` workflow that outputs the supported versions of integrations to a `supported_versions_output.json` and `supported_versions_table.csv`. PR here: https://github.com/DataDog/dd-trace-py/pull/11767 and workflow here: https://github.com/DataDog/dd-trace-py/actions/runs/12383562860/job/34566489841 - in `scripts/freshvenvs.py`, separates the workflows for outputting the outdated integrations (which is run in the `Generate Package Versions` workflow), and for creating the supported version table. - This workflow will be tied to a release, but can also be triggered manually (via `workflow_dispatch`) Future: - There will be a mechanism for converting the `csv` file to the `rst` format used by the ddtrace docs, and for generating the public datadoghq docs (in markdown) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - 
Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../workflows/generate-supported-versions.yml | 121 +++++++++++++++++ scripts/freshvenvs.py | 128 ++++++++++++++++-- scripts/generate_table.py | 24 ++++ scripts/regenerate-riot-latest.sh | 5 +- 4 files changed, 264 insertions(+), 14 deletions(-) create mode 100644 .github/workflows/generate-supported-versions.yml create mode 100644 scripts/generate_table.py diff --git a/.github/workflows/generate-supported-versions.yml b/.github/workflows/generate-supported-versions.yml new file mode 100644 index 00000000000..c802e91bcf3 --- /dev/null +++ b/.github/workflows/generate-supported-versions.yml @@ -0,0 +1,121 @@ +name: Generate Supported Integration Versions + +on: + workflow_dispatch: # can be triggered manually + +jobs: + generate-supported-versions: + name: Generate supported integration versions + runs-on: ubuntu-22.04 + permissions: + actions: read + contents: write + pull-requests: write + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Setup Python 3.7 + uses: actions/setup-python@v5 + with: + python-version: "3.7" + + - name: Setup Python 3.8 + uses: actions/setup-python@v5 + with: + python-version: "3.8" + + - name: Setup Python 3.9 + uses: actions/setup-python@v5 + with: + python-version: "3.9" + + - name: Setup Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Setup Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Setup Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Setup Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: 
"3.13" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y libmariadb-dev + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install packaging + pip install requests + pip install riot==0.20.1 + pip install wrapt==1.16.0 + + - name: Install ddtrace + run: | + pip install -e . + + - run: python scripts/freshvenvs.py generate + + - name: Generate table + run: python scripts/generate_table.py + + - run: git diff + + - name: Create Pull Request + id: pr + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + branch: "update-supported-versions" + commit-message: "Update supported versions table" + delete-branch: true + base: main + title: "chore: update supported versions" + labels: changelog/no-changelog + body: | + Generates / updates the supported versions table for integrations. + This should be tied to releases, or triggered manually. 
+ Workflow runs: [Generate Supported Integration Versions](https://github.com/DataDog/dd-trace-py/actions/workflows/generate-supported-versions.yml) + + ## Checklist + - [x] PR author has checked that all the criteria below are met + - The PR description includes an overview of the change + - The PR description articulates the motivation for the change + - The change includes tests OR the PR description describes a testing strategy + - The PR description notes risks associated with the change, if any + - Newly-added code is easy to change + - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) + - The change includes or references documentation updates if necessary + - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) + + ## Reviewer Checklist + - [ ] Reviewer has checked that all the criteria below are met + - Title is accurate + - All changes are related to the pull request's stated goal + - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes + - Testing strategy adequately addresses listed risks + - Newly-added code is easy to change + - Release note makes sense to a user of the library + - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment + - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) diff --git a/scripts/freshvenvs.py b/scripts/freshvenvs.py index 61a381d8fa6..13cd81a6fcc 100644 --- a/scripts/freshvenvs.py +++ b/scripts/freshvenvs.py @@ -4,6 +4,7 @@ from http.client import HTTPSConnection from io import StringIO import json +from operator import itemgetter import os import pathlib import sys @@ -21,7 +22,9 @@ CONTRIB_ROOT = pathlib.Path("ddtrace/contrib") LATEST = "" +excluded = 
{"coverage"} suite_to_package = { + "kafka": "confluent-kafka", "consul": "python-consul", "snowflake": "snowflake-connector-python", "flask_cache": "flask-caching", @@ -30,11 +33,35 @@ "asyncio": "pytest-asyncio", "sqlite3": "pysqlite3-binary", "grpc": "grpcio", + "google_generativeai": "google-generativeai", "psycopg2": "psycopg2-binary", "cassandra": "cassandra-driver", "rediscluster": "redis-py-cluster", + "dogpile_cache": "dogpile-cache", + "vertica": "vertica_python", } + +# mapping the name of the module to the name of the package (on pypi and as defined in lockfiles) +mapping_module_to_package = { + "confluent_kafka": "confluent-kafka", + "snowflake": "snowflake-connector-python", + "cassandra": "cassandra-driver", + "rediscluster": "redis-py-cluster", + "vertica_python": "vertica-python", + "flask_cache": "flask-cache", + "flask_caching": "flask-caching", + "consul": "python-consul", + "grpc": "grpcio", + "graphql": "graphql-core", + "mysql": "pymysql", +} + + +supported_versions = [] # list of dicts +pinned_packages = set() + + class Capturing(list): def __enter__(self): self._stdout = sys.stdout @@ -77,14 +104,16 @@ def _get_riot_envs_including_any(modules: typing.Set[str]) -> typing.Set[str]: with open(f".riot/requirements/{item}", "r") as lockfile: lockfile_content = lockfile.read() for module in modules: - if module in lockfile_content: + if module in lockfile_content or ( + module in suite_to_package and suite_to_package[module] in lockfile_content + ): envs |= {item.split(".")[0]} break return envs def _get_updatable_packages_implementing(modules: typing.Set[str]) -> typing.Set[str]: - """Return all packages that can be updated and have contribs implemented for them""" + """Return all packages have contribs implemented for them""" all_venvs = riotfile.venv.venvs for v in all_venvs: @@ -92,12 +121,18 @@ def _get_updatable_packages_implementing(modules: typing.Set[str]) -> typing.Set if package not in modules: continue if not 
_venv_sets_latest_for_package(v, package): - modules.remove(package) + pinned_packages.add(package) packages = {m for m in modules if "." not in m} return packages +def _get_all_modules(modules: typing.Set[str]) -> typing.Set[str]: + """Return all packages that have contribs implemented for them""" + contrib_modules = {m for m in modules if "." not in m} + return contrib_modules + + def _get_version_extremes(package_name: str) -> typing.Tuple[Optional[str], Optional[str]]: """Return the (earliest, latest) supported versions of a given package""" with Capturing() as output: @@ -134,16 +169,27 @@ def _get_version_extremes(package_name: str) -> typing.Tuple[Optional[str], Opti def _get_package_versions_from(env: str, packages: typing.Set[str]) -> typing.List[typing.Tuple[str, str]]: - """Return the list of package versions that are tested""" + """Return the list of package versions that are tested, related to the modules""" + # Returns [(package, version), (package, version)] lockfile_content = pathlib.Path(f".riot/requirements/{env}.txt").read_text().splitlines() lock_packages = [] for line in lockfile_content: package, _, versions = line.partition("==") + # remap the package -> module name if package in packages: lock_packages.append((package, versions)) + return lock_packages +def _is_module_autoinstrumented(module: str) -> bool: + import importlib + + _monkey = importlib.import_module("ddtrace._monkey") + PATCH_MODULES = getattr(_monkey, "PATCH_MODULES") + + return module in PATCH_MODULES and PATCH_MODULES[module] + def _versions_fully_cover_bounds(bounds: typing.Tuple[str, str], versions: typing.List[str]) -> bool: """Return whether the tested versions cover the full range of supported versions""" if not versions: @@ -173,12 +219,25 @@ def _venv_sets_latest_for_package(venv: riotfile.Venv, suite_name: str) -> bool: return False -def main(): - all_required_modules = _get_integrated_modules() - all_required_packages = 
_get_updatable_packages_implementing(all_required_modules) - envs = _get_riot_envs_including_any(all_required_modules) +def _get_all_used_versions(envs, packages) -> dict: + # Returns dict(module, set(versions)) for a venv, as defined in riotfiles. + all_used_versions = defaultdict(set) + for env in envs: + versions_used = _get_package_versions_from(env, packages) # returns list of (package, versions) + for package, version in versions_used: + all_used_versions[package].add(version) + return all_used_versions + +def _get_version_bounds(packages) -> dict: + # Return dict(module: (earliest, latest)) of the module on PyPI bounds = dict() + for package in packages: + earliest, latest = _get_version_extremes(package) + bounds[package] = (earliest, latest) + return bounds + +def output_outdated_packages(all_required_packages, envs, bounds): for package in all_required_packages: earliest, latest = _get_version_extremes(package) bounds[package] = (earliest, latest) @@ -194,10 +253,55 @@ def main(): if not ordered: continue if not _versions_fully_cover_bounds(bounds[package], ordered): - print( - f"{package}: policy supports version {bounds[package][0]} through {bounds[package][1]} " - f"but only these versions are used: {[str(v) for v in ordered]}" - ) + print(f"{package}") + +def generate_supported_versions(contrib_packages, all_used_versions, patched): + for mod in mapping_module_to_package: + contrib_packages.remove(mod) + contrib_packages.add(mapping_module_to_package[mod]) + patched[mapping_module_to_package[mod]] = _is_module_autoinstrumented(mod) + + # Generate supported versions + for package in contrib_packages: + ordered = sorted([Version(v) for v in all_used_versions[package]], reverse=True) + if not ordered: + continue + json_format = { + "integration": package, + "minimum_tracer_supported": str(ordered[-1]), + "max_tracer_supported": str(ordered[0]), + } + + if package in pinned_packages: + json_format["pinned"] = "true" + + if package not in patched: + 
patched[package] = _is_module_autoinstrumented(package) + json_format["auto-instrumented"] = patched[package] + supported_versions.append(json_format) + + supported_versions_output = sorted(supported_versions, key=itemgetter("integration")) + with open("supported_versions_output.json", "w") as file: + json.dump(supported_versions_output, file, indent=4) + +def main(): + all_required_modules = _get_integrated_modules() + all_required_packages = _get_updatable_packages_implementing(all_required_modules) # these are MODULE names + contrib_modules = _get_all_modules(all_required_modules) + envs = _get_riot_envs_including_any(all_required_modules) + patched = {} + + contrib_packages = contrib_modules + all_used_versions = _get_all_used_versions(envs, contrib_packages) + bounds = _get_version_bounds(contrib_packages) + + if len(sys.argv) != 2: + print("usage: python scripts/freshvenvs.py or ") + return + if sys.argv[1] == "output": + output_outdated_packages(all_required_packages, envs, bounds) + if sys.argv[1] == "generate": + generate_supported_versions(contrib_packages, all_used_versions, patched) if __name__ == "__main__": diff --git a/scripts/generate_table.py b/scripts/generate_table.py new file mode 100644 index 00000000000..1d7569b3e63 --- /dev/null +++ b/scripts/generate_table.py @@ -0,0 +1,24 @@ +import csv +import json + + +print("Reading supported_versions_output.json") + +with open("supported_versions_output.json", "r") as json_file: + data = json.load(json_file) + +columns = ["integration", "minimum_tracer_supported", "max_tracer_supported", "auto-instrumented"] +csv_rows = [] + +for entry in data: + integration_name = entry.get("integration", "") + if entry.get("pinned", "").lower() == "true": + integration_name += " *" + entry["integration"] = integration_name + csv_rows.append({col: entry.get(col, "") for col in columns}) + +with open("supported_versions_table.csv", "w", newline="") as csv_file: + print("Wrote to supported_versions_table.csv") + writer = 
csv.DictWriter(csv_file, fieldnames=columns) + writer.writeheader() + writer.writerows(csv_rows) diff --git a/scripts/regenerate-riot-latest.sh b/scripts/regenerate-riot-latest.sh index f0e68938a27..423a0524891 100755 --- a/scripts/regenerate-riot-latest.sh +++ b/scripts/regenerate-riot-latest.sh @@ -3,7 +3,7 @@ set -e DDTEST_CMD=scripts/ddtest -pkgs=$(python scripts/freshvenvs.py | cut -d':' -f1) +pkgs=$(python scripts/freshvenvs.py output) echo $pkgs if ! $DDTEST_CMD; then @@ -20,7 +20,8 @@ for pkg in ${pkgs[*]}; do echo "No riot hashes found for pattern: $VENV_NAME" else echo "VENV_NAME=$VENV_NAME" >> $GITHUB_ENV - for h in ${RIOT_HASHES[@]}; do + for h in ${RIOT_HASHES[@]}; do + echo "Removing riot lockfiles" rm ".riot/requirements/${h}.txt" done scripts/compile-and-prune-test-requirements From 494c0394aaed8e3fa81a3117557723a50527dd64 Mon Sep 17 00:00:00 2001 From: wantsui Date: Thu, 19 Dec 2024 15:51:59 -0500 Subject: [PATCH 75/78] chore: remove expired until from django snapshot test (#11763) The `until` timestamp in the flaky decorator on Django snapshots has been expired since Jan 2024, which was uncovered by https://github.com/DataDog/dd-trace-py/pull/11274 As seen in this failed run: https://gitlab.ddbuild.io/DataDog/apm-reliability/dd-trace-py/-/jobs/742978251, if we remove it, the current failure is on: > meta mismatch on '_dd.base_service': got 'tests.contrib.django' which does not match expected ''. 
## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- tests/contrib/django/test_django_snapshots.py | 1 - ...t_middleware_trace_partial_based_view.json | 52 +++++++++---------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/tests/contrib/django/test_django_snapshots.py b/tests/contrib/django/test_django_snapshots.py index feaead253d8..d7402e37083 100644 --- a/tests/contrib/django/test_django_snapshots.py +++ b/tests/contrib/django/test_django_snapshots.py @@ -107,7 +107,6 @@ def test_middleware_trace_callable_view(client): assert client.get("/feed-view/").status_code == 200 -@flaky(until=1706677200) @pytest.mark.skipif( 
sys.version_info >= (3, 10, 0), reason=("func_name changed with Python 3.10 which changes the resource name." "TODO: new snapshot required."), diff --git a/tests/snapshots/tests.contrib.django.test_django_snapshots.test_middleware_trace_partial_based_view.json b/tests/snapshots/tests.contrib.django.test_django_snapshots.test_middleware_trace_partial_based_view.json index 9b21d7f1b84..8b56757961b 100644 --- a/tests/snapshots/tests.contrib.django.test_django_snapshots.test_middleware_trace_partial_based_view.json +++ b/tests/snapshots/tests.contrib.django.test_django_snapshots.test_middleware_trace_partial_based_view.json @@ -9,7 +9,7 @@ "type": "web", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.dm": "-0", "_dd.p.tid": "654a694400000000", "component": "django", @@ -45,7 +45,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -62,7 +62,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -79,7 +79,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -96,7 +96,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -113,7 +113,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -130,7 +130,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -147,7 +147,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": 
"", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -164,7 +164,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -181,7 +181,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -198,7 +198,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -215,7 +215,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -232,7 +232,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -249,7 +249,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -266,7 +266,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -283,7 +283,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -300,7 +300,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -317,7 +317,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -334,7 +334,7 @@ "type": "", "error": 0, "meta": 
{ - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -351,7 +351,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -368,7 +368,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -385,7 +385,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -402,7 +402,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -419,7 +419,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -436,7 +436,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, @@ -453,7 +453,7 @@ "type": "", "error": 0, "meta": { - "_dd.base_service": "", + "_dd.base_service": "tests.contrib.django", "_dd.p.tid": "654a694400000000", "component": "django" }, From d197a00c6a15b73ca2ea4d6daa7c5b7f91cf5ff3 Mon Sep 17 00:00:00 2001 From: Taegyun Kim Date: Thu, 19 Dec 2024 15:59:19 -0500 Subject: [PATCH 76/78] chore(profiling): native tests w/ valgrind (#11750) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to 
change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --------- Co-authored-by: David Sanchez <838104+sanchda@users.noreply.github.com> --- .github/workflows/profiling-native.yml | 6 +++++- .../datadog/profiling/build_standalone.sh | 16 +++++++++++++++- .../profiling/dd_wrapper/test/CMakeLists.txt | 13 +++++++++++++ .../profiling/dd_wrapper/test/valgrind.supp | 7 +++++++ .../profiling/stack_v2/test/CMakeLists.txt | 13 +++++++++++++ .../profiling/stack_v2/test/valgrind.supp | 7 +++++++ 6 files changed, 60 insertions(+), 2 deletions(-) create mode 100644 ddtrace/internal/datadog/profiling/dd_wrapper/test/valgrind.supp create mode 100644 ddtrace/internal/datadog/profiling/stack_v2/test/valgrind.supp diff --git a/.github/workflows/profiling-native.yml b/.github/workflows/profiling-native.yml index 98722552dbd..280d586d36e 100644 --- a/.github/workflows/profiling-native.yml +++ b/.github/workflows/profiling-native.yml @@ -20,7 +20,7 @@ jobs: matrix: os: [ubuntu-24.04] python-version: ["3.8", "3.9", "3.10", 
"3.11", "3.12"] - sanitizer: ["safety", "thread"] + sanitizer: ["safety", "thread", "valgrind"] steps: - uses: actions/checkout@v4 @@ -40,6 +40,10 @@ jobs: chmod +x llvm.sh sudo ./llvm.sh 19 + - name: Install Valgrind + run: | + sudo apt-get install -y valgrind + - name: Run tests with sanitizers run: | # DEV: We currently have tests in dd_wrapper and stack_v2, setting diff --git a/ddtrace/internal/datadog/profiling/build_standalone.sh b/ddtrace/internal/datadog/profiling/build_standalone.sh index beeda4f21b4..c7bc4c14af9 100755 --- a/ddtrace/internal/datadog/profiling/build_standalone.sh +++ b/ddtrace/internal/datadog/profiling/build_standalone.sh @@ -94,6 +94,9 @@ compiler_args["cppcheck"]="-DDO_CPPCHECK=ON" compiler_args["infer"]="-DDO_INFER=ON" compiler_args["clangtidy"]="-DDO_CLANGTIDY=ON" compiler_args["clangtidy_cmd"]="-DCLANGTIDY_CMD=${CLANGTIDY_CMD}" +compiler_args["valgrind"]="-DDO_VALGRIND=ON" + +ctest_args=() # Initial cmake args cmake_args=( @@ -169,7 +172,7 @@ run_cmake() { fi if [[ " ${cmake_args[*]} " =~ " -DBUILD_TESTING=ON " ]]; then echo "--------------------------------------------------------------------- Running Tests" - ctest --output-on-failure || { echo "tests failed!"; exit 1; } + ctest ${ctest_args[*]} --output-on-failure || { echo "tests failed!"; exit 1; } fi # OK, the build or whatever went fine I guess. 
@@ -223,6 +226,10 @@ print_cmake_args() { echo "Targets: ${targets[*]}" } +print_ctest_args() { + echo "CTest Args: ${ctest_args[*]}" +} + ### Check input # Check the first slot, options add_compiler_args() { @@ -263,6 +270,11 @@ add_compiler_args() { cmake_args+=(${compiler_args["memory"]}) set_clang ;; + --valgrind) + cmake_args+=(${compiler_args["valgrind"]}) + ctest_args+="-T memcheck" + set_clang + ;; -C|--cppcheck) cmake_args+=(${compiler_args["cppcheck"]}) set_clang @@ -369,6 +381,8 @@ add_target "$3" # Print cmake args print_cmake_args +print_ctest_args + # Run cmake for target in "${targets[@]}"; do run_cmake $target diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt index b80ace74968..299ba8812f8 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt @@ -12,6 +12,19 @@ FetchContent_MakeAvailable(googletest) include(GoogleTest) include(AnalysisFunc) +if(DO_VALGRIND) + find_program(VALGRIND_EXECUTABLE NAMES valgrind PATHS /usr/bin /usr/local/bin) + + if (VALGRIND_EXECUTABLE) + set(MEMORYCHECK_COMMAND "${VALGRIND_EXECUTABLE}") + set(MEMORYCHECK_COMMAND_OPTIONS "--leak-check=full --show-leak-kinds=definite --errors-for-leak-kinds=definite --trace-children=yes --error-exitcode=1 --log-fd=1 --suppressions=${CMAKE_CURRENT_SOURCE_DIR}/valgrind.supp") + else() + message(FATAL_ERROR "Valgrind not found") + endif() + + include(CTest) +endif() + FetchContent_Declare(json URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz) FetchContent_MakeAvailable(json) diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/test/valgrind.supp b/ddtrace/internal/datadog/profiling/dd_wrapper/test/valgrind.supp new file mode 100644 index 00000000000..d8534d2a228 --- /dev/null +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/test/valgrind.supp @@ -0,0 +1,7 @@ +{ + 
ddcommon_uninitialized_value + Memcheck:Cond + fun:eq + ... + fun:*ddcommon*entity_id*unix*container_id* +} diff --git a/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt index 05176f2c803..d6e585f2c9f 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt @@ -12,6 +12,19 @@ FetchContent_MakeAvailable(googletest) include(GoogleTest) include(AnalysisFunc) +if(DO_VALGRIND) + find_program(VALGRIND_EXECUTABLE NAMES valgrind PATHS /usr/bin /usr/local/bin) + + if (VALGRIND_EXECUTABLE) + set(MEMORYCHECK_COMMAND "${VALGRIND_EXECUTABLE}") + set(MEMORYCHECK_COMMAND_OPTIONS "--leak-check=full --show-leak-kinds=definite --errors-for-leak-kinds=definite --trace-children=yes --error-exitcode=1 --log-fd=1 --suppressions=${CMAKE_CURRENT_SOURCE_DIR}/valgrind.supp") + else() + message(FATAL_ERROR "Valgrind not found") + endif() + + include(CTest) +endif() + function(dd_wrapper_add_test name) add_executable(${name} ${ARGN}) target_include_directories(${name} PRIVATE ../include) diff --git a/ddtrace/internal/datadog/profiling/stack_v2/test/valgrind.supp b/ddtrace/internal/datadog/profiling/stack_v2/test/valgrind.supp new file mode 100644 index 00000000000..d8534d2a228 --- /dev/null +++ b/ddtrace/internal/datadog/profiling/stack_v2/test/valgrind.supp @@ -0,0 +1,7 @@ +{ + ddcommon_uninitialized_value + Memcheck:Cond + fun:eq + ... 
+ fun:*ddcommon*entity_id*unix*container_id* +} From f810101aa2d83256bcdcca250fbebbcc37e690ef Mon Sep 17 00:00:00 2001 From: Brett Langdon Date: Thu, 19 Dec 2024 16:32:23 -0500 Subject: [PATCH 77/78] ci: optimize ci runtime (#11798) --- .gitlab/package.yml | 19 ------------------- .gitlab/tests.yml | 14 +++----------- hatch.toml | 7 +++++-- scripts/gen_gitlab_config.py | 9 +-------- tests/suitespec.yml | 8 ++++++++ 5 files changed, 17 insertions(+), 40 deletions(-) diff --git a/.gitlab/package.yml b/.gitlab/package.yml index 0cf300d7cbd..973e2d55d3f 100644 --- a/.gitlab/package.yml +++ b/.gitlab/package.yml @@ -1,22 +1,3 @@ -build_base_venvs: - extends: .testrunner - stage: package - parallel: - matrix: - - PYTHON_VERSION: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] - variables: - CMAKE_BUILD_PARALLEL_LEVEL: 12 - PIP_VERBOSE: 1 - script: - - pip install riot==0.20.0 - - riot -P -v generate --python=$PYTHON_VERSION - artifacts: - name: venv_$PYTHON_VERSION - paths: - - .riot/venv_* - - ddtrace/**/*.so* - - ddtrace/internal/datadog/profiling/crashtracker/crashtracker_exe* - download_ddtrace_artifacts: image: registry.ddbuild.io/github-cli:v27480869-eafb11d-2.43.0 tags: [ "arch:amd64" ] diff --git a/.gitlab/tests.yml b/.gitlab/tests.yml index d38a22cf0ff..b8c9a3d9897 100644 --- a/.gitlab/tests.yml +++ b/.gitlab/tests.yml @@ -10,17 +10,9 @@ variables: PYTEST_ADDOPTS: "-s" # CI_DEBUG_SERVICES: "true" -.testrunner: - image: registry.ddbuild.io/images/mirror/dd-trace-py/testrunner:0a50e839f4b1600f02157518b8d016451b346578@sha256:5dae9bc7872f69b31b612690f0748c7ad71ab90ef28a754b2ae93d0ba505837b - # DEV: we have a larger pool of amd64 runners, prefer that over arm64 - tags: [ "arch:amd64" ] - timeout: 20m - before_script: - - pyenv global 3.12 3.7 3.8 3.9 3.10 3.11 3.13 - - export _CI_DD_AGENT_URL=http://${HOST_IP}:8126/ - - -{{services.yml}} +include: + - local: ".gitlab/services.yml" + - local: ".gitlab/testrunner.yml" .test_base_hatch: extends: .testrunner diff 
--git a/hatch.toml b/hatch.toml index ff11ec3f743..614054dbfed 100644 --- a/hatch.toml +++ b/hatch.toml @@ -154,9 +154,12 @@ extra-dependencies = [ "pytest-cov", "hypothesis<6.45.1" ] +[envs.meta-testing.env-vars] +DD_CIVISIBILITY_FLAKY_RETRY_ENABLED = "0" + [envs.meta-testing.scripts] -meta-testing = [ - "pytest {args} tests/meta" +test = [ + "pytest {args} --no-ddtrace tests/meta" ] [envs.integration_test] diff --git a/scripts/gen_gitlab_config.py b/scripts/gen_gitlab_config.py index 22b236ddfc5..8dc9e5b178f 100644 --- a/scripts/gen_gitlab_config.py +++ b/scripts/gen_gitlab_config.py @@ -95,9 +95,7 @@ def gen_required_suites() -> None: circleci_jobs = set(circleci_config["jobs"].keys()) # Copy the template file - TESTS_GEN.write_text( - (GITLAB / "tests.yml").read_text().replace(r"{{services.yml}}", (GITLAB / "services.yml").read_text()) - ) + TESTS_GEN.write_text((GITLAB / "tests.yml").read_text()) # Generate the list of suites to run with TESTS_GEN.open("a") as f: for suite in required_suites: @@ -162,11 +160,6 @@ def check(name: str, command: str, paths: t.Set[str]) -> None: command="hatch run lint:suitespec-check", paths={"*"}, ) - check( - name="conftest", - command="hatch run meta-testing:meta-testing", - paths={"**conftest.py"}, - ) # ----------------------------------------------------------------------------- diff --git a/tests/suitespec.yml b/tests/suitespec.yml index 4b13005d662..41fabd7aa88 100644 --- a/tests/suitespec.yml +++ b/tests/suitespec.yml @@ -151,6 +151,14 @@ components: vendor: - ddtrace/vendor/* suites: + conftest: + parallelism: 1 + paths: + - 'conftest.py' + - '**/conftest.py' + pattern: meta-testing + runner: hatch + snapshot: false ddtracerun: parallelism: 6 paths: From 5cee25e39d9b8ee0f2db1816a265979c183715a1 Mon Sep 17 00:00:00 2001 From: Taegyun Kim Date: Thu, 19 Dec 2024 18:06:24 -0500 Subject: [PATCH 78/78] ci: cmake format fix (#11809) ## Checklist - [x] PR author has checked that all the criteria below are met - The PR 
description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking [API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) --- .../datadog/profiling/dd_wrapper/test/CMakeLists.txt | 11 ++++++++--- .../datadog/profiling/stack_v2/test/CMakeLists.txt | 11 ++++++++--- scripts/gen_circleci_config.py | 2 +- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt index 299ba8812f8..66dac6b6f0d 100644 --- a/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/dd_wrapper/test/CMakeLists.txt @@ -13,11 +13,16 @@ include(GoogleTest) include(AnalysisFunc) if(DO_VALGRIND) - find_program(VALGRIND_EXECUTABLE NAMES 
valgrind PATHS /usr/bin /usr/local/bin) + find_program( + VALGRIND_EXECUTABLE + NAMES valgrind + PATHS /usr/bin /usr/local/bin) - if (VALGRIND_EXECUTABLE) + if(VALGRIND_EXECUTABLE) set(MEMORYCHECK_COMMAND "${VALGRIND_EXECUTABLE}") - set(MEMORYCHECK_COMMAND_OPTIONS "--leak-check=full --show-leak-kinds=definite --errors-for-leak-kinds=definite --trace-children=yes --error-exitcode=1 --log-fd=1 --suppressions=${CMAKE_CURRENT_SOURCE_DIR}/valgrind.supp") + set(MEMORYCHECK_COMMAND_OPTIONS + "--leak-check=full --show-leak-kinds=definite --errors-for-leak-kinds=definite --trace-children=yes --error-exitcode=1 --log-fd=1 --suppressions=${CMAKE_CURRENT_SOURCE_DIR}/valgrind.supp" + ) else() message(FATAL_ERROR "Valgrind not found") endif() diff --git a/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt b/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt index d6e585f2c9f..423f927d8f1 100644 --- a/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt +++ b/ddtrace/internal/datadog/profiling/stack_v2/test/CMakeLists.txt @@ -13,11 +13,16 @@ include(GoogleTest) include(AnalysisFunc) if(DO_VALGRIND) - find_program(VALGRIND_EXECUTABLE NAMES valgrind PATHS /usr/bin /usr/local/bin) + find_program( + VALGRIND_EXECUTABLE + NAMES valgrind + PATHS /usr/bin /usr/local/bin) - if (VALGRIND_EXECUTABLE) + if(VALGRIND_EXECUTABLE) set(MEMORYCHECK_COMMAND "${VALGRIND_EXECUTABLE}") - set(MEMORYCHECK_COMMAND_OPTIONS "--leak-check=full --show-leak-kinds=definite --errors-for-leak-kinds=definite --trace-children=yes --error-exitcode=1 --log-fd=1 --suppressions=${CMAKE_CURRENT_SOURCE_DIR}/valgrind.supp") + set(MEMORYCHECK_COMMAND_OPTIONS + "--leak-check=full --show-leak-kinds=definite --errors-for-leak-kinds=definite --trace-children=yes --error-exitcode=1 --log-fd=1 --suppressions=${CMAKE_CURRENT_SOURCE_DIR}/valgrind.supp" + ) else() message(FATAL_ERROR "Valgrind not found") endif() diff --git a/scripts/gen_circleci_config.py b/scripts/gen_circleci_config.py 
index 7225ea38d22..627a3715427 100644 --- a/scripts/gen_circleci_config.py +++ b/scripts/gen_circleci_config.py @@ -51,7 +51,7 @@ def check(name: str, command: str, paths: t.Set[str]) -> None: check( name="Style", command="hatch run lint:style", - paths={"docker*", "*.py", "*.pyi", "hatch.toml", "pyproject.toml", "*.cpp", "*.h"}, + paths={"docker*", "*.py", "*.pyi", "hatch.toml", "pyproject.toml", "*.cpp", "*.h", "CMakeLists.txt"}, ) check( name="Typing",